# generate_examples.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate a series of TensorFlow graphs that become tflite test cases.

Usage:

generate_examples <output directory>

bazel run //tensorflow/contrib/lite/testing:generate_examples
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import itertools
import os
import re
import sys
import tempfile
import traceback
import zipfile

import numpy as np
from six import StringIO
from six.moves import xrange

# TODO(aselle): Disable GPU for now
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

# pylint: disable=g-import-not-at-top
import tensorflow as tf
from google.protobuf import text_format
# TODO(aselle): switch to TensorFlow's resource_loader
from tensorflow.contrib.lite.testing import generate_examples_report as report_lib
from tensorflow.python.framework import graph_util as tf_graph_util
from tensorflow.python.ops import rnn

parser = argparse.ArgumentParser(description="Script to generate TFLite tests.")
parser.add_argument("output_path",
                    help="Directory where the outputs will go.")
parser.add_argument("--zip_to_output",
                    type=str,
                    help="Particular zip to output.",
                    required=False)
parser.add_argument("--toco",
                    type=str,
                    help="Path to toco tool.",
                    required=True)
parser.add_argument(
    "--known_bugs_are_errors",
    action="store_true",
    help=("If a particular model is affected by a known bug,"
          " count it as a toco error."))
parser.add_argument(
    "--ignore_toco_errors",
    action="store_true",
    help="Do not raise an exception when a toco error is encountered.")
parser.add_argument(
    "--save_graphdefs",
    action="store_true",
    help="Include intermediate graphdefs in the output zip files.")


RANDOM_SEED = 342
TEST_INPUT_DEPTH = 3


# A map from regular expression to bug number. Any test failure with label
# matching the expression will be considered due to the corresponding bug.
KNOWN_BUGS = {
    # TOCO doesn't support scalars as input.
    r"relu.*input_shape=\[\]": "67587484",
    r"sigmoid.*input_shape=\[\]": "67645668",
    # Concat doesn't work with a single input tensor.
    r"concat.*num_tensors=1": "67378344",
    # Transposition in MatMul is not supported.
    r"fully_connected.*transpose_.=True": "67586970",
    # Softmax graphs are too complex.
    r"softmax.*dim=0": "67749831",
    r"softmax.*input_shape=\[1,3,4,3\]": "67749831",
    # SpaceToDepth only supports float32.
    r"space_to_depth.*(float16|int32|uint8|int64)": "68018134",
    # BatchToSpaceND only supports 4D tensors.
    r"batch_to_space_nd.*input_shape=\[8,2,2,2,1,1\]": "70594733",
    # Div will use floordiv.
    r"div.*int32": "72051395",
    # TOCO requires matching dimensions in strided_slice.
    r"strided_slice.*begin=\[0\].*end=\[1\].*": "73170889",
    # No support for SplitV.
    r"split.*num_or_size_splits=\[2,2\]": "73377559",
    # Needs support for dimensions other than the last one in argmax.
    r"arg_max.*axis=0.*": "77546240",
    r"arg_max.*axis=1.*": "77546240",
    r"arg_max.*axis=2.*": "77546240",
}
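
# For illustration only: make_zip_of_tests() below matches these patterns
# against generated test labels with re.search(). A hypothetical relu model
# whose label contains "input_shape=[]" matches r"relu.*input_shape=\[\]",
# so its toco failure is attributed to bug 67587484 and, unless
# --known_bugs_are_errors is set, is not counted as an error.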
r"arg_max.*axis=0.*": "77546240", r"arg_max.*axis=1.*": "77546240", r"arg_max.*axis=2.*": "77546240", } class ExtraTocoOptions(object): """Additonal toco options besides input, output, shape.""" def __init__(self): # Whether to ignore control dependency nodes. self.drop_control_dependency = False # Allow custom ops in the toco conversion. self.allow_custom_ops = False # Rnn states that are used to support rnn / lstm cells. self.rnn_states = None def toco_options(data_types, input_arrays, output_arrays, shapes, extra_toco_options=ExtraTocoOptions()): """Create TOCO options to process a model. Args: data_types: input and inference types used by TOCO. input_arrays: names of the input tensors output_arrays: name of the output tensors shapes: shapes of the input tensors extra_toco_options: additional toco options Returns: the options in a string. """ shape_str = ":".join([",".join(str(y) for y in x) for x in shapes]) inference_type = "FLOAT" # TODO(ahentz): if we get multi-input quantization to work we need this # to change if data_types[0] == "QUANTIZED_UINT8": inference_type = "QUANTIZED_UINT8" s = (" --input_data_types=%s" % ",".join(data_types) + " --inference_type=%s" % inference_type + " --input_format=TENSORFLOW_GRAPHDEF" + " --output_format=TFLITE" + " --input_arrays=%s" % ",".join(input_arrays) + " --input_shapes=%s" % shape_str + " --output_arrays=%s" % ",".join(output_arrays)) if extra_toco_options.drop_control_dependency: s += " --drop_control_dependency" if extra_toco_options.allow_custom_ops: s += " --allow_custom_ops" if extra_toco_options.rnn_states: s += (" --rnn_states='" + extra_toco_options.rnn_states + "'") return s def write_examples(fp, examples): """Given a list `examples`, write a text format representation. The file format is csv like with a simple repeated pattern. We would ike to use proto here, but we can't yet due to interfacing with the Android team using this format. Args: fp: File-like object to write to. examples: Example dictionary consiting of keys "inputs" and "outputs" """ def write_tensor(fp, x): """Write tensor in file format supported by TFLITE example.""" fp.write("dtype,%s\n" % x.dtype) fp.write("shape," + ",".join(map(str, x.shape)) + "\n") # Output 9 digits after the point to ensure the precision is good enough. values = ["{:.9f}".format(value) for value in list(x.flatten())] fp.write("values," + ",".join(values) + "\n") fp.write("test_cases,%d\n" % len(examples)) for example in examples: fp.write("inputs,%d\n" % len(example["inputs"])) for i in example["inputs"]: write_tensor(fp, i) fp.write("outputs,%d\n" % len(example["outputs"])) for i in example["outputs"]: write_tensor(fp, i) def write_test_cases(fp, model_name, examples): """Given a dictionary of `examples`, write a text format representation. The file format is protocol-buffer-like, even though we don't use proto due to the needs of the Android team. Args: fp: File-like object to write to. model_name: Filename where the model was written to, relative to filename. 


def write_examples(fp, examples):
  """Given a list `examples`, write a text format representation.

  The file format is csv-like with a simple repeated pattern. We would like
  to use proto here, but we can't yet due to interfacing with the Android
  team using this format.

  Args:
    fp: File-like object to write to.
    examples: Example dictionary consisting of keys "inputs" and "outputs"
  """

  def write_tensor(fp, x):
    """Write tensor in file format supported by TFLITE example."""
    fp.write("dtype,%s\n" % x.dtype)
    fp.write("shape," + ",".join(map(str, x.shape)) + "\n")
    # Output 9 digits after the point to ensure the precision is good enough.
    values = ["{:.9f}".format(value) for value in list(x.flatten())]
    fp.write("values," + ",".join(values) + "\n")

  fp.write("test_cases,%d\n" % len(examples))
  for example in examples:
    fp.write("inputs,%d\n" % len(example["inputs"]))
    for i in example["inputs"]:
      write_tensor(fp, i)
    fp.write("outputs,%d\n" % len(example["outputs"]))
    for i in example["outputs"]:
      write_tensor(fp, i)


def write_test_cases(fp, model_name, examples):
  """Given a dictionary of `examples`, write a text format representation.

  The file format is protocol-buffer-like, even though we don't use proto due
  to the needs of the Android team.

  Args:
    fp: File-like object to write to.
    model_name: Filename where the model was written to, relative to filename.
    examples: Example dictionary consisting of keys "inputs" and "outputs"
  """

  fp.write("load_model: %s\n" % os.path.basename(model_name))
  for example in examples:
    fp.write("reshape {\n")
    for t in example["inputs"]:
      fp.write("  input: \"" + ",".join(map(str, t.shape)) + "\"\n")
    fp.write("}\n")
    fp.write("invoke {\n")
    for t in example["inputs"]:
      values = ["{:.9f}".format(value) for value in list(t.flatten())]
      fp.write("  input: \"" + ",".join(values) + "\"\n")
    for t in example["outputs"]:
      values = ["{:.9f}".format(value) for value in list(t.flatten())]
      fp.write("  output: \"" + ",".join(values) + "\"\n")
    fp.write("}\n")
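
# For illustration only: for a single example with one 1-D float input and
# one 1-D float output, write_examples() emits text along these lines:
#
#   test_cases,1
#   inputs,1
#   dtype,float32
#   shape,2
#   values,0.500000000,1.250000000
#   outputs,1
#   dtype,float32
#   shape,2
#   values,0.500000000,1.250000000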
""" data_types = [_TF_TYPE_INFO[x[2]][1] for x in input_tensors] opts = toco_options( data_types=data_types, input_arrays=[x[0] for x in input_tensors], shapes=[x[1] for x in input_tensors], output_arrays=output_tensors, extra_toco_options=extra_toco_options) with tempfile.NamedTemporaryFile() as graphdef_file, \ tempfile.NamedTemporaryFile() as output_file, \ tempfile.NamedTemporaryFile("w+") as stdout_file: graphdef_file.write(graph_def_str) graphdef_file.flush() # TODO(aselle): Switch this to subprocess at some point. cmd = ("%s --input_file=%s --output_file=%s %s > %s 2>&1" % (bin_path, graphdef_file.name, output_file.name, opts, stdout_file.name)) exit_code = os.system(cmd) log = ( cmd + "exited with code %d" % exit_code + "\n------------------\n" + stdout_file.read()) return (None if exit_code != 0 else output_file.read()), log def normalize_output_name(output_name): """Remove :0 suffix from tensor names.""" return output_name.split(":")[0] if output_name.endswith( ":0") else output_name def make_zip_of_tests(zip_path, test_parameters, make_graph, make_test_inputs, extra_toco_options=ExtraTocoOptions(), use_frozen_graph=False): """Helper to make a zip file of a bunch of TensorFlow models. This does a cartestian product of the dictionary of test_parameters and calls make_graph() for each item in the cartestian product set. If the graph is built successfully, then make_test_inputs() is called to build expected input/output value pairs. The model is then converted to tflite with toco, and the examples are serialized with the tflite model into a zip file (2 files per item in the cartesian product set). Args: zip_path: Path of zip file to write test_parameters: Dictionary mapping to lists for each parameter. e.g. `{"strides": [[1,3,3,1], [1,2,2,1]], "foo": [1.2, 1.3]}` make_graph: function that takes current parameters and returns tuple `[input1, input2, ...], [output1, output2, ...]` make_test_inputs: function taking `curr_params`, `session`, `input_tensors`, `output_tensors` and returns tuple `(input_values, output_values)`. extra_toco_options: Additional toco options. use_frozen_graph: Whether or not freeze graph before toco converter. Raises: RuntimeError: if there are toco errors that can't be ignored. """ # TODO(aselle): Make this allow multiple inputs outputs. archive = zipfile.PyZipFile(zip_path, "w") zip_manifest = [] convert_report = [] toco_errors = 0 for parameters in test_parameters: keys = parameters.keys() for curr in itertools.product(*parameters.values()): label = zip_path.replace(".zip", "") + (",".join( "%s=%r" % z for z in sorted(zip(keys, curr))).replace(" ", "")) if label[0] == "/": label = label[1:] param_dict = dict(zip(keys, curr)) def build_example(label, param_dict_real): """Build the model with parameter values set in param_dict_real. Args: label: Label of the model (i.e. the filename in the zip). param_dict_real: Parameter dictionary (arguments to the factories make_graph and make_test_inputs) Returns: (tflite_model_binary, report) where tflite_model_binary is the serialized flatbuffer as a string and report is a dictionary with keys `toco_log` (log of toco conversion), `tf_log` (log of tf conversion), `toco` (a string of success status of the conversion), `tf` (a string success status of the conversion). 
""" np.random.seed(RANDOM_SEED) report = {"toco": report_lib.NOTRUN, "tf": report_lib.FAILED} # Build graph report["tf_log"] = "" report["toco_log"] = "" tf.reset_default_graph() with tf.device("/cpu:0"): try: inputs, outputs = make_graph(param_dict_real) except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError, ValueError): report["tf_log"] += traceback.format_exc() return None, report sess = tf.Session() try: baseline_inputs, baseline_outputs = (make_test_inputs( param_dict_real, sess, inputs, outputs)) except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError, ValueError): report["tf_log"] += traceback.format_exc() return None, report report["toco"] = report_lib.FAILED report["tf"] = report_lib.SUCCESS # Convert graph to toco input_tensors = [(input_tensor.name.split(":")[0], input_tensor.get_shape(), input_tensor.dtype) for input_tensor in inputs] output_tensors = [normalize_output_name(out.name) for out in outputs] graph_def = freeze_graph( sess, tf.global_variables() + inputs + outputs) if use_frozen_graph else sess.graph_def tflite_model_binary, toco_log = toco_convert( graph_def.SerializeToString(), input_tensors, output_tensors, extra_toco_options) report["toco"] = (report_lib.SUCCESS if tflite_model_binary is not None else report_lib.FAILED) report["toco_log"] = toco_log if FLAGS.save_graphdefs: archive.writestr(label + ".pb", text_format.MessageToString(graph_def), zipfile.ZIP_DEFLATED) if tflite_model_binary: archive.writestr(label + ".bin", tflite_model_binary, zipfile.ZIP_DEFLATED) example = {"inputs": baseline_inputs, "outputs": baseline_outputs} example_fp = StringIO() write_examples(example_fp, [example]) archive.writestr(label + ".inputs", example_fp.getvalue(), zipfile.ZIP_DEFLATED) example_fp2 = StringIO() write_test_cases(example_fp2, label + ".bin", [example]) archive.writestr(label + "_tests.txt", example_fp2.getvalue(), zipfile.ZIP_DEFLATED) zip_manifest.append(label + "\n") return tflite_model_binary, report _, report = build_example(label, param_dict) if report["toco"] == report_lib.FAILED: ignore_error = False if not FLAGS.known_bugs_are_errors: for pattern, bug_number in KNOWN_BUGS.items(): if re.search(pattern, label): print("Ignored TOCO error due to bug %s" % bug_number) ignore_error = True if not ignore_error: toco_errors += 1 print("-----------------\ntoco error!\n%s\n-----------------\n" % report["toco_log"]) convert_report.append((param_dict, report)) report_io = StringIO() report_lib.make_report_table(report_io, zip_path, convert_report) archive.writestr("report.html", report_io.getvalue()) archive.writestr("manifest.txt", "".join(zip_manifest), zipfile.ZIP_DEFLATED) # Log statistics of what succeeded total_conversions = len(convert_report) tf_success = sum(1 for x in convert_report if x[1]["tf"] == report_lib.SUCCESS) toco_success = sum(1 for x in convert_report if x[1]["toco"] == report_lib.SUCCESS) percent = 0 if tf_success > 0: percent = float(toco_success) / float(tf_success) * 100. tf.logging.info(("Archive %s Considered %d graphs, %d TF evaluated graphs " " and %d TOCO converted graphs (%.1f%%"), zip_path, total_conversions, tf_success, toco_success, percent) if not FLAGS.ignore_toco_errors and toco_errors > 0: raise RuntimeError( "Found %d errors while generating toco models" % toco_errors) def make_pool_tests(pool_op_in): """Make a set of tests to do average pooling. Args: pool_op_in: TensorFlow pooling operation to test i.e. `tf.nn.avg_pool`. Returns: A function representing the true generator (after curried pool_op_in). 
""" pool_op = pool_op_in def f(zip_path): """Actual function that generates examples. Args: zip_path: path to write zip to. """ # Chose a set of parameters test_parameters = [{ "ksize": [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]], "strides": [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]], # TODO(aselle): should add in a degenerate shape (e.g. [1, 0, 1, 1]). "input_shape": [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]], "padding": ["SAME", "VALID"], "data_format": ["NHWC"], # TODO(aselle): NCHW would be good }] def build_graph(parameters): input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=parameters["input_shape"]) out = pool_op( input_tensor, ksize=parameters["ksize"], strides=parameters["strides"], data_format=parameters["data_format"], padding=parameters["padding"]) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): input_values = create_tensor_data(tf.float32, parameters["input_shape"]) return [input_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values]))) make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs) return f def make_l2_pool_tests(zip_path): make_pool_tests(make_l2_pool)(zip_path) def make_avg_pool_tests(zip_path): make_pool_tests(tf.nn.avg_pool)(zip_path) def make_max_pool_tests(zip_path): make_pool_tests(tf.nn.max_pool)(zip_path) def make_relu_tests(zip_path): """Make a set of tests to do relu.""" # Chose a set of parameters test_parameters = [{ "input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]], }] def build_graph(parameters): input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=parameters["input_shape"]) out = tf.nn.relu(input_tensor) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): input_values = create_tensor_data( np.float32, parameters["input_shape"], min_value=-4, max_value=10) return [input_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values]))) make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs) def make_relu1_tests(zip_path): """Make a set of tests to do relu1.""" # Chose a set of parameters test_parameters = [{ "input_shape": [[], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]], }] def build_graph(parameters): input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=parameters["input_shape"]) # Note that the following is not supported: # out = tf.maximum(-1.0, tf.minimum(input_tensor, 1.0)) out = tf.minimum(1.0, tf.maximum(input_tensor, -1.0)) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): input_values = create_tensor_data( np.float32, parameters["input_shape"], min_value=-3, max_value=10) return [input_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values]))) make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs) def make_relu6_tests(zip_path): """Make a set of tests to do relu6.""" # Chose a set of parameters test_parameters = [{ "input_shape": [[], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]], }] def build_graph(parameters): input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=parameters["input_shape"]) out = tf.nn.relu(input_tensor) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): input_values = create_tensor_data( np.float32, parameters["input_shape"], min_value=-3, max_value=10) return [input_values], 


def make_relu6_tests(zip_path):
  """Make a set of tests to do relu6."""

  # Choose a set of parameters
  test_parameters = [{
      "input_shape": [[], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],
                      [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
  }]

  def build_graph(parameters):
    input_tensor = tf.placeholder(
        dtype=tf.float32, name="input", shape=parameters["input_shape"])
    out = tf.nn.relu6(input_tensor)
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_values = create_tensor_data(
        np.float32, parameters["input_shape"], min_value=-3, max_value=10)
    return [input_values], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_values])))

  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)


# This function tests various TensorFlow functions that generate Const ops,
# including `tf.ones`, `tf.zeros` and random functions.
def make_constant_tests(zip_path):
  """Make a set of tests to do constant ops."""

  test_parameters = [{
      "dtype": [tf.float32, tf.int32],
      "input_shape": [[1], [2], [1, 1, 1, 1], [2, 2, 2, 2]],
  }]

  def build_graph(parameters):
    # Since Toco & Tflite can't have a single constant op in the entire graph,
    # this test adds a zero tensor with a constant op tensor.
    input1 = tf.placeholder(dtype=parameters["dtype"], name="input1",
                            shape=parameters["input_shape"])
    out = tf.ones(parameters["input_shape"],
                  dtype=parameters["dtype"]) + input1
    return [input1], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input1 = np.zeros(parameters["input_shape"],
                      dtype=_TF_TYPE_INFO[parameters["dtype"]][0])
    return [input1], sess.run(outputs, feed_dict={inputs[0]: input1})

  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)


def make_binary_op_tests(zip_path, binary_operator):
  """Make a set of tests to do binary ops with and without broadcast."""

  # These parameters are split because we don't support broadcasting.
  test_parameters = [{
      "dtype": [tf.float32, tf.int32],
      "input_shape_1": [[1, 3, 4, 3]],
      "input_shape_2": [[1, 3, 4, 3]],
      "activation": [True]
  }, {
      "dtype": [tf.float32],
      "input_shape_1": [[5]],
      "input_shape_2": [[5]],
      "activation": [False, True]
  }, {
      "dtype": [tf.float32],
      "input_shape_1": [[1, 3, 4, 3]],
      "input_shape_2": [[3]],
      "activation": [True]
  }]

  def build_graph(parameters):
    """Builds the graph given the current parameters."""
    input1 = tf.placeholder(
        dtype=parameters["dtype"],
        name="input1",
        shape=parameters["input_shape_1"])
    input2 = tf.placeholder(
        dtype=parameters["dtype"],
        name="input2",
        shape=parameters["input_shape_2"])
    out = binary_operator(input1, input2)
    if parameters["activation"]:
      out = tf.nn.relu(out)
    return [input1, input2], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Builds operand inputs for op."""
    input1 = create_tensor_data(parameters["dtype"],
                                parameters["input_shape_1"])
    input2 = create_tensor_data(parameters["dtype"],
                                parameters["input_shape_2"])
    return [input1, input2], sess.run(
        outputs, feed_dict={
            inputs[0]: input1,
            inputs[1]: input2
        })

  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
if parameters["const_axis"]: axis = parameters["axis"] input_tensors = [input_tensor] else: if isinstance(parameters["axis"], list): shape = [len(parameters["axis"])] else: shape = [0] # shape for None or integers. axis = tf.placeholder(dtype=tf.int32, name="axis", shape=shape) input_tensors = [input_tensor, axis] out = tf.reduce_mean( input_tensor, axis=axis, keepdims=parameters["keepdims"]) return input_tensors, [out] def build_inputs(parameters, sess, inputs, outputs): values = [ create_tensor_data(parameters["input_dtype"], parameters["input_shape"]) ] if not parameters["const_axis"]: if parameters["axis"]: values.append(np.array(parameters["axis"])) return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs) def make_exp_tests(zip_path): """Make a set of tests to do exp.""" test_parameters = [{ "input_dtype": [tf.float32], "input_shape": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]], }] def build_graph(parameters): """Build the exp op testing graph.""" input_tensor = tf.placeholder( dtype=parameters["input_dtype"], name="input", shape=parameters["input_shape"]) out = tf.exp(input_tensor) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): values = [ create_tensor_data(parameters["input_dtype"], parameters["input_shape"], min_value=-100, max_value=9) ] return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs) def make_log_softmax_tests(zip_path): """Make a set of tests to do log_softmax.""" test_parameters = [{ "input_dtype": [tf.float32], "input_shape": [[1, 100], [4, 2], [5, 224]], }] def build_graph(parameters): """Build the log_softmax op testing graph.""" input_tensor = tf.placeholder( dtype=parameters["input_dtype"], name="input", shape=parameters["input_shape"]) out = tf.nn.log_softmax(input_tensor) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): values = [ create_tensor_data( parameters["input_dtype"], parameters["input_shape"], min_value=-100, max_value=9) ] return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs) def make_maximum_tests(zip_path): """Make a set of tests to do maximum.""" test_parameters = [{ "input_dtype": [tf.float32], "input_shape_1": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]], "input_shape_2": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]], }] def build_graph(parameters): """Build the maximum op testing graph.""" input_tensor_1 = tf.placeholder( dtype=parameters["input_dtype"], name="input_1", shape=parameters["input_shape_1"]) input_tensor_2 = tf.placeholder( dtype=parameters["input_dtype"], name="input_2", shape=parameters["input_shape_2"]) out = tf.maximum(input_tensor_1, input_tensor_2) return [input_tensor_1, input_tensor_2], [out] def build_inputs(parameters, sess, inputs, outputs): values = [ create_tensor_data(parameters["input_dtype"], parameters["input_shape_1"]), create_tensor_data(parameters["input_dtype"], parameters["input_shape_2"]) ] return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs) def make_minimum_tests(zip_path): """Make a set of tests to do minimum.""" test_parameters = [{ "input_dtype": [tf.float32], "input_shape_1": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]], "input_shape_2": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]], }] def 


def make_minimum_tests(zip_path):
  """Make a set of tests to do minimum."""

  test_parameters = [{
      "input_dtype": [tf.float32],
      "input_shape_1": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
      "input_shape_2": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
  }]

  def build_graph(parameters):
    """Build the minimum op testing graph."""
    input_tensor_1 = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input_1",
        shape=parameters["input_shape_1"])
    input_tensor_2 = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input_2",
        shape=parameters["input_shape_2"])

    out = tf.minimum(input_tensor_1, input_tensor_2)
    return [input_tensor_1, input_tensor_2], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    values = [
        create_tensor_data(parameters["input_dtype"],
                           parameters["input_shape_1"]),
        create_tensor_data(parameters["input_dtype"],
                           parameters["input_shape_2"])
    ]
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)


def make_binary_op_tests_func(binary_operator):
  """Return a function that does a test on a binary operator."""
  return lambda zip_path: make_binary_op_tests(zip_path, binary_operator)


def make_add_tests(zip_path):
  make_binary_op_tests(zip_path, tf.add)


def make_div_tests(zip_path):
  make_binary_op_tests(zip_path, tf.div)


def make_sub_tests(zip_path):
  make_binary_op_tests(zip_path, tf.subtract)
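
# For illustration only: make_binary_op_tests_func lets new binary-op test
# generators be defined in one line, e.g. (hypothetical, not registered in
# this file):
#
#   make_squared_difference_tests = make_binary_op_tests_func(
#       tf.squared_difference)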


def make_mul_tests(zip_path):
  make_binary_op_tests(zip_path, tf.multiply)


def make_gather_tests(zip_path):
  """Make a set of tests to do gather."""

  test_parameters = [{
      # TODO(mgubin): add string tests when they are supported by Toco.
      # TODO(mgubin): add tests for Nd indices when they are supported by
      # TfLite.
      "params_dtype": [tf.float32, tf.int32],
      "params_shape": [[10], [1, 2, 20]],
      "indices_dtype": [tf.int32],
      "indices_shape": [[3], [5]],
      "axis": [0, 1],
  }]

  def build_graph(parameters):
    """Build the gather op testing graph."""
    params = tf.placeholder(
        dtype=parameters["params_dtype"],
        name="params",
        shape=parameters["params_shape"])
    indices = tf.placeholder(
        dtype=parameters["indices_dtype"],
        name="indices",
        shape=parameters["indices_shape"])
    out = tf.gather(params, indices, axis=parameters["axis"])
    return [params, indices], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    params = create_tensor_data(parameters["params_dtype"],
                                parameters["params_shape"])
    indices = create_tensor_data(parameters["indices_dtype"],
                                 parameters["indices_shape"], 0,
                                 parameters["params_shape"][0] - 1)
    return [params, indices], sess.run(
        outputs, feed_dict=dict(zip(inputs, [params, indices])))

  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)


def make_global_batch_norm_tests(zip_path):
  """Make a set of tests to do batch_norm_with_global_normalization."""

  test_parameters = [{
      "dtype": [tf.float32],
      "input_shape": [[1, 1, 6, 2], [3, 4, 5, 4]],
      "epsilon": [0.1, 0.0001],
      "scale_after": [True, False],
  }]

  def build_graph(parameters):
    """Build the global batch norm testing graph."""
    input_shape = parameters["input_shape"]
    scale_shape = input_shape[3]

    scale = create_tensor_data(parameters["dtype"], scale_shape)
    offset = create_tensor_data(parameters["dtype"], scale_shape)
    mean = create_tensor_data(parameters["dtype"], scale_shape)
    variance = create_tensor_data(parameters["dtype"], scale_shape)

    x = create_tensor_data(parameters["dtype"], parameters["input_shape"])
    x_norm = tf.nn.batch_norm_with_global_normalization(
        x, mean, variance, scale, offset, parameters["epsilon"],
        parameters["scale_after"])

    input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
                                  shape=parameters["input_shape"])
    out = tf.add(input_tensor, x_norm)
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_value = create_tensor_data(parameters["dtype"],
                                     parameters["input_shape"])
    return [input_value], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value])))

  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)


def make_fused_batch_norm_tests(zip_path):
  """Make a set of tests to do fused_batch_norm."""

  test_parameters = [{
      "dtype": [tf.float32],
      "input_shape": [[1, 1, 6, 2]],
      "epsilon": [0.001, 0.1],
  }]

  def build_graph(parameters):
    """Build the testing graph for fused batch normalization."""
    input_shape = parameters["input_shape"]
    scale_shape = input_shape[3]

    scale = create_tensor_data(parameters["dtype"], scale_shape)
    offset = create_tensor_data(parameters["dtype"], scale_shape)
    mean = create_tensor_data(parameters["dtype"], scale_shape)
    variance = create_tensor_data(parameters["dtype"], scale_shape)

    x = create_tensor_data(parameters["dtype"], parameters["input_shape"])
    [x_norm, _, _] = tf.nn.fused_batch_norm(
        x, scale, offset, mean, variance,
        parameters["epsilon"], data_format="NHWC", is_training=False)

    input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
                                  shape=parameters["input_shape"])
    out = tf.add(input_tensor, x_norm)
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_value = create_tensor_data(parameters["dtype"],
                                     parameters["input_shape"])
    return [input_value], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value])))

  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)


def make_conv_tests(zip_path):
  """Make a set of tests to do convolution."""

  test_parameters = [
      {
          "input_shape": [[1, 3, 4, 3]],
          "filter_shape": [[1, 1, 3, 2]],
          "strides": [[1, 1, 1, 1], [1, 2, 3, 1]],
          "dilations": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]],
          "padding": ["SAME", "VALID"],
          "data_format": ["NHWC"],  # TODO(aselle): NCHW would be good
          "constant_filter": [True, False],
      },
      {
          "input_shape": [[2, 14, 14, 2]],
          "filter_shape": [[6, 6, 2, 2]],
          "strides": [[1, 1, 1, 1], [1, 2, 3, 1]],
          "dilations": [[1, 1, 1, 1], [1, 2, 2, 1]],
          "padding": ["SAME", "VALID"],
          "data_format": ["NHWC"],  # TODO(aselle): NCHW would be good
          "constant_filter": [True, False],
      }
  ]

  def build_graph(parameters):
    """Build a conv graph given `parameters`."""
    input_tensor = tf.placeholder(
        dtype=tf.float32, name="input", shape=parameters["input_shape"])

    # Get filter input either as a placeholder or constants. Also get a list
    # of the input tensors that are represented as placeholders.
    if parameters["constant_filter"]:
      filter_input = create_tensor_data(np.float32,
                                        parameters["filter_shape"])
      input_tensors = [input_tensor]
    else:
      filter_input = tf.placeholder(
          dtype=tf.float32, name="filter", shape=parameters["filter_shape"])
      input_tensors = [input_tensor, filter_input]

    out = tf.nn.conv2d(
        input_tensor,
        filter_input,
        strides=parameters["strides"],
        dilations=parameters["dilations"],
        padding=parameters["padding"],
        data_format=parameters["data_format"])
    return input_tensors, [out]

  def build_inputs(parameters, sess, inputs, outputs):
    # Build list of input values either containing 1 tensor (input) or 2
    # tensors (input, filter) based on whether filter is constant or variable
    # input.
    values = [create_tensor_data(np.float32, parameters["input_shape"])]
    if not parameters["constant_filter"]:
      values.append(create_tensor_data(np.float32,
                                       parameters["filter_shape"]))
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)


def make_depthwiseconv_tests(zip_path):
  """Make a set of tests to do depthwise convolution."""

  # Tensorflow only supports equal strides
  test_parameters = [
      {
          "input_shape": [[1, 3, 4, 3], [1, 10, 10, 3]],
          "filter_size": [[1, 1], [1, 2], [3, 3]],
          "strides": [[1, 1, 1, 1], [1, 3, 3, 1]],
          "channel_multiplier": [1, 2],
          "rate": [[1, 1]],
          "padding": ["SAME", "VALID"],
          "data_format": ["NHWC"],
          "constant_filter": [True, False],
      },
      {
          "input_shape": [[1, 3, 4, 3]],
          "filter_size": [[1, 1]],
          "strides": [[1, 1, 2, 1]],  # TF needs [1, x, x, 1]
          "channel_multiplier": [2],
          "rate": [[2, 2]],  # Only [1, 1] is supported
          "padding": ["SAME"],
          "data_format": ["NHWC"],
          "constant_filter": [True, False],
      }
  ]

  def get_tensor_shapes(parameters):
    input_shape = parameters["input_shape"]
    filter_size = parameters["filter_size"]
    filter_shape = filter_size + [
        input_shape[3], parameters["channel_multiplier"]
    ]
    return [input_shape, filter_shape]

  def build_graph(parameters):
    """Build a depthwise conv graph given `parameters`."""
    input_shape, filter_shape = get_tensor_shapes(parameters)
    input_tensor = tf.placeholder(
        dtype=tf.float32, name="input", shape=input_shape)
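    # For reference (ignoring dilation): with "VALID" padding, the conv output
    # height/width is ceil((in - filter + 1) / stride); with "SAME" padding it
    # is ceil(in / stride). E.g. for the [2, 14, 14, 2] input above with a 6x6
    # filter and stride 2: VALID gives ceil(9 / 2) = 5, SAME gives
    # ceil(14 / 2) = 7.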

    # Get filter input either as a placeholder or constants. Also get a list
    # of the input tensors that are represented as placeholders.
    if parameters["constant_filter"]:
      filter_input = create_tensor_data(np.float32, filter_shape)
      input_tensors = [input_tensor]
    else:
      filter_input = tf.placeholder(
          dtype=tf.float32, name="filter", shape=filter_shape)
      input_tensors = [input_tensor, filter_input]

    out = tf.nn.depthwise_conv2d(
        input_tensor,
        filter_input,
        strides=parameters["strides"],
        rate=parameters["rate"],
        padding=parameters["padding"],
        data_format=parameters["data_format"])
    return input_tensors, [out]

  def build_inputs(parameters, sess, inputs, outputs):
    # Build list of input values either containing 1 tensor (input) or 2
    # tensors (input, filter) based on whether filter is constant or variable
    # input.
    input_shape, filter_shape = get_tensor_shapes(parameters)
    values = [create_tensor_data(np.float32, input_shape)]
    if not parameters["constant_filter"]:
      values.append(create_tensor_data(np.float32, filter_shape))
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)


def make_split_tests(zip_path):
  """Make a set of tests to do tf.split."""

  test_parameters = [{
      "input_shape": [[1, 3, 4, 6], [2, 4, 1], [6, 4], [8]],
      "num_or_size_splits": [1, 2, 3, 4, 5, [2, 2]],
      "axis": [0, 1, 2, 3, -4, -3, -2, -1],
  }]

  def build_graph(parameters):
    input_tensor = tf.placeholder(
        dtype=tf.float32, name="input", shape=parameters["input_shape"])
    out = tf.split(
        input_tensor, parameters["num_or_size_splits"], parameters["axis"])
    return [input_tensor], out

  def build_inputs(parameters, sess, inputs, outputs):
    values = [create_tensor_data(np.float32, parameters["input_shape"])]
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)


def make_concat_tests(zip_path):
  """Make a set of tests to do concatenation."""

  test_parameters = [{
      "base_shape": [[1, 3, 4, 3], [3, 4]],
      "num_tensors": [1, 2, 3, 4, 5, 6],
      "axis": [0, 1, 2, 3, -3, -2, -1],
  }]

  def get_shape(parameters, delta):
    """Return a tweaked version of 'base_shape'."""
    axis = parameters["axis"]
    shape = parameters["base_shape"][:]
    if axis < 0:
      axis += len(shape)
    if axis < len(shape):
      shape[axis] += delta
    return shape

  def build_graph(parameters):
    all_tensors = []
    for n in range(0, parameters["num_tensors"]):
      input_tensor = tf.placeholder(dtype=tf.float32, name=("input%d" % n),
                                    shape=get_shape(parameters, n))
      all_tensors.append(input_tensor)
    out = tf.concat(all_tensors, parameters["axis"])
    return all_tensors, [out]

  def build_inputs(parameters, sess, inputs, outputs):
    all_values = []
    for n in range(0, parameters["num_tensors"]):
      input_values = create_tensor_data(np.float32, get_shape(parameters, n))
      all_values.append(input_values)
    return all_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, all_values)))

  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)


def make_fully_connected_tests(zip_path):
  """Make a set of tests to do fully_connected."""

  test_parameters = [{
      "shape1": [[3, 3]],
      "shape2": [[3, 3]],
      "transpose_a": [True, False],
      "transpose_b": [True, False],
      "constant_filter": [True, False],
  }, {
      "shape1": [[4, 4], [1, 4], [4]],
      "shape2": [[4, 4], [4, 1], [4]],
      "transpose_a": [False],
      "transpose_b": [False],
      "constant_filter": [True, False],
  }, {
      "shape1": [[40, 37]],
      "shape2": [[37, 40]],
      "transpose_a": [False],
      "transpose_b": [False],
      "constant_filter": [True, False],
  }]

  def build_graph(parameters):
    """Build a matmul graph given `parameters`."""
    input_tensor1 = tf.placeholder(dtype=tf.float32, name="input1",
                                   shape=parameters["shape1"])
`parameters`.""" input_tensor1 = tf.placeholder(dtype=tf.float32, name="input1", shape=parameters["shape1"]) # Get input_tensor2 either as a placeholder or constants. Also get a list of # the input tensors that are represented as placeholders. if parameters["constant_filter"]: input_tensor2 = create_tensor_data(np.float32, parameters["shape2"]) input_tensors = [input_tensor1] else: input_tensor2 = tf.placeholder( dtype=tf.float32, name="input2", shape=parameters["shape2"]) input_tensors = [input_tensor1, input_tensor2] out = tf.matmul(input_tensor1, input_tensor2, transpose_a=parameters["transpose_a"], transpose_b=parameters["transpose_b"]) return input_tensors, [out] def build_inputs(parameters, sess, inputs, outputs): # Build list of input values either containing 1 tensor (input_values1) or 2 # tensors (input_values1, input_values2) based on whether the second input # is a constant or variable input. values = [create_tensor_data(np.float32, shape=parameters["shape1"])] if not parameters["constant_filter"]: values.append(create_tensor_data(np.float32, parameters["shape2"])) return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs) def make_l2norm_tests(zip_path): """Make a set of tests to do l2norm.""" # Chose a set of parameters test_parameters = [{ "input_shape": [[5, 7], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]], "dim": [0, 1, 2, 3, [2, 3], -2], "epsilon": [None, 1e-12, 1e-3], }] def build_graph(parameters): input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=parameters["input_shape"]) if parameters["epsilon"]: out = tf.nn.l2_normalize( input_tensor, parameters["dim"], epsilon=parameters["epsilon"]) else: out = tf.nn.l2_normalize(input_tensor, parameters["dim"]) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): input_values = create_tensor_data( np.float32, parameters["input_shape"], min_value=-4, max_value=10) return [input_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values]))) make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs) def make_local_response_norm_tests(zip_path): """Make a set of tests to do local_response_norm.""" # Chose a set of parameters test_parameters = [{ "input_shape": [[1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3]], "depth_radius": [None, 0, 1, 3, 4, 5], "bias": [None, 0.1, 0.3, -0.1], "alpha": [None, 1, 2, -3], "beta": [None, 0.5, 0.25, 2], }] def build_graph(parameters): input_tensor = tf.placeholder( dtype=tf.float32, name="input", shape=parameters["input_shape"]) out = tf.nn.local_response_normalization( input_tensor, depth_radius=parameters["depth_radius"], bias=parameters["bias"], alpha=parameters["alpha"], beta=parameters["beta"]) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): input_values = create_tensor_data( np.float32, parameters["input_shape"], min_value=-4, max_value=10) return [input_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values]))) make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs) def make_pad_tests(zip_path): """Make a set of tests to do pad.""" # TODO(nupurgarg): Add test for tf.uint8. test_parameters = [ { "dtype": [tf.int32, tf.int64, tf.float32], "input_shape": [[1, 1, 2, 1], [2, 1, 1, 1]], "paddings": [[[0, 0], [0, 1], [2, 3], [0, 0]], [[0, 1], [0, 0], [0, 0], [2, 3]]], "constant_paddings": [True, False], }, # Non-4D use case. 
{ "dtype": [tf.int32, tf.int64, tf.float32], "input_shape": [[1, 2], [0, 1, 2]], "paddings": [[[0, 1], [2, 3]]], "constant_paddings": [True, False], }, ] def build_graph(parameters): """Build a pad graph given `parameters`.""" input_tensor = tf.placeholder( dtype=parameters["dtype"], name="input", shape=parameters["input_shape"]) # Get paddings as either a placeholder or constants. if parameters["constant_paddings"]: paddings = parameters["paddings"] input_tensors = [input_tensor] else: shape = [len(parameters["paddings"]), 2] paddings = tf.placeholder(dtype=tf.int32, name="padding", shape=shape) input_tensors = [input_tensor, paddings] out = tf.pad(input_tensor, paddings=paddings) return input_tensors, [out] def build_inputs(parameters, sess, inputs, outputs): values = [ create_tensor_data(parameters["dtype"], parameters["input_shape"]) ] if not parameters["constant_paddings"]: values.append(np.array(parameters["paddings"])) return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs) def make_reshape_tests(zip_path): """Make a set of tests to do reshape.""" # All shapes below are suitable for tensors with 420 elements. test_parameters = [{ "dtype": [tf.float32, tf.int32], "input_shape": [[3, 4, 5, 7], [4, 105], [21, 5, 2, 2], [420]], "output_shape": [[15, 28], [420], [1, -1, 5, 7], [-1]], }] def build_graph(parameters): input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input", shape=parameters["input_shape"]) out = tf.reshape(input_tensor, shape=parameters["output_shape"]) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): input_values = create_tensor_data(parameters["dtype"], parameters["input_shape"]) return [input_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values]))) make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs) def make_resize_bilinear_tests(zip_path): """Make a set of tests to do resize_bilinear.""" test_parameters = [{ "dtype": [tf.float32, tf.int32], "input_shape": [[1, 3, 4, 3], [1, 10, 2, 1]], "size": [[1, 1], [4, 3], [2, 2], [5, 6]], "align_corners": [None, True, False], }] def build_graph(parameters): input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input", shape=parameters["input_shape"]) out = tf.image.resize_bilinear(input_tensor, size=parameters["size"], align_corners=parameters["align_corners"]) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): input_values = create_tensor_data(parameters["dtype"], parameters["input_shape"]) return [input_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values]))) make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs) def make_sigmoid_tests(zip_path): """Make a set of tests to do sigmoid.""" test_parameters = [{ "dtype": [tf.float32], "input_shape": [[1, 3, 4, 3], [4], [], [1, 2, 3, 4, 5, 6]], }] def build_graph(parameters): input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input", shape=parameters["input_shape"]) out = tf.sigmoid(input_tensor) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): input_values = create_tensor_data(parameters["dtype"], parameters["input_shape"]) return [input_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values]))) make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs) def make_softmax_tests(zip_path): """Make a set of tests to do softmax.""" test_parameters = [{ "dtype": [tf.float32], 
"input_shape": [[1, 3, 4, 3], [2, 3]], "dim": [-1, 0], }, { "dtype": [tf.float32], "input_shape": [[4, 7]], "dim": [-1, 1], }] def build_graph(parameters): input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input", shape=parameters["input_shape"]) out = tf.nn.softmax(input_tensor, dim=parameters["dim"]) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): input_values = create_tensor_data(parameters["dtype"], parameters["input_shape"]) return [input_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values]))) make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs) def make_space_to_depth_tests(zip_path): """Make a set of tests to do space_to_depth.""" test_parameters = [{ "dtype": [tf.float32, tf.float16, tf.int32, tf.uint8, tf.int64], "input_shape": [[2, 12, 24, 1]], "block_size": [2, 3, 4], }] def build_graph(parameters): input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input", shape=parameters["input_shape"]) out = tf.space_to_depth(input_tensor, block_size=parameters["block_size"]) return [input_tensor], [out] def build_inputs(parameters, sess, inputs, outputs): input_values = create_tensor_data(parameters["dtype"], parameters["input_shape"]) return [input_values], sess.run( outputs, feed_dict=dict(zip(inputs, [input_values]))) make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs) def make_space_to_batch_nd_tests(zip_path): """Make a set of tests to do space_to_batch_nd.""" # TODO(nupurgarg): Add test for uint8. test_parameters = [ { "dtype": [tf.int32, tf.int64, tf.float32], "input_shape": [[1, 2, 2, 3], [2, 2, 4, 1]], "block_shape": [[1, 3], [2, 2]], "paddings": [[[0, 0], [0, 0]], [[0, 0], [2, 0]], [[1, 1], [1, 1]]], "constant_block_shape": [True, False], "constant_paddings": [True, False], }, { "dtype": [tf.float32], "input_shape": [[2, 3, 7, 3]], "block_shape": [[1, 3], [2, 2]], "paddings": [[[0, 0], [2, 0]], [[1, 0], [1, 0]]], "constant_block_shape": [True, False], "constant_paddings": [True, False], }, # Non-4D use case: 1 bath dimension, 3 spatial dimensions, 2 others. { "dtype": [tf.float32], "input_shape": [[1, 4, 4, 4, 1, 1]], "block_shape": [[2, 2, 2]], "paddings": [[[0, 0], [0, 0], [0, 0]]], "constant_block_shape": [True, False], "constant_paddings": [True, False], }, ] def build_graph(parameters): """Build a space_to_batch graph given `parameters`.""" input_tensor = tf.placeholder( dtype=parameters["dtype"], name="input", shape=parameters["input_shape"]) input_tensors = [input_tensor] # Get block_shape either as a const or as a placeholder (tensor). if parameters["constant_block_shape"]: block_shape = parameters["block_shape"] else: shape = [len(parameters["block_shape"])] block_shape = tf.placeholder(dtype=tf.int32, name="shape", shape=shape) input_tensors.append(block_shape) # Get paddings either as a const or as a placeholder (tensor). 
if parameters["constant_paddings"]: paddings = parameters["paddings"] else: shape = [len(parameters["paddings"]), 2] paddings = tf.placeholder(dtype=tf.int32, name="paddings", shape=shape) input_tensors.append(paddings) out = tf.space_to_batch_nd(input_tensor, block_shape, paddings) return input_tensors, [out] def build_inputs(parameters, sess, inputs, outputs): values = [ create_tensor_data(parameters["dtype"], parameters["input_shape"]) ] if not parameters["constant_block_shape"]: values.append(np.array(parameters["block_shape"])) if not parameters["constant_paddings"]: values.append(np.array(parameters["paddings"])) return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs) def make_batch_to_space_nd_tests(zip_path): """Make a set of tests to do batch_to_space_nd.""" test_parameters = [ { "dtype": [tf.float32, tf.int64, tf.int32], "input_shape": [[12, 3, 3, 1]], "block_shape": [[1, 4], [2, 2], [3, 4]], "crops": [[[0, 0], [0, 0]], [[1, 1], [1, 1]]], "constant_block_shape": [True, False], "constant_crops": [True, False], }, # Non-4D use case: 1 bath dimension, 3 spatial dimensions, 2 others. { "dtype": [tf.float32], "input_shape": [[8, 2, 2, 2, 1, 1]], "block_shape": [[2, 2, 2]], "crops": [[[0, 0], [0, 0], [0, 0]]], "constant_block_shape": [True, False], "constant_crops": [True, False], }, ] def build_graph(parameters): """Build a batch_to_space graph given `parameters`.""" input_tensor = tf.placeholder( dtype=parameters["dtype"], name="input", shape=parameters["input_shape"]) input_tensors = [input_tensor] # Get block_shape either as a const or as a placeholder (tensor). if parameters["constant_block_shape"]: block_shape = parameters["block_shape"] else: shape = [len(parameters["block_shape"])] block_shape = tf.placeholder(dtype=tf.int32, name="shape", shape=shape) input_tensors.append(block_shape) # Get crops either as a const or as a placeholder (tensor). if parameters["constant_crops"]: crops = parameters["crops"] else: shape = [len(parameters["crops"]), 2] crops = tf.placeholder(dtype=tf.int32, name="crops", shape=shape) input_tensors.append(crops) out = tf.batch_to_space_nd(input_tensor, block_shape, crops) return input_tensors, [out] def build_inputs(parameters, sess, inputs, outputs): values = [ create_tensor_data(parameters["dtype"], parameters["input_shape"]) ] if not parameters["constant_block_shape"]: values.append(np.array(parameters["block_shape"])) if not parameters["constant_crops"]: values.append(np.array(parameters["crops"])) return values, sess.run(outputs, feed_dict=dict(zip(inputs, values))) make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs) def make_transpose_tests(zip_path): """Make a set of tests to do transpose.""" # TODO(nupurgarg): Add test for uint8. 


def make_transpose_tests(zip_path):
  """Make a set of tests to do transpose."""

  # TODO(nupurgarg): Add test for uint8.
  test_parameters = [{
      "dtype": [tf.int32, tf.int64, tf.float32],
      "input_shape": [[2, 2, 3]],
      "perm": [[0, 1, 2], [0, 2, 1]],
      "constant_perm": [True, False],
  }, {
      "dtype": [tf.float32],
      "input_shape": [[1, 2, 3, 4]],
      "perm": [[0, 1, 2, 3], [3, 0, 1, 2]],
      "constant_perm": [True, False],
  }, {
      "dtype": [tf.float32],
      "input_shape": [[1, 2, 3, 4, 5]],
      "perm": [[4, 3, 2, 1, 0]],
      "constant_perm": [True, False],
  }]

  def build_graph(parameters):
    """Build a transpose graph given `parameters`."""
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])

    if parameters["constant_perm"]:
      perm = parameters["perm"]
      input_tensors = [input_tensor]
    else:
      # The perm placeholder is a 1-D tensor with one entry per dimension,
      # matching the np.array(parameters["perm"]) fed in build_inputs.
      shape = [len(parameters["perm"])]
      perm = tf.placeholder(dtype=tf.int32, name="perm", shape=shape)
      input_tensors = [input_tensor, perm]

    out = tf.transpose(input_tensor, perm=perm)
    return input_tensors, [out]

  def build_inputs(parameters, sess, inputs, outputs):
    values = [
        create_tensor_data(parameters["dtype"], parameters["input_shape"])
    ]
    if not parameters["constant_perm"]:
      values.append(np.array(parameters["perm"]))
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)


def make_squeeze_tests(zip_path):
  """Make a set of tests to do squeeze."""

  test_parameters = [{
      "dtype": [tf.int32, tf.float32, tf.int64],
      "input_shape": [[1, 2, 1, 3, 1, 4, 1, 1]],
      "axis": [
          None, [], [0, 2], [4, 7], [-1, 0, 2, 0, 7, -6], [1], [2, 3, 2],
          [-1, -2, -4, -6, -8], [0, 2, 4, 6, 7], [7, 6, 4, 2, 0], [6, 6],
          [0, 1, 2, 3, 4, 5, 6, 7], [-2, -3, 1, 0, 7, -5]
      ],
  }, {
      "dtype": [tf.int32, tf.float32, tf.int64],
      "input_shape": [[1]],
      "axis": [None, [], [0], [-1]],
  }, {
      "dtype": [tf.int32, tf.float32, tf.int64],
      "input_shape": [[1, 1, 1, 1, 1]],
      "axis": [None, [], [0], [3, 0], [-2, 0, 3, 2]],
  }]

  def build_graph(parameters):
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    out = tf.squeeze(input_tensor, axis=parameters["axis"])
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_values = create_tensor_data(parameters["dtype"],
                                      parameters["input_shape"])
    return [input_values], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_values])))

  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)


def make_strided_slice_tests(zip_path):
  """Make a set of tests to do strided_slice."""

  # TODO(soroosh): add test/support for uint8.
  test_parameters = [
      # 4-D
      {
          "dtype": [tf.float32, tf.int32, tf.int64],
          "index_type": [tf.int32],
          "input_shape": [[12, 2, 2, 5]],
          "begin": [[0, 0, 0, 0], [1, 0, 1, 0]],
          "end": [[8, 2, 2, 3], [12, 2, 2, 5]],
          "strides": [None, [2, 1, 3, 1]],
          "begin_mask": [None, 1, 8],
          "end_mask": [None, 1, 8],
          "shrink_axis_mask": [None, 1, 8, 11, 15, -1],
          "constant_indices": [False, True],
      },
      # TODO(b/73170889) Restore test parameters removed in cl/191608113.
      # 2-D
      {
          "dtype": [tf.float32, tf.int32, tf.int64],
          "index_type": [tf.int32],
          "input_shape": [[2, 3]],
          "begin": [[0, 0], [1, 0]],
          "end": [[2, 3], [2, 2]],
          "strides": [None, [2, 2]],
          "begin_mask": [None, 1, 2],
          "end_mask": [None, 1, 2],
          "shrink_axis_mask": [None, 1, 2, 3, -1],
          "constant_indices": [False, True],
      },
      # Negative strides
      {
          "dtype": [tf.float32],
          "index_type": [tf.int32],
          "input_shape": [[2, 3]],
          "begin": [[0, -1]],
          "end": [[2, -3]],
          "strides": [[1, -1]],
          "begin_mask": [None, 1, 2],
          "end_mask": [None, 1, 2],
          "shrink_axis_mask": [None, 1, 2, 3, -1],
          "constant_indices": [False],
      },
  ]

  def build_graph(parameters):
    """Build graph for stride_slice test."""
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    if parameters["constant_indices"]:
      begin = parameters["begin"]
      end = parameters["end"]
      strides = parameters["strides"]
      tensors = [input_tensor]
    else:
      begin = tf.placeholder(
          dtype=parameters["index_type"],
          name="begin",
          shape=[len(parameters["input_shape"])])
      end = tf.placeholder(
          dtype=parameters["index_type"],
          name="end",
          shape=[len(parameters["input_shape"])])
      strides = (
          tf.placeholder(
              dtype=parameters["index_type"],
              name="strides",
              shape=[len(parameters["input_shape"])])
          if parameters["strides"] is not None else None)
      tensors = [input_tensor, begin, end]
      if strides is not None:
        tensors.append(strides)
    out = tf.strided_slice(
        input_tensor,
        begin,
        end,
        strides,
        begin_mask=parameters["begin_mask"],
        end_mask=parameters["end_mask"])
    return tensors, [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Build inputs for stride_slice test."""
    input_values = create_tensor_data(parameters["dtype"],
                                      parameters["input_shape"])
    index_type = _TF_TYPE_INFO[parameters["index_type"]][0]
    values = [input_values]
    if not parameters["constant_indices"]:
      begin_values = np.array(parameters["begin"]).astype(index_type)
      end_values = np.array(parameters["end"]).astype(index_type)
      stride_values = (
          np.array(parameters["strides"]).astype(index_type)
          if parameters["strides"] is not None else None)
      values.append(begin_values)
      values.append(end_values)
      if stride_values is not None:
        values.append(stride_values)

    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
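
# For reference: in tf.strided_slice, bit i of begin_mask means "ignore
# begin[i] and start from the beginning of dimension i"; end_mask works the
# same way for end[i]. E.g. for a [2, 3] input with begin=[1, 0], end=[2, 2],
# begin_mask=1 behaves as if begin were [0, 0], i.e. it selects
# input[0:2, 0:2].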
lstm_cell = tf.contrib.rnn.BasicLSTMCell( num_cells, forget_bias=0.0, state_is_tuple=True) cell_outputs, _ = rnn.static_rnn( lstm_cell, inputs_after_split, dtype=tf.float32) out = cell_outputs[-1] return inputs_after_split, [out] def build_inputs(parameters, sess, inputs, outputs): """Feed inputs, assign variables, and freeze graph.""" with tf.variable_scope("", reuse=True): kernel = tf.get_variable("rnn/basic_lstm_cell/kernel") bias = tf.get_variable("rnn/basic_lstm_cell/bias") kernel_values = create_tensor_data( parameters["dtype"], [kernel.shape[0], kernel.shape[1]], -1, 1) bias_values = create_tensor_data(parameters["dtype"], [bias.shape[0]], 0, 1) sess.run(tf.group(kernel.assign(kernel_values), bias.assign(bias_values))) num_batchs = parameters["num_batchs"] time_step_size = parameters["time_step_size"] input_vec_size = parameters["input_vec_size"] input_values = [] for _ in xrange(time_step_size): tensor_data = create_tensor_data(parameters["dtype"], [num_batchs, input_vec_size], 0, 1) input_values.append(tensor_data) out = sess.run(outputs, feed_dict=dict(zip(inputs, input_values))) return input_values, out # TODO(zhixianyan): Automatically generate rnn_states for lstm cell. extra_toco_options = ExtraTocoOptions() extra_toco_options.rnn_states = ( "{state_array:rnn/BasicLSTMCellZeroState/zeros," "back_edge_source_array:rnn/basic_lstm_cell/Add_1,size:4}," "{state_array:rnn/BasicLSTMCellZeroState/zeros_1," "back_edge_source_array:rnn/basic_lstm_cell/Mul_2,size:4}") make_zip_of_tests( zip_path, test_parameters, build_graph, build_inputs, extra_toco_options, use_frozen_graph=True) def make_l2_pool(input_tensor, ksize, strides, padding, data_format): """Given an input, perform a sequence of TensorFlow ops to produce l2pool.""" return tf.sqrt(tf.nn.avg_pool( tf.square(input_tensor), ksize=ksize, strides=strides, padding=padding, data_format=data_format)) def make_topk_tests(zip_path): """Make a set of tests to do topk.""" test_parameters = [{ "input_dtype": [tf.float32, tf.int32], "input_shape": [[10], [5, 20]], }] def build_graph(parameters): """Build the topk op testing graph.""" input_value = tf.placeholder( dtype=parameters["input_dtype"], name="input", shape=parameters["input_shape"]) k = tf.constant(3, name="k") out = tf.nn.top_k(input_value, k) return [input_value], [out[1]] def build_inputs(parameters, sess, inputs, outputs): input_value = create_tensor_data(parameters["input_dtype"], parameters["input_shape"]) return [input_value], sess.run( outputs, feed_dict=dict(zip(inputs, [input_value]))) make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs) def make_arg_max_tests(zip_path): """Make a set of tests to do arg_max.""" test_parameters = [{ "input_dtype": [tf.float32, tf.int32], "input_shape": [[1, 1, 1, 3], [2, 3, 4, 5], [2, 3, 3], [5, 5], [10]], "axis": [0, 1, 2, 3], "output_type": [tf.int32, tf.int64], }] def build_graph(parameters): """Build the arg_max op testing graph.""" input_value = tf.placeholder( dtype=parameters["input_dtype"], name="input", shape=parameters["input_shape"]) axis = tf.constant(parameters["axis"], name="axis") out = tf.arg_max(input_value, axis, output_type=parameters["output_type"]) return [input_value], [out] def build_inputs(parameters, sess, inputs, outputs): input_value = create_tensor_data(parameters["input_dtype"], parameters["input_shape"]) return [input_value], sess.run( outputs, feed_dict=dict(zip(inputs, [input_value]))) make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs) def make_less_tests(zip_path):
"""Make a set of tests to do less.""" test_parameters = [{ "input_dtype": [tf.float32, tf.int32, tf.int64], "input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]), ([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]), ([5, 5], [1]), ([10], [2, 4, 10])], }] def build_graph(parameters): """Build the less op testing graph.""" input_value1 = tf.placeholder( dtype=parameters["input_dtype"], name="input1", shape=parameters["input_shape_pair"][0]) input_value2 = tf.placeholder( dtype=parameters["input_dtype"], name="input2", shape=parameters["input_shape_pair"][1]) out = tf.less(input_value1, input_value2) return [input_value1, input_value2], [out] def build_inputs(parameters, sess, inputs, outputs): input_value1 = create_tensor_data(parameters["input_dtype"], parameters["input_shape_pair"][0]) input_value2 = create_tensor_data(parameters["input_dtype"], parameters["input_shape_pair"][1]) return [input_value1, input_value2], sess.run( outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2]))) make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs) # Toco binary path provided by the generate rule. bin_path = None def main(unused_args): global bin_path def mkdir_if_not_exist(x): if not os.path.isdir(x): os.mkdir(x) if not os.path.isdir(x): raise RuntimeError("Failed to create dir %r" % x) opstest_path = os.path.join(FLAGS.output_path) mkdir_if_not_exist(opstest_path) out = FLAGS.zip_to_output bin_path = FLAGS.toco test_function = ("make_%s_tests" % out.replace(".zip", "")) if test_function not in globals(): raise RuntimeError("Can't find a test function to create %r. Tried %r" % (out, test_function)) # TODO(ahentz): accessing globals() is not very elegant. We should either # break this file into multiple tests or use decorator-based registration to # avoid using globals(). globals()[test_function](os.path.join(opstest_path, out)) if __name__ == "__main__": FLAGS, unparsed = parser.parse_known_args() if unparsed: print("Usage: %s <path out> <zip file to generate>") else: tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
make_binary_op_tests(zip_path, tf.subtract)
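Every op test above follows the same three-part contract: a test_parameters list of value grids, a build_graph that declares placeholders and returns (inputs, outputs), and a build_inputs that feeds random data and records reference outputs. A minimal sketch of that contract, assuming a hypothetical make_abs_tests; everything other than the module's own create_tensor_data and make_zip_of_tests helpers is illustrative:

def make_abs_tests(zip_path):
  """Make a set of tests for a hypothetical single-input op."""
  test_parameters = [{
      "dtype": [tf.float32],
      "input_shape": [[2, 3]],
  }]

  def build_graph(parameters):
    # Declare the graph the converter will see: one placeholder in, one op out.
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"], name="input",
        shape=parameters["input_shape"])
    out = tf.abs(input_tensor)
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    # Feed random data and record the TensorFlow reference outputs.
    input_values = create_tensor_data(parameters["dtype"],
                                      parameters["input_shape"])
    return [input_values], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_values])))

  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)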
PiBot.py
#!/usr/bin/python3 import RPi.GPIO as GPIO import threading import queue import time import distance, motors, speak gettingCloseSpoken = False turningAroundSpoken = False def calculateManeuvers(objDist):
if __name__ == '__main__': #try: #motorSpeed = queue.Queue() #motorDirection = queue.Queue() #motor_thread = threading.Thread(name="motor_thread", target=motors.threadTest, args=(motorSpeed,)) #direction_thread = threading.Thread(name="direction_thread", target=motors.threadTest2, args=(motorDirection,)) #motor_thread.start() #direction_thread.start() #print(threading.activeCount()) while True: # infinite loop #objDist = distance.getDistance() for objDist in range(0,101, 5): time.sleep(500.0 / 1000.0) calculateManeuvers(objDist) #motorSpeed.put(speed) #except: # print("Exiting") # pass # GPIO.cleanup()
global gettingCloseSpoken global turningAroundSpoken if objDist < 10: #Turn around speedLeft = 25 speedRight = 25 if not turningAroundSpoken: t1 = threading.Thread(target=speak.SpeechFromText, args=("Object detected. Scanning",)) t1.start() turningAroundSpoken = True else: turningAroundSpoken = False motors.hardLeft(speedLeft, speedRight) else: motors.moveForward(objDist, objDist)
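The speak-once flag above is easiest to reason about when the reset lives in the far branch; a sketch of that variant, assuming the same distance/motors/speak API used by the rest of this file:

def calculateManeuversSketch(objDist):
    global turningAroundSpoken
    if objDist < 10:
        if not turningAroundSpoken:
            # Announce once per approach, off the main loop thread.
            threading.Thread(target=speak.SpeechFromText,
                             args=("Object detected. Scanning",)).start()
            turningAroundSpoken = True
        motors.hardLeft(25, 25)
    else:
        turningAroundSpoken = False  # re-arm once the path is clear
        motors.moveForward(objDist, objDist)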
test_table.py
# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.network.statistics import table from ceilometer import sample from ceilometer.tests.network import statistics class TestTablePollsters(statistics._PollsterTestBase):
def test_table_pollster(self): self._test_pollster( table.TablePollster, 'switch.table', sample.TYPE_GAUGE, 'table') def test_table_pollster_active_entries(self): self._test_pollster( table.TablePollsterActiveEntries, 'switch.table.active.entries', sample.TYPE_GAUGE, 'entry') def test_table_pollster_lookup_packets(self): self._test_pollster( table.TablePollsterLookupPackets, 'switch.table.lookup.packets', sample.TYPE_GAUGE, 'packet') def test_table_pollster_matched_packets(self): self._test_pollster( table.TablePollsterMatchedPackets, 'switch.table.matched.packets', sample.TYPE_GAUGE, 'packet')
psqt.rs
use {Player,SQ,File,Piece}; use core::masks::*; use core::score::*; const BONUS: [[[Score; (FILE_CNT / 2)]; RANK_CNT]; PIECE_TYPE_CNT] = [ [ // NO PIECE [ Score(0, 0), Score(0, 0), Score(0, 0), Score(0, 0)], [ Score(0, 0), Score(0, 0), Score(0, 0), Score(0, 0)], [ Score(0, 0), Score(0, 0), Score(0, 0), Score(0, 0)], [ Score(0, 0), Score(0, 0), Score(0, 0), Score(0, 0)], [ Score(0, 0), Score(0, 0), Score(0, 0), Score(0, 0)], [ Score(0, 0), Score(0, 0), Score(0, 0), Score(0, 0)], [ Score(0, 0), Score(0, 0), Score(0, 0), Score(0, 0)], [ Score(0, 0), Score(0, 0), Score(0, 0), Score(0, 0)], ], [ // Pawn [ Score( 0, 0), Score( 0, 0), Score( 0, 0), Score( 0, 0) ], [ Score(-11, 7), Score( 6,-4), Score( 7, 8), Score( 3,-2) ], [ Score(-18,-4), Score( -2,-5), Score( 19, 5), Score(24, 4) ], [ Score(-17, 3), Score( -9, 3), Score( 20,-8), Score(35,-3) ], [ Score( -6, 8), Score( 5, 9), Score( 3, 7), Score(21,-6) ], [ Score( -6, 8), Score( -8,-5), Score( -6, 2), Score(-2, 4) ], [ Score( -4, 3), Score( 20,-9), Score( -8, 1), Score(-4,18) ], [ Score( 0, 0), Score( 0, 0), Score( 0, 0), Score( 0, 0) ] ], [ // Knight [ Score(-161,-105), Score(-96,-82), Score(-80,-46), Score(-73,-14) ], [ Score( -83, -69), Score(-43,-54), Score(-21,-17), Score(-10, 9) ], [ Score( -71, -50), Score(-22,-39), Score( 0, -7), Score( 9, 28) ], [ Score( -25, -41), Score( 18,-25), Score( 43, 6), Score( 47, 38) ], [ Score( -26, -46), Score( 16,-25), Score( 38, 3), Score( 50, 40) ], [ Score( -11, -54), Score( 37,-38), Score( 56, -7), Score( 65, 27) ], [ Score( -63, -65), Score(-19,-50), Score( 5,-24), Score( 14, 13) ], [ Score(-195,-109), Score(-67,-89), Score(-42,-50), Score(-29,-13) ] ], [ // Bishop [ Score(-44,-58), Score(-13,-31), Score(-25,-37), Score(-34,-19) ], [ Score(-20,-34), Score( 20, -9), Score( 12,-14), Score( 1, 4) ], [ Score( -9,-23), Score( 27, 0), Score( 21, -3), Score( 11, 16) ], [ Score(-11,-26), Score( 28, -3), Score( 21, -5), Score( 10, 16) ], [ Score(-11,-26), Score( 27, -4), Score( 16, -7), Score( 9, 14) ], [ Score(-17,-24), Score( 16, -2), Score( 12, 0), Score( 2, 13) ], [ Score(-23,-34), Score( 17,-10), Score( 6,-12), Score( -2, 6) ], [ Score(-35,-55), Score(-11,-32), Score(-19,-36), Score(-29,-17) ] ], [ // Rook [ Score(-25, 0), Score(-16, 0), Score(-16, 0), Score(-9, 0) ], [ Score(-21, 0), Score( -8, 0), Score( -3, 0), Score( 0, 0) ], [ Score(-21, 0), Score( -9, 0), Score( -4, 0), Score( 2, 0) ], [ Score(-22, 0), Score( -6, 0), Score( -1, 0), Score( 2, 0) ], [ Score(-22, 0), Score( -7, 0), Score( 0, 0), Score( 1, 0) ], [ Score(-21, 0), Score( -7, 0), Score( 0, 0), Score( 2, 0) ], [ Score(-12, 0), Score( 4, 0), Score( 8, 0), Score(12, 0) ], [ Score(-23, 0), Score(-15, 0), Score(-11, 0), Score(-5, 0) ] ], [ // Queen [ Score( 0,-71), Score(-4,-56), Score(-3,-42), Score(-1,-29) ], [ Score(-4,-56), Score( 6,-30), Score( 9,-21), Score( 8, -5) ], [ Score(-2,-39), Score( 6,-17), Score( 9, -8), Score( 9, 5) ], [ Score(-1,-29), Score( 8, -5), Score(10, 9), Score( 7, 19) ], [ Score(-3,-27), Score( 9, -5), Score( 8, 10), Score( 7, 21) ], [ Score(-2,-40), Score( 6,-16), Score( 8,-10), Score(10, 3) ], [ Score(-2,-55), Score( 7,-30), Score( 7,-21), Score( 6, -6) ], [ Score(-1,-74), Score(-4,-55), Score(-1,-43), Score( 0,-30) ] ], [ // King [ Score(267, 0), Score(320, 48), Score(270, 75), Score(195, 84) ], [ Score(264, 43), Score(304, 92), Score(238,143), Score(180,132) ], [ Score(200, 83), Score(245,138), Score(176,167), Score(110,165) ], [ Score(177,106), Score(185,169), Score(148,169), Score(110,179) ], [ Score(149,108), 
Score(177,163), Score(115,200), Score( 66,203) ], [ Score(118, 95), Score(159,155), Score( 84,176), Score( 41,174) ], [ Score( 87, 50), Score(128, 99), Score( 63,122), Score( 20,139) ], [ Score( 63, 9), Score( 88, 55), Score( 47, 80), Score( 0, 90) ] ], [ // ALL PIECE [ Score(0, 0), Score(0, 0), Score(0, 0), Score(0, 0)], [ Score(0, 0), Score(0, 0), Score(0, 0), Score(0, 0)], [ Score(0, 0), Score(0, 0), Score(0, 0), Score(0, 0)], [ Score(0, 0), Score(0, 0), Score(0, 0), Score(0, 0)], [ Score(0, 0), Score(0, 0), Score(0, 0), Score(0, 0)], [ Score(0, 0), Score(0, 0), Score(0, 0), Score(0, 0)], [ Score(0, 0), Score(0, 0), Score(0, 0), Score(0, 0)], [ Score(0, 0), Score(0, 0), Score(0, 0), Score(0, 0)], ] ]; static mut PSQ: [[Score; SQ_CNT]; PIECE_CNT] = [[Score(0,0); SQ_CNT]; PIECE_CNT]; static PIECE_VALUE: [[Value; PHASE_CNT]; PIECE_CNT] = [[0, 0], // Empty [ PAWN_MG, PAWN_EG], // White Pawn [ KNIGHT_MG, KNIGHT_EG],// White Knight [ BISHOP_MG, BISHOP_EG],// White Bishop [ ROOK_MG, ROOK_EG], // White Rook [ QUEEN_MG, QUEEN_EG], // White Queen [ ZERO, ZERO], // White King [0, 0], [0, 0], // Empty [ PAWN_MG, PAWN_EG], // Black Pawn [ KNIGHT_MG, KNIGHT_EG],// Black Knight [ BISHOP_MG, BISHOP_EG],// Black Bishop [ ROOK_MG, ROOK_EG], // Black Rook [ QUEEN_MG, QUEEN_EG], // Black Queen [ ZERO, ZERO], // Black King [0, 0], ]; #[cold] pub fn init_psqt() { for piece in 0..PIECE_TYPE_CNT { let v: Score = Score(PIECE_VALUE[piece][0], PIECE_VALUE[piece][1]); for s in 0..SQ_CNT { let sq: SQ = SQ(s as u8); let f: File = sq.file().min(!sq.file()); let score = v + BONUS[piece][sq.rank() as usize][f as usize]; unsafe { PSQ[(Player::White as usize) << 3 | piece][s] = score; PSQ[(Player::Black as usize) << 3 | piece][sq.flip().0 as usize] = -score; } } } } /// Returns the score for a player's piece being at a particular square. #[inline(always)] pub fn psq(piece: Piece, sq: SQ) -> Score
/// Returns the value of a piece for a player. If `eg` is true, it returns the end game value. Otherwise, /// it'll return the midgame value. #[inline(always)] pub fn piece_value(piece: Piece, eg: bool) -> Value { unsafe { (*(PIECE_VALUE.get_unchecked(piece as usize)).get_unchecked(eg as usize)) } } #[cfg(test)] mod tests { use super::*; #[test] fn psq_test() { init_psqt(); assert_eq!(psq(Piece::WhiteQueen, SQ::A1), -psq(Piece::BlackQueen, SQ::A8)); assert_eq!(psq(Piece::WhiteRook, SQ::A1), -psq( Piece::BlackRook, SQ::A8)); assert_eq!(psq(Piece::WhitePawn, SQ::B1), -psq( Piece::BlackPawn, SQ::B8)); assert_eq!(psq(Piece::BlackKnight, SQ::B4), -psq(Piece::WhiteKnight,SQ::B5)); } }
{ debug_assert!(sq.is_okay()); unsafe { *(PSQ.get_unchecked(piece as usize)).get_unchecked(sq.0 as usize) } }
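A minimal usage sketch of the table above, assuming init_psqt() has run at startup; it exercises the same color symmetry the construction loop enforces (a black piece on the rank-flipped square scores the exact negation):

fn psq_usage_sketch() {
    init_psqt();
    // B2 flips to B7, so the two pawns contribute equal and opposite scores.
    let white = psq(Piece::WhitePawn, SQ::B2);
    let black = psq(Piece::BlackPawn, SQ::B7);
    assert_eq!(white, -black);
}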
tool.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function from compas.geometry import Frame from compas.geometry import Transformation from compas.robots.model.robot import RobotModel class ToolModel(RobotModel): """Represents a tool to be attached to the robot's flange. Attributes ---------- visual : :class:`~compas.datastructures.Mesh` The visual mesh of the tool. frame : :class:`~compas.geometry.Frame` The frame of the tool in tool0 frame. collision : :class:`~compas.datastructures.Mesh` The collision mesh representation of the tool. name : str The name of the `ToolModel`. Defaults to 'attached_tool'. link_name : str The name of the `Link` to which the tool is attached. Defaults to ``None``. Examples -------- >>> import compas >>> from compas.datastructures import Mesh >>> from compas.geometry import Frame >>> mesh = Mesh.from_stl(compas.get('cone.stl')) >>> frame = Frame([0.14, 0, 0], [0, 1, 0], [0, 0, 1]) >>> tool = ToolModel(mesh, frame) """ def __init__(self, visual, frame_in_tool0_frame, collision=None, name="attached_tool", link_name=None): collision = collision or visual super(ToolModel, self).__init__(name) self.add_link("attached_tool_link", visual_mesh=visual, collision_mesh=collision) self._rebuild_tree() self._create(self.root, Transformation()) self.frame = frame_in_tool0_frame self.link_name = link_name @classmethod def from_robot_model(cls, robot, frame_in_tool0_frame, link_name=None): """Creates a ``ToolModel`` from a :class:`~compas.robots.RobotModel` instance. Parameters ---------- robot : :class:`~compas.robots.RobotModel` frame_in_tool0_frame : :class:`~compas.geometry.Frame` The frame of the tool in tool0 frame. link_name : str The name of the `Link` to which the tool is attached. Defaults to ``None``. """ data = robot.data data['frame'] = frame_in_tool0_frame.data data['link_name'] = link_name return cls.from_data(data) @property def data(self): """Returns the data dictionary that represents the tool. Returns ------- dict The tool data. """ return self._get_data() def _get_data(self): data = super(ToolModel, self)._get_data() data['frame'] = self.frame.data data['link_name'] = self.link_name return data @data.setter def data(self, data): self._set_data(data) def _set_data(self, data): super(ToolModel, self)._set_data(data) self.frame = Frame.from_data(data['frame']) self.name = self.name or 'attached_tool' self.link_name = data['link_name'] if 'link_name' in data else None @classmethod def from_data(cls, data): """Construct a `ToolModel` from its data representation. To be used in conjunction with the :meth:`to_data` method. Parameters ---------- data : dict The data dictionary. Returns ------- :class:`ToolModel` The constructed `ToolModel`. """ tool = cls(None, None) tool.data = data return tool def from_tcf_to_t0cf(self, frames_tcf): """Converts a list of frames at the robot's tool tip (tcf frame) to frames at the robot's flange (tool0 frame). Parameters ---------- frames_tcf : list[:class:`~compas.geometry.Frame`] Frames (in WCF) at the robot's tool tip (tcf). Returns ------- list[:class:`~compas.geometry.Frame`] Frames (in WCF) at the robot's flange (tool0). Examples -------- >>> import compas >>> from compas.datastructures import Mesh >>> from compas.geometry import Frame >>> mesh = Mesh.from_stl(compas.get('cone.stl'))
[Frame(Point(-0.363, 0.003, -0.147), Vector(0.388, -0.351, -0.852), Vector(0.276, 0.926, -0.256))] """ Te = Transformation.from_frame_to_frame(self.frame, Frame.worldXY()) return [Frame.from_transformation(Transformation.from_frame(f) * Te) for f in frames_tcf] def from_t0cf_to_tcf(self, frames_t0cf): """Converts frames at the robot's flange (tool0 frame) to frames at the robot's tool tip (tcf frame). Parameters ---------- frames_t0cf : list[:class:`~compas.geometry.Frame`] Frames (in WCF) at the robot's flange (tool0). Returns ------- list[:class:`~compas.geometry.Frame`] Frames (in WCF) at the robot's tool tip (tcf). Examples -------- >>> import compas >>> from compas.datastructures import Mesh >>> from compas.geometry import Frame >>> mesh = Mesh.from_stl(compas.get('cone.stl')) >>> frame = Frame([0.14, 0, 0], [0, 1, 0], [0, 0, 1]) >>> tool = ToolModel(mesh, frame) >>> frames_t0cf = [Frame((-0.363, 0.003, -0.147), (0.388, -0.351, -0.852), (0.276, 0.926, -0.256))] >>> tool.from_t0cf_to_tcf(frames_t0cf) [Frame(Point(-0.309, -0.046, -0.266), Vector(0.276, 0.926, -0.256), Vector(0.879, -0.136, 0.456))] """ Te = Transformation.from_frame_to_frame(Frame.worldXY(), self.frame) return [Frame.from_transformation(Transformation.from_frame(f) * Te) for f in frames_t0cf]
>>> frame = Frame([0.14, 0, 0], [0, 1, 0], [0, 0, 1]) >>> tool = ToolModel(mesh, frame) >>> frames_tcf = [Frame((-0.309, -0.046, -0.266), (0.276, 0.926, -0.256), (0.879, -0.136, 0.456))] >>> tool.from_tcf_to_t0cf(frames_tcf)
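Since the two conversions apply inverse transformations, chaining them should round-trip (up to floating-point noise); a short sketch reusing the docstring example:

import compas
from compas.datastructures import Mesh
from compas.geometry import Frame

mesh = Mesh.from_stl(compas.get('cone.stl'))
tool = ToolModel(mesh, Frame([0.14, 0, 0], [0, 1, 0], [0, 0, 1]))
frames_tcf = [Frame((-0.309, -0.046, -0.266), (0.276, 0.926, -0.256), (0.879, -0.136, 0.456))]
round_trip = tool.from_t0cf_to_tcf(tool.from_tcf_to_t0cf(frames_tcf))
# round_trip[0] should match frames_tcf[0] up to numerical precision.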
configure.py
#!/usr/bin/env python """ Configure folder for sCMOS testing. Hazen 09/17 """ import numpy import os import storm_analysis import storm_analysis.sa_library.parameters as parameters import storm_analysis.simulator.emitters_on_grid as emittersOnGrid import storm_analysis.simulator.emitters_uniform_random as emittersUniformRandom import storm_analysis.diagnostics.sCMOS.settings as settings
""" Create a sCMOS parameters object. """ params = parameters.ParametersSCMOS() params.setAttr("max_frame", "int", -1) params.setAttr("start_frame", "int", -1) params.setAttr("background_sigma", "float", 8.0) params.setAttr("camera_calibration", "filename", cal_file) params.setAttr("find_max_radius", "int", 5) params.setAttr("fit_error_model", "string", settings.fit_error_model) params.setAttr("foreground_sigma", "float", 1.5) params.setAttr("iterations", "int", settings.iterations) params.setAttr("model", "string", settings.model) params.setAttr("pixel_size", "float", settings.pixel_size) params.setAttr("roi_size", "int", settings.roi_size) params.setAttr("sigma", "float", 1.5) params.setAttr("threshold", "float", settings.threshold) # Don't do tracking. params.setAttr("descriptor", "string", "1") params.setAttr("radius", "float", "0.0") # Don't do drift-correction. params.setAttr("d_scale", "int", 2) params.setAttr("drift_correction", "int", 0) params.setAttr("frame_step", "int", 500) params.setAttr("z_correction", "int", 0) # Z fitting. # # These are nonsense values. We test either '2D' of '3D' mode # and check how well we do at fitting the localization widths. # params.setAttr("do_zfit", "int", 0) params.setAttr("cutoff", "float", 0.0) params.setAttr("max_z", "float", 0.5) params.setAttr("min_z", "float", -0.5) params.setAttr("z_value", "float", 0.0) params.setAttr("z_step", "float", 1.0) params.setAttr("wx_wo", "float", 1.0) params.setAttr("wx_c", "float", 1.0) params.setAttr("wx_d", "float", 1.0) params.setAttr("wxA", "float", 0.0) params.setAttr("wxB", "float", 0.0) params.setAttr("wxC", "float", 0.0) params.setAttr("wxD", "float", 0.0) params.setAttr("wy_wo", "float", 1.0) params.setAttr("wy_c", "float", 1.0) params.setAttr("wy_d", "float", 1.0) params.setAttr("wyA", "float", 0.0) params.setAttr("wyB", "float", 0.0) params.setAttr("wyC", "float", 0.0) params.setAttr("wyD", "float", 0.0) # 'peak_locations' testing. if hasattr(settings, "peak_locations") and (settings.peak_locations is not None): params.setAttr("peak_locations", "filename", settings.peak_locations) return params def configure(cal_file = None): # Create sCMOS calibration file if not specified. # if cal_file is None: cal_file = "calib.npy" offset = numpy.zeros((settings.y_size, settings.x_size)) + settings.camera_offset variance = numpy.ones((settings.y_size, settings.x_size)) * settings.camera_variance gain = numpy.ones((settings.y_size, settings.x_size)) * settings.camera_gain rqe = numpy.ones((settings.y_size, settings.x_size)) numpy.save(cal_file, [offset, variance, gain, rqe, 2]) # Create parameters file for analysis. # print("Creating XML file.") params = testingParameters(cal_file) params.toXMLFile("scmos.xml", pretty = True) # Create localization on a grid file. # print("Creating gridded localization.") emittersOnGrid.emittersOnGrid("grid_list.hdf5", settings.nx, settings.ny, 1.5, 20, 0.0, 0.0) # Create randomly located localizations file. # print("Creating random localization.") emittersUniformRandom.emittersUniformRandom("random_list.hdf5", 1.0, 10, settings.x_size, settings.y_size, 0.0) if (__name__ == "__main__"): configure()
def testingParameters(cal_file):
twitchusercontext.go
package main import ( "fmt" "sync" "github.com/pajbot/pajbot2/pkg" ) var _ pkg.UserContext = &UserContext{} type UserContext struct { mutex *sync.Mutex // key = channel ID context map[string]map[string][]string } func NewUserContext() *UserContext { c := &UserContext{ mutex: &sync.Mutex{}, context: make(map[string]map[string][]string), } return c } func (c *UserContext) GetContext(channelID, userID string) []string { c.mutex.Lock() defer c.mutex.Unlock() if users, ok := c.context[channelID]; ok
return nil } func (c *UserContext) AddContext(channelID, userID, message string) { if channelID == "" { fmt.Println("Channel ID is empty") return } if userID == "" { fmt.Println("User ID is empty") return } c.mutex.Lock() defer c.mutex.Unlock() _, ok := c.context[channelID] if !ok { c.context[channelID] = make(map[string][]string) } c.context[channelID][userID] = append(c.context[channelID][userID], message) // Keep only the 5 most recent messages per user. newLen := len(c.context[channelID][userID]) - 5 if newLen > 0 { c.context[channelID][userID] = c.context[channelID][userID][newLen:] } }
{ if userContext, ok := users[userID]; ok { return userContext } }
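A minimal usage sketch of the store above; the IDs are illustrative. GetContext returns nil when neither the channel nor the user has recorded context, so callers can range over the result unconditionally:

func userContextUsageSketch() {
	c := NewUserContext()
	c.AddContext("channel-1", "user-1", "first message")
	c.AddContext("channel-1", "user-1", "second message")
	for _, msg := range c.GetContext("channel-1", "user-1") {
		fmt.Println(msg)
	}
}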
test_util.py
# # test_util.py # # This source file is part of the FoundationDB open source project # # Copyright 2013-2018 Apple Inc. and the FoundationDB project authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import random import uuid import unicodedata import ctypes import math import fdb import fdb.tuple from bindingtester import util from bindingtester import FDB_API_VERSION from bindingtester.known_testers import COMMON_TYPES class RandomGenerator(object): def __init__(self, max_int_bits=64, api_version=FDB_API_VERSION, types=COMMON_TYPES): self.max_int_bits = max_int_bits self.api_version = api_version self.types = types def random_unicode_str(self, length): return u''.join(self.random_unicode_char() for i in range(0, length)) def random_int(self): num_bits = random.randint(0, self.max_int_bits) # This way, we test small numbers with higher probability max_value = (1 << num_bits) - 1 min_value = -max_value - 1 num = random.randint(min_value, max_value) # util.get_logger().debug('generating int (%d): %d - %s' % (num_bits, num, repr(fdb.tuple.pack((num,))))) return num def random_float(self, exp_bits): if random.random() < 0.05: # Choose a special value. return random.choice([float('-nan'), float('-inf'), -0.0, 0.0, float('inf'), float('nan')]) else: # Choose a value from all over the range of acceptable floats for this precision. 
sign = -1 if random.random() < 0.5 else 1 exponent = random.randint(-(1 << (exp_bits - 1)) - 10, ((1 << (exp_bits - 1)) - 1)) mantissa = random.random() result = sign * math.pow(2, exponent) * mantissa if random.random() < 0.05: result = float(int(result)) return result def random_tuple(self, max_size, incomplete_versionstamps=False): size = random.randint(1, max_size) tup = [] for i in range(size): choice = random.choice(self.types) if choice == 'int': tup.append(self.random_int()) elif choice == 'null': tup.append(None) elif choice == 'bytes': tup.append(self.random_string(random.randint(0, 100))) elif choice == 'string': tup.append(self.random_unicode_str(random.randint(0, 100))) elif choice == 'uuid': tup.append(uuid.uuid4()) elif choice == 'bool': b = random.random() < 0.5 if self.api_version < 500: tup.append(int(b)) else: tup.append(b) elif choice == 'float': tup.append(fdb.tuple.SingleFloat(self.random_float(8))) elif choice == 'double': tup.append(self.random_float(11)) elif choice == 'tuple': length = random.randint(0, max_size - size) if length == 0: tup.append(()) else: tup.append(self.random_tuple(length)) elif choice == 'versionstamp': if incomplete_versionstamps and random.random() < 0.5: tr_version = fdb.tuple.Versionstamp._UNSET_TR_VERSION else: tr_version = self.random_string(10) user_version = random.randint(0, 0xffff) tup.append(fdb.tuple.Versionstamp(tr_version, user_version)) else: assert False return tuple(tup) def random_tuple_list(self, max_size, max_list_size): size = random.randint(1, max_list_size) tuples = [] for i in range(size): to_add = self.random_tuple(max_size) tuples.append(to_add) if len(to_add) > 1 and random.random() < 0.25: # Add a smaller one to test prefixes. smaller_size = random.randint(1, len(to_add)) tuples.append(to_add[:smaller_size]) else: non_empty = filter(lambda (_, x): (isinstance(x, list) or isinstance(x, tuple)) and len(x) > 0, enumerate(to_add)) if len(non_empty) > 0 and random.random() < 0.25: # Add a smaller list to test prefixes of nested structures. idx, choice = random.choice(non_empty) smaller_size = random.randint(0, len(to_add[idx])) tuples.append(to_add[:idx] + (choice[:smaller_size],) + to_add[idx + 1:]) random.shuffle(tuples) return tuples def random_range_params(self): if random.random() < 0.75: limit = random.randint(1, 1e3) elif random.random() < 0.75: limit = 0 else: limit = random.randint(1e8, (1 << 31) - 1) return (limit, random.randint(0, 1), random.randint(-2, 4)) def random_selector_params(self): if random.random() < 0.9: offset = random.randint(-20, 20) else: offset = random.randint(-1000, 1000) return (random.randint(0, 1), offset) def random_string(self, length): if length == 0: return '' return chr(random.randint(0, 254)) + ''.join(chr(random.randint(0, 255)) for i in range(0, length - 1)) def random_unicode_char(self): while True: if random.random() < 0.05: # Choose one of these special character sequences. specials = [u'\U0001f4a9', u'\U0001f63c', u'\U0001f3f3\ufe0f\u200d\U0001f308', u'\U0001f1f5\U0001f1f2', u'\uf8ff', u'\U0002a2b2', u'\u05e9\u05dc\u05d5\u05dd'] return random.choice(specials) c = random.randint(0, 0xffff) if unicodedata.category(unichr(c))[0] in 'LMNPSZ': return unichr(c) def error_string(error_code): return fdb.tuple.pack(('ERROR', str(error_code)))
instructions.append('WAIT_FUTURE') instructions.append('RESET') def to_front(instructions, index): if index == 0: pass elif index == 1: instructions.push_args(1) instructions.append('SWAP') elif index == 2: instructions.push_args(index - 1) instructions.append('SWAP') instructions.push_args(index) instructions.append('SWAP') else: instructions.push_args(index - 1) instructions.append('SWAP') instructions.push_args(index) instructions.append('SWAP') instructions.push_args(index - 1) instructions.append('SWAP') to_front(instructions, index - 1) def with_length(tup): return (len(tup),) + tup
def blocking_commit(instructions): instructions.append('COMMIT')
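A sketch of how RandomGenerator output can be checked against the tuple layer (Python 2, like the rest of this module): packing, unpacking, and re-packing a generated tuple should be byte-stable, which also sidesteps NaN != NaN comparisons:

def random_tuple_roundtrip_sketch():
    gen = RandomGenerator()
    for _ in range(10):
        packed = fdb.tuple.pack(gen.random_tuple(5))
        assert fdb.tuple.pack(fdb.tuple.unpack(packed)) == packed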
disable_identity_parameters.go
// Code generated by go-swagger; DO NOT EDIT. // // Copyright NetFoundry, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // __ __ _ // \ \ / / (_) // \ \ /\ / /_ _ _ __ _ __ _ _ __ __ _ // \ \/ \/ / _` | '__| '_ \| | '_ \ / _` | // \ /\ / (_| | | | | | | | | | | (_| | : This file is generated, do not edit it. // \/ \/ \__,_|_| |_| |_|_|_| |_|\__, | // __/ | // |___/ package identity // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command import ( "context" "net/http" "time" "github.com/go-openapi/errors" "github.com/go-openapi/runtime" cr "github.com/go-openapi/runtime/client" "github.com/go-openapi/strfmt" "github.com/openziti/edge/rest_model" ) // NewDisableIdentityParams creates a new DisableIdentityParams object, // with the default timeout for this client. // // Default values are not hydrated, since defaults are normally applied by the API server side. // // To enforce default values in parameter, use SetDefaults or WithDefaults. func NewDisableIdentityParams() *DisableIdentityParams { return &DisableIdentityParams{ timeout: cr.DefaultTimeout, } }
// with the ability to set a timeout on a request. func NewDisableIdentityParamsWithTimeout(timeout time.Duration) *DisableIdentityParams { return &DisableIdentityParams{ timeout: timeout, } } // NewDisableIdentityParamsWithContext creates a new DisableIdentityParams object // with the ability to set a context for a request. func NewDisableIdentityParamsWithContext(ctx context.Context) *DisableIdentityParams { return &DisableIdentityParams{ Context: ctx, } } // NewDisableIdentityParamsWithHTTPClient creates a new DisableIdentityParams object // with the ability to set a custom HTTPClient for a request. func NewDisableIdentityParamsWithHTTPClient(client *http.Client) *DisableIdentityParams { return &DisableIdentityParams{ HTTPClient: client, } } /* DisableIdentityParams contains all the parameters to send to the API endpoint for the disable identity operation. Typically these are written to a http.Request. */ type DisableIdentityParams struct { /* Disable. Disable parameters */ Disable *rest_model.DisableParams /* ID. The id of the requested resource */ ID string timeout time.Duration Context context.Context HTTPClient *http.Client } // WithDefaults hydrates default values in the disable identity params (not the query body). // // All values with no default are reset to their zero value. func (o *DisableIdentityParams) WithDefaults() *DisableIdentityParams { o.SetDefaults() return o } // SetDefaults hydrates default values in the disable identity params (not the query body). // // All values with no default are reset to their zero value. func (o *DisableIdentityParams) SetDefaults() { // no default values defined for this parameter } // WithTimeout adds the timeout to the disable identity params func (o *DisableIdentityParams) WithTimeout(timeout time.Duration) *DisableIdentityParams { o.SetTimeout(timeout) return o } // SetTimeout adds the timeout to the disable identity params func (o *DisableIdentityParams) SetTimeout(timeout time.Duration) { o.timeout = timeout } // WithContext adds the context to the disable identity params func (o *DisableIdentityParams) WithContext(ctx context.Context) *DisableIdentityParams { o.SetContext(ctx) return o } // SetContext adds the context to the disable identity params func (o *DisableIdentityParams) SetContext(ctx context.Context) { o.Context = ctx } // WithHTTPClient adds the HTTPClient to the disable identity params func (o *DisableIdentityParams) WithHTTPClient(client *http.Client) *DisableIdentityParams { o.SetHTTPClient(client) return o } // SetHTTPClient adds the HTTPClient to the disable identity params func (o *DisableIdentityParams) SetHTTPClient(client *http.Client) { o.HTTPClient = client } // WithDisable adds the disable to the disable identity params func (o *DisableIdentityParams) WithDisable(disable *rest_model.DisableParams) *DisableIdentityParams { o.SetDisable(disable) return o } // SetDisable adds the disable to the disable identity params func (o *DisableIdentityParams) SetDisable(disable *rest_model.DisableParams) { o.Disable = disable } // WithID adds the id to the disable identity params func (o *DisableIdentityParams) WithID(id string) *DisableIdentityParams { o.SetID(id) return o } // SetID adds the id to the disable identity params func (o *DisableIdentityParams) SetID(id string) { o.ID = id } // WriteToRequest writes these params to a swagger request func (o *DisableIdentityParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { if err := r.SetTimeout(o.timeout); err != nil { return err } var res []error 
if o.Disable != nil { if err := r.SetBodyParam(o.Disable); err != nil { return err } } // path param id if err := r.SetPathParam("id", o.ID); err != nil { return err } if len(res) > 0 { return errors.CompositeValidationError(res...) } return nil }
// NewDisableIdentityParamsWithTimeout creates a new DisableIdentityParams object
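A usage sketch for the generated parameter builder above; the ID and timeout are illustrative, and the actual request dispatch lives in the generated client, not in this file:

func disableIdentityParamsSketch() *DisableIdentityParams {
	return NewDisableIdentityParams().
		WithID("identity-id").
		WithDisable(&rest_model.DisableParams{}).
		WithTimeout(30 * time.Second)
}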
model_firmware_iom_descriptor.go
/* Cisco Intersight Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. API version: 1.0.9-5517 Contact: [email protected] */ // Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. package intersight import ( "encoding/json" "reflect" "strings" ) // FirmwareIomDescriptor Descriptor to uniquely identify a IOM component. type FirmwareIomDescriptor struct { FirmwareComponentDescriptor AdditionalProperties map[string]interface{} } type _FirmwareIomDescriptor FirmwareIomDescriptor // NewFirmwareIomDescriptor instantiates a new FirmwareIomDescriptor object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed func
(classId string, objectType string) *FirmwareIomDescriptor { this := FirmwareIomDescriptor{} this.ClassId = classId this.ObjectType = objectType return &this } // NewFirmwareIomDescriptorWithDefaults instantiates a new FirmwareIomDescriptor object // This constructor will only assign default values to properties that have it defined, // but it doesn't guarantee that properties required by API are set func NewFirmwareIomDescriptorWithDefaults() *FirmwareIomDescriptor { this := FirmwareIomDescriptor{} return &this } func (o FirmwareIomDescriptor) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} serializedFirmwareComponentDescriptor, errFirmwareComponentDescriptor := json.Marshal(o.FirmwareComponentDescriptor) if errFirmwareComponentDescriptor != nil { return []byte{}, errFirmwareComponentDescriptor } errFirmwareComponentDescriptor = json.Unmarshal([]byte(serializedFirmwareComponentDescriptor), &toSerialize) if errFirmwareComponentDescriptor != nil { return []byte{}, errFirmwareComponentDescriptor } for key, value := range o.AdditionalProperties { toSerialize[key] = value } return json.Marshal(toSerialize) } func (o *FirmwareIomDescriptor) UnmarshalJSON(bytes []byte) (err error) { type FirmwareIomDescriptorWithoutEmbeddedStruct struct { } varFirmwareIomDescriptorWithoutEmbeddedStruct := FirmwareIomDescriptorWithoutEmbeddedStruct{} err = json.Unmarshal(bytes, &varFirmwareIomDescriptorWithoutEmbeddedStruct) if err == nil { varFirmwareIomDescriptor := _FirmwareIomDescriptor{} *o = FirmwareIomDescriptor(varFirmwareIomDescriptor) } else { return err } varFirmwareIomDescriptor := _FirmwareIomDescriptor{} err = json.Unmarshal(bytes, &varFirmwareIomDescriptor) if err == nil { o.FirmwareComponentDescriptor = varFirmwareIomDescriptor.FirmwareComponentDescriptor } else { return err } additionalProperties := make(map[string]interface{}) if err = json.Unmarshal(bytes, &additionalProperties); err == nil { // remove fields from embedded structs reflectFirmwareComponentDescriptor := reflect.ValueOf(o.FirmwareComponentDescriptor) for i := 0; i < reflectFirmwareComponentDescriptor.Type().NumField(); i++ { t := reflectFirmwareComponentDescriptor.Type().Field(i) if jsonTag := t.Tag.Get("json"); jsonTag != "" { fieldName := "" if commaIdx := strings.Index(jsonTag, ","); commaIdx > 0 { fieldName = jsonTag[:commaIdx] } else { fieldName = jsonTag } if fieldName != "AdditionalProperties" { delete(additionalProperties, fieldName) } } } o.AdditionalProperties = additionalProperties } return err } type NullableFirmwareIomDescriptor struct { value *FirmwareIomDescriptor isSet bool } func (v NullableFirmwareIomDescriptor) Get() *FirmwareIomDescriptor { return v.value } func (v *NullableFirmwareIomDescriptor) Set(val *FirmwareIomDescriptor) { v.value = val v.isSet = true } func (v NullableFirmwareIomDescriptor) IsSet() bool { return v.isSet } func (v *NullableFirmwareIomDescriptor) Unset() { v.value = nil v.isSet = false } func NewNullableFirmwareIomDescriptor(val *FirmwareIomDescriptor) *NullableFirmwareIomDescriptor { return &NullableFirmwareIomDescriptor{value: val, isSet: true} } func (v NullableFirmwareIomDescriptor) MarshalJSON() ([]byte, error) { return json.Marshal(v.value) } func (v *NullableFirmwareIomDescriptor) UnmarshalJSON(src []byte) error { v.isSet = true return json.Unmarshal(src, &v.value) }
NewFirmwareIomDescriptor
dynamic.rs
use crate::numbers::*; use crate::{ErrorReason, VoidResult}; use arrayvec::*; use std::iter::FromIterator; use std::ops::*; use std::slice::{Iter, IterMut}; use std::usize; /// A type which internally switches between stack and heap allocation. /// This is supposed to perform faster but the main reason is that this /// way we automatically have a limited stack allocation available on systems /// without heap, and on systems with heap allocation we don't have to worry /// about introducing artificial limits. pub enum InlineVector<T> { Inline(ArrayVec<[T; 64]>), Dynamic(Vec<T>), } impl<T> InlineVector<T> where T: Copy, { /// Create a new vector of a given size filled with a given default value. pub fn of_size(default: T, n: usize) -> InlineVector<T> { let mut result = Self::with_capacity(n); for _ in 0..n { result.push(default); } result } } /// Most of the operations behave as defined for `std::Vec`. impl<T> InlineVector<T> { /// Returns the maximum size the vector can possibly have. pub fn max_capacity() -> usize { usize::MAX } /// Returns a vector with a given capacity. pub fn with_capacity(n: usize) -> InlineVector<T> { if n <= 64 { InlineVector::Inline(ArrayVec::<[T; 64]>::new()) } else { InlineVector::Dynamic(Vec::with_capacity(n)) } } /// Returns a vector with a default capacity. The default /// capacity will be relatively small and should only be used /// to store small lookup tables or results. /// /// As of today the default capacity is `64`. pub fn with_default_capcacity() -> InlineVector<T> { Self::with_capacity(64) } /// Returns a vector holding a single element. pub fn with_elem(elem: T) -> InlineVector<T> { let mut vector = Self::with_capacity(1); vector.push(elem); vector } /// Returns an empty vector. pub fn empty() -> InlineVector<T> { Self::with_capacity(0) } pub fn push(&mut self, elem: T) { match *self { InlineVector::Inline(ref mut v) => { v.push(elem); } InlineVector::Dynamic(ref mut v) => v.push(elem), }; } pub fn pop(&mut self) -> Option<T> { match *self { InlineVector::Inline(ref mut v) => v.pop(), InlineVector::Dynamic(ref mut v) => v.pop(), } } pub fn remove(&mut self, index: usize) -> T { match *self { InlineVector::Inline(ref mut v) => v.remove(index), InlineVector::Dynamic(ref mut v) => v.remove(index), } } pub fn len(&self) -> usize { match *self { InlineVector::Inline(ref v) => v.len(), InlineVector::Dynamic(ref v) => v.len(), } } pub fn is_empty(&self) -> bool { self.len() == 0 } pub fn capacity(&self) -> usize { match *self { InlineVector::Inline(ref v) => v.capacity(), InlineVector::Dynamic(ref v) => v.capacity(), } } #[inline] pub fn iter(&self) -> Iter<T> { match *self { InlineVector::Inline(ref v) => v.iter(), InlineVector::Dynamic(ref v) => v.iter(), } } #[inline] pub fn iter_mut(&mut self) -> IterMut<T> { match *self { InlineVector::Inline(ref mut v) => v.iter_mut(), InlineVector::Dynamic(ref mut v) => v.iter_mut(), } } pub fn append(&mut self, other: &mut Self) { while !other.is_empty() { self.push(other.remove(0)); } } pub fn insert(&mut self, index: usize, element: T) { match *self { InlineVector::Inline(ref mut v) => { v.insert(index, element); } InlineVector::Dynamic(ref mut v) => v.insert(index, element), } } } impl<T: Zero + Clone> InlineVector<T> { pub fn try_resize(&mut self, len: usize) -> VoidResult { match *self { InlineVector::Inline(ref v) => { if v.capacity() >= len { Ok(()) } else { Err(ErrorReason::TypeCanNotResize) } } InlineVector::Dynamic(ref mut v) => { if v.capacity() >= len { v.resize(len, T::zero()); Ok(()) } else { // We could
increase the vector capacity, but then // Inline and Dynamic would behave very differently and we want // to avoid that Err(ErrorReason::TypeCanNotResize) } } } } } impl<T> Index<usize> for InlineVector<T> { type Output = T; fn index(&self, index: usize) -> &T { match *self { InlineVector::Inline(ref v) => &v[index], InlineVector::Dynamic(ref v) => &v[index], } } } impl<T> IndexMut<usize> for InlineVector<T> { fn index_mut(&mut self, index: usize) -> &mut T { match *self { InlineVector::Inline(ref mut v) => &mut v[index], InlineVector::Dynamic(ref mut v) => &mut v[index], } } } impl<T> Index<RangeFull> for InlineVector<T> { type Output = [T]; fn index(&self, _index: RangeFull) -> &[T] { match *self { InlineVector::Inline(ref v) => &v[..], InlineVector::Dynamic(ref v) => &v[..], } } } impl<T> IndexMut<RangeFull> for InlineVector<T> { fn index_mut(&mut self, _index: RangeFull) -> &mut [T] { match *self { InlineVector::Inline(ref mut v) => &mut v[..], InlineVector::Dynamic(ref mut v) => &mut v[..], } } } impl<T> Index<RangeFrom<usize>> for InlineVector<T> { type Output = [T];
fn index(&self, index: RangeFrom<usize>) -> &[T] { match *self { InlineVector::Inline(ref v) => &v[index], InlineVector::Dynamic(ref v) => &v[index], } } } impl<T> IndexMut<RangeFrom<usize>> for InlineVector<T> { fn index_mut(&mut self, index: RangeFrom<usize>) -> &mut [T] { match *self { InlineVector::Inline(ref mut v) => &mut v[index], InlineVector::Dynamic(ref mut v) => &mut v[index], } } } impl<T> Index<RangeTo<usize>> for InlineVector<T> { type Output = [T]; fn index(&self, index: RangeTo<usize>) -> &[T] { match *self { InlineVector::Inline(ref v) => &v[index], InlineVector::Dynamic(ref v) => &v[index], } } } impl<T> IndexMut<RangeTo<usize>> for InlineVector<T> { fn index_mut(&mut self, index: RangeTo<usize>) -> &mut [T] { match *self { InlineVector::Inline(ref mut v) => &mut v[index], InlineVector::Dynamic(ref mut v) => &mut v[index], } } } impl<T: Clone> Clone for InlineVector<T> { fn clone(&self) -> Self { match *self { InlineVector::Inline(ref v) => InlineVector::Inline(v.clone()), InlineVector::Dynamic(ref v) => InlineVector::Dynamic(v.clone()), } } } impl<T> FromIterator<T> for InlineVector<T> { fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self { let mut c = InlineVector::with_capacity(64); for i in iter { c.push(i); } c } }
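A short usage sketch: requests at or below the inline capacity stay on the stack, anything larger falls back to a heap Vec; the capacities here are illustrative:

fn inline_vector_usage_sketch() {
    let mut small: InlineVector<f32> = InlineVector::with_capacity(8);
    small.push(1.0); // backed by the inline ArrayVec (capacity <= 64)
    let mut large: InlineVector<f32> = InlineVector::with_capacity(1024);
    large.push(2.0); // backed by a heap-allocated Vec
    assert_eq!(small[0], 1.0);
    assert_eq!(large.len(), 1);
}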
walletcreate.go
// Copyright © 2019, 2020 Weald Technology Trading // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cmd import ( "context" "crypto/rand" "fmt" "os" "strings" "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/viper" bip39 "github.com/tyler-smith/go-bip39" distributed "github.com/wealdtech/go-eth2-wallet-distributed" keystorev4 "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4" hd "github.com/wealdtech/go-eth2-wallet-hd/v2" nd "github.com/wealdtech/go-eth2-wallet-nd/v2" ) var walletCreateCmd = &cobra.Command{ Use: "create", Short: "Create a wallet", Long: `Create a wallet. For example: ethdo wallet create --wallet="Primary wallet" --type=non-deterministic In quiet mode this will return 0 if the wallet is created successfully, otherwise 1.`, Run: func(cmd *cobra.Command, args []string) { ctx, cancel := context.WithTimeout(context.Background(), viper.GetDuration("timeout")) defer cancel() assert(viper.GetString("remote") == "", "wallet create not available with remote wallets") assert(viper.GetString("wallet") != "", "--wallet is required") assert(viper.GetString("type") != "", "--type is required") var err error switch strings.ToLower(viper.GetString("type")) { case "non-deterministic", "nd": assert(viper.GetString("mnemonic") == "", "--mnemonic is not allowed with non-deterministic wallets") err = walletCreateND(ctx, viper.GetString("wallet")) case "hierarchical deterministic", "hd": if quiet { fmt.Printf("Creation of hierarchical deterministic wallets prints its mnemonic, so cannot be run with the --quiet flag") os.Exit(_exitFailure) } assert(getWalletPassphrase() != "", "--walletpassphrase is required for hierarchical deterministic wallets") err = walletCreateHD(ctx, viper.GetString("wallet"), getWalletPassphrase(), viper.GetString("mnemonic")) case "distributed": assert(viper.GetString("mnemonic") == "", "--mnemonic is not allowed with distributed wallets") err = walletCreateDistributed(ctx, viper.GetString("wallet")) default: die("unknown wallet type") } errCheck(err, "Failed to create wallet") }, } // walletCreateND creates a non-deterministic wallet. func walletCreateND(ctx context.Context, name string) error { _, err := nd.CreateWallet(ctx, name, store, keystorev4.New()) return err } // walletCreateDistributed creates a distributed wallet. func walletCreateDistributed(ctx context.Context, name string) error { _, err := distributed.CreateWallet(ctx, name, store, keystorev4.New()) return err } // walletCreateHD creates a hierarchical-deterministic wallet. func walletCreateHD(ctx context.Context, name string, passphrase string, mnemonic string) error { encryptor := keystorev4.New() printMnemonic := mnemonic == "" mnemonicPassphrase := "" if mnemonic == "" { // Create a new random mnemonic. 
entropy := make([]byte, 32) _, err := rand.Read(entropy) if err != nil { return errors.Wrap(err, "failed to generate entropy for wallet mnemonic") } mnemonic, err = bip39.NewMnemonic(entropy) if err != nil { return errors.Wrap(err, "failed to generate wallet mnemonic") } } else { // We have an existing mnemonic. If there are more than 24 words we treat the additional words as the passphrase. mnemonicParts := strings.Split(mnemonic, " ") if len(mnemonicParts) > 24 { mnemonic = strings.Join(mnemonicParts[:24], " ") mnemonicPassphrase = strings.Join(mnemonicParts[24:], " ") } } // Ensure the mnemonic is valid. if !bip39.IsMnemonicValid(mnemonic) { return errors.New("mnemonic is not valid") } // Create seed from mnemonic and passphrase. seed := bip39.NewSeed(mnemonic, mnemonicPassphrase) _, err := hd.CreateWallet(ctx, name, []byte(passphrase), store, encryptor, seed) if printMnemonic { fmt.Printf(`The following phrase is your mnemonic for this wallet: %s Anyone with access to this mnemonic can recreate the accounts in this wallet, so please store this mnemonic safely. More information about mnemonics can be found at https://support.mycrypto.com/general-knowledge/cryptography/how-do-mnemonic-phrases-work Please note this mnemonic is not stored within the wallet, so cannot be retrieved or displayed again. As such, this mnemonic should be written down or otherwise protected before proceeding. `, mnemonic) } return err } func init() {
walletCmd.AddCommand(walletCreateCmd) walletFlags(walletCreateCmd) walletCreateCmd.Flags().String("type", "non-deterministic", "Type of wallet to create (non-deterministic or hierarchical deterministic)") if err := viper.BindPFlag("type", walletCreateCmd.Flags().Lookup("type")); err != nil { panic(err) } walletCreateCmd.Flags().String("mnemonic", "", "The 24-word mnemonic for a hierarchical deterministic wallet") if err := viper.BindPFlag("mnemonic", walletCreateCmd.Flags().Lookup("mnemonic")); err != nil { panic(err) } }
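The "more than 24 words" convention in walletCreateHD is worth seeing in isolation; a sketch of just that split, with the extra words treated as a BIP39-style passphrase:

func splitMnemonicSketch(input string) (mnemonic, passphrase string) {
	parts := strings.Split(input, " ")
	if len(parts) > 24 {
		return strings.Join(parts[:24], " "), strings.Join(parts[24:], " ")
	}
	return input, ""
}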
nanops.py
import numpy as np from . import dtypes, nputils, utils from .duck_array_ops import _dask_or_eager_func, count, fillna, isnull, where_method from .pycompat import dask_array_type try: import dask.array as dask_array except ImportError: dask_array = None def _replace_nan(a, val): """ replace nan in a by val, and returns the replaced array and the nan position """ mask = isnull(a) return where_method(val, mask, a), mask def _maybe_null_out(result, axis, mask, min_count=1): """ xarray version of pandas.core.nanops._maybe_null_out """ if hasattr(axis, "__len__"): # if tuple or list raise ValueError( "min_count is not available for reduction with more than one dimensions." ) if axis is not None and getattr(result, "ndim", False): null_mask = (mask.shape[axis] - mask.sum(axis) - min_count) < 0 if null_mask.any(): dtype, fill_value = dtypes.maybe_promote(result.dtype) result = result.astype(dtype) result[null_mask] = fill_value elif getattr(result, "dtype", None) not in dtypes.NAT_TYPES: null_mask = mask.size - mask.sum() if null_mask < min_count: result = np.nan return result def _nan_argminmax_object(func, fill_value, value, axis=None, **kwargs): """ In house nanargmin, nanargmax for object arrays. Always return integer type """ valid_count = count(value, axis=axis) value = fillna(value, fill_value) data = _dask_or_eager_func(func)(value, axis=axis, **kwargs) # TODO This will evaluate dask arrays and might be costly. if (valid_count == 0).any(): raise ValueError("All-NaN slice encountered") return data def _nan_minmax_object(func, fill_value, value, axis=None, **kwargs): """ In house nanmin and nanmax for object array """ valid_count = count(value, axis=axis) filled_value = fillna(value, fill_value) data = getattr(np, func)(filled_value, axis=axis, **kwargs) if not hasattr(data, "dtype"): # scalar case data = fill_value if valid_count == 0 else data # we've computed a single min, max value of type object. # don't let np.array turn a tuple back into an array return utils.to_0d_object_array(data) return where_method(data, valid_count != 0) def nanmin(a, axis=None, out=None): if a.dtype.kind == "O": return _nan_minmax_object("min", dtypes.get_pos_infinity(a.dtype), a, axis) module = dask_array if isinstance(a, dask_array_type) else nputils return module.nanmin(a, axis=axis) def nanmax(a, axis=None, out=None): if a.dtype.kind == "O": return _nan_minmax_object("max", dtypes.get_neg_infinity(a.dtype), a, axis) module = dask_array if isinstance(a, dask_array_type) else nputils return module.nanmax(a, axis=axis) def nanargmin(a, axis=None): if a.dtype.kind == "O": fill_value = dtypes.get_pos_infinity(a.dtype) return _nan_argminmax_object("argmin", fill_value, a, axis=axis) module = dask_array if isinstance(a, dask_array_type) else nputils return module.nanargmin(a, axis=axis) def
(a, axis=None): if a.dtype.kind == "O": fill_value = dtypes.get_neg_infinity(a.dtype) return _nan_argminmax_object("argmax", fill_value, a, axis=axis) module = dask_array if isinstance(a, dask_array_type) else nputils return module.nanargmax(a, axis=axis) def nansum(a, axis=None, dtype=None, out=None, min_count=None): a, mask = _replace_nan(a, 0) result = _dask_or_eager_func("sum")(a, axis=axis, dtype=dtype) if min_count is not None: return _maybe_null_out(result, axis, mask, min_count) else: return result def _nanmean_ddof_object(ddof, value, axis=None, dtype=None, **kwargs): """ In house nanmean. ddof argument will be used in _nanvar method """ from .duck_array_ops import count, fillna, _dask_or_eager_func, where_method valid_count = count(value, axis=axis) value = fillna(value, 0) # As dtype inference is impossible for object dtype, we assume float # https://github.com/dask/dask/issues/3162 if dtype is None and value.dtype.kind == "O": dtype = value.dtype if value.dtype.kind in ["cf"] else float data = _dask_or_eager_func("sum")(value, axis=axis, dtype=dtype, **kwargs) data = data / (valid_count - ddof) return where_method(data, valid_count != 0) def nanmean(a, axis=None, dtype=None, out=None): if a.dtype.kind == "O": return _nanmean_ddof_object(0, a, axis=axis, dtype=dtype) if isinstance(a, dask_array_type): return dask_array.nanmean(a, axis=axis, dtype=dtype) return np.nanmean(a, axis=axis, dtype=dtype) def nanmedian(a, axis=None, out=None): return _dask_or_eager_func("nanmedian", eager_module=nputils)(a, axis=axis) def _nanvar_object(value, axis=None, ddof=0, keepdims=False, **kwargs): value_mean = _nanmean_ddof_object( ddof=0, value=value, axis=axis, keepdims=True, **kwargs ) squared = (value.astype(value_mean.dtype) - value_mean) ** 2 return _nanmean_ddof_object(ddof, squared, axis=axis, keepdims=keepdims, **kwargs) def nanvar(a, axis=None, dtype=None, out=None, ddof=0): if a.dtype.kind == "O": return _nanvar_object(a, axis=axis, dtype=dtype, ddof=ddof) return _dask_or_eager_func("nanvar", eager_module=nputils)( a, axis=axis, dtype=dtype, ddof=ddof ) def nanstd(a, axis=None, dtype=None, out=None, ddof=0): return _dask_or_eager_func("nanstd", eager_module=nputils)( a, axis=axis, dtype=dtype, ddof=ddof ) def nanprod(a, axis=None, dtype=None, out=None, min_count=None): a, mask = _replace_nan(a, 1) result = _dask_or_eager_func("nanprod")(a, axis=axis, dtype=dtype, out=out) if min_count is not None: return _maybe_null_out(result, axis, mask, min_count) else: return result def nancumsum(a, axis=None, dtype=None, out=None): return _dask_or_eager_func("nancumsum", eager_module=nputils)( a, axis=axis, dtype=dtype ) def nancumprod(a, axis=None, dtype=None, out=None): return _dask_or_eager_func("nancumprod", eager_module=nputils)( a, axis=axis, dtype=dtype )
nanargmax
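# A minimal NumPy sketch of the min_count semantics implemented by
# _replace_nan/_maybe_null_out above, assuming a 1-D float array;
# the function name here is illustrative, not part of xarray.
import numpy as np

def nansum_with_min_count(a, min_count=1):
    mask = np.isnan(a)                      # positions of missing values
    result = np.where(mask, 0, a).sum()     # sum with NaNs treated as 0
    if mask.size - mask.sum() < min_count:  # too few valid contributors?
        return np.nan
    return result

print(nansum_with_min_count(np.array([1.0, np.nan])))     # 1.0
print(nansum_with_min_count(np.array([np.nan, np.nan])))  # nan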
struct_data_in_query_device_group_by_tags.go
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

// DataInQueryDeviceGroupByTags is a nested struct in iot response
type DataInQueryDeviceGroupByTags struct {
	DeviceGroup []DeviceGroup `json:"DeviceGroup" xml:"DeviceGroup"`
}
package iot
root_unix_test.go
//go:build !windows

/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"bytes"
	"io"
	"os"
	"path/filepath"
	"strings"
	"testing"
)

func checkPermsStderr() (string, error) {
	r, w, err := os.Pipe()
	if err != nil {
		return "", err
	}

	stderr := os.Stderr
	os.Stderr = w
	defer func() {
		os.Stderr = stderr
	}()

	checkPerms()
	w.Close()

	var text bytes.Buffer
	io.Copy(&text, r)
	return text.String(), nil
}

func
(t *testing.T) {
	tdir := t.TempDir()
	tfile := filepath.Join(tdir, "testconfig")
	fh, err := os.OpenFile(tfile, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0440)
	if err != nil {
		t.Errorf("Failed to create temp file: %s", err)
	}

	tconfig := settings.KubeConfig
	settings.KubeConfig = tfile
	defer func() { settings.KubeConfig = tconfig }()

	text, err := checkPermsStderr()
	if err != nil {
		t.Fatalf("could not read from stderr: %s", err)
	}
	expectPrefix := "WARNING: Kubernetes configuration file is group-readable. This is insecure. Location:"
	if !strings.HasPrefix(text, expectPrefix) {
		t.Errorf("Expected to get a warning for group perms. Got %q", text)
	}

	if err := fh.Chmod(0404); err != nil {
		t.Errorf("Could not change mode on file: %s", err)
	}
	text, err = checkPermsStderr()
	if err != nil {
		t.Fatalf("could not read from stderr: %s", err)
	}
	expectPrefix = "WARNING: Kubernetes configuration file is world-readable. This is insecure. Location:"
	if !strings.HasPrefix(text, expectPrefix) {
		t.Errorf("Expected to get a warning for world perms. Got %q", text)
	}
}
TestCheckPerms
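# A minimal Python analogue of the stderr-capture trick used by
# checkPermsStderr above: swap the stream, run the code under test,
# and return whatever it wrote. Names are illustrative.
import contextlib
import io
import sys

def capture_stderr(fn):
    buf = io.StringIO()
    with contextlib.redirect_stderr(buf):  # temporary stderr swap
        fn()
    return buf.getvalue()

text = capture_stderr(lambda: print("WARNING: insecure perms", file=sys.stderr))
assert text.startswith("WARNING:")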
HipChatMonitor.py
from hipchat import HipChatManager
import time
import configparser

_MAX_SLEEP_TIME = 5
_MIN_SLEEP_TIME = 2
_SPAM_EODBOT_URL = 3500


class HipChatMonitor:

    def __init__(self, eodBotParser):
        print("Initializing HipChatMonitor with eodBotParser: ", eodBotParser)
        self.sleepTime = _MIN_SLEEP_TIME
        self.lastIdChecked = ""
        self.eodBotParser = eodBotParser
        config = configparser.ConfigParser()
        config.read('config.ini')
        self.bot_id = config['HIPCHAT']['hipchat.bot_id']
        self.hipChatManager = HipChatManager.HipChatManager()
        self.spamLastEodBotUrlTime = 0
        self.hipChatManager.send("[EodBot] I've been initialised! Troll time just started :)")
        self.hipChatManager.send("[EodBot] Visit http://6dc1e2bd.fbdev.midasplayer.com/ to teach me how to troll")

    def __adjustInterval(self, failed):
        # Back off toward the maximum while polls come back empty;
        # reset to the minimum as soon as a message is handled.
        if failed:
            if self.sleepTime < _MAX_SLEEP_TIME:
                self.sleepTime += 1
        else:
            self.sleepTime = _MIN_SLEEP_TIME

    def start(self):
        while True:
            newestMessage = self.hipChatManager.fetch()
            if ((str(newestMessage["from"]) != "Sassy")
                    and (str(newestMessage["from"]["id"]) != self.bot_id)
                    and (newestMessage["id"] != self.lastIdChecked)):
                self.lastIdChecked = newestMessage["id"]
                print("Parsing message: ", newestMessage['message'])
                messageToSend = self.eodBotParser.parse(newestMessage['message'])
                if messageToSend is not None:
                    self.hipChatManager.send(messageToSend)
                self.__adjustInterval(False)
            else:
                self.__adjustInterval(True)
            print("Sleeping for ", self.sleepTime, " seconds")
            time.sleep(self.sleepTime)
            self.spamLastEodBotUrlTime += 1
            if self.spamLastEodBotUrlTime >= _SPAM_EODBOT_URL:
                self.hipChatManager.send("[EodBot] Visit http://6dc1e2bd.fbdev.midasplayer.com/ to teach me how to troll")
                self.spamLastEodBotUrlTime = 0
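# A self-contained sketch of the polling back-off policy encoded in
# __adjustInterval above: empty polls stretch the sleep toward the
# maximum, and a handled message snaps it back to the minimum.
_MIN, _MAX = 2, 5

def next_sleep(current, failed):
    if failed:
        return min(current + 1, _MAX)
    return _MIN

delay = _MIN
for handled in (False, False, False, True):
    delay = next_sleep(delay, failed=not handled)
print(delay)  # back to 2 after the successful poll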
index.ts
import { execCmd } from "./execCmd";

type GitCommand = string & { _brand?: "GitCommand" };
    gitIsMissing: boolean;
    commandFailed: boolean;
    stdout: string;
};

export { runGitCommand };

async function runGitCommand(
    gitCommand: GitCommand
): Promise<GitCommandResult> {
    if (await gitIsMissing()) {
        return {
            stdout: "",
            commandFailed: true,
            gitIsMissing: true,
        };
    }

    const res = await execCmd(gitCommand);
    const { stdout } = res;

    if ("isError" in res) {
        return {
            stdout,
            commandFailed: true,
            gitIsMissing: false,
        };
    }

    return {
        stdout,
        commandFailed: false,
        gitIsMissing: false,
    };
}

async function gitIsMissing() {
    const res = await execCmd("git --version");
    return "isError" in res;
}
type GitCommandResult = {
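# The runGitCommand/GitCommandResult pair above reports failure modes
# in the return value instead of throwing. A rough Python equivalent
# (requires git on PATH; names are illustrative):
import subprocess
from dataclasses import dataclass

@dataclass
class GitCommandResult:
    stdout: str
    command_failed: bool
    git_is_missing: bool

def run_git_command(args):
    try:
        proc = subprocess.run(["git", *args], capture_output=True, text=True)
    except FileNotFoundError:  # git binary not installed
        return GitCommandResult("", True, True)
    return GitCommandResult(proc.stdout, proc.returncode != 0, False)

print(run_git_command(["--version"]).stdout.strip())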
Salesforce.py
import importlib import logging import re import time from datetime import datetime from dateutil.parser import parse as parse_date, ParserError from pprint import pformat from robot.libraries.BuiltIn import BuiltIn, RobotNotRunningError from robot.utils import timestr_to_secs from cumulusci.robotframework.utils import get_locator_module_name from cumulusci.robotframework.form_handlers import get_form_handler from selenium.webdriver.common.keys import Keys from selenium.webdriver.common.action_chains import ActionChains from selenium.common.exceptions import ( StaleElementReferenceException, NoSuchElementException, JavascriptException, WebDriverException, ) import faker from simple_salesforce import SalesforceResourceNotFound from cumulusci.robotframework.utils import selenium_retry, capture_screenshot_on_error from SeleniumLibrary.errors import ElementNotFound, NoOpenBrowser from urllib3.exceptions import ProtocolError from cumulusci.core.template_utils import format_str from cumulusci.robotframework import locator_manager OID_REGEX = r"^(%2F)?([a-zA-Z0-9]{15,18})$" STATUS_KEY = ("status",) lex_locators = {} # will be initialized when Salesforce is instantiated # https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_composite_sobjects_collections_create.htm SF_COLLECTION_INSERTION_LIMIT = 200 @selenium_retry class Salesforce(object): """A keyword library for working with Salesforce Lightning pages While you can import this directly into any suite, the recommended way to include this in a test suite is to import the ``Salesforce.robot`` resource file. """ ROBOT_LIBRARY_SCOPE = "GLOBAL" def __init__(self, debug=False, locators=None): self.debug = debug self._session_records = [] # Turn off info logging of all http requests logging.getLogger("requests.packages.urllib3.connectionpool").setLevel( logging.WARN ) if locators: lex_locators.update(locators) else: self._init_locators() self._faker = faker.Faker("en_US") try: self.builtin.set_global_variable("${faker}", self._faker) except RobotNotRunningError: # this only happens during unit tests, and we don't care. pass def _init_locators(self): """Load the appropriate locator file for the current version If no version can be determined, we'll use the highest numbered locator file name. """ try: version = int(float(self.get_latest_api_version())) except RobotNotRunningError: # Likely this means we are running in the context of # documentation generation. Setting the version to # None will result in using the latest version of # locators. 
version = None locator_module_name = get_locator_module_name(version) self.locators_module = importlib.import_module(locator_module_name) lex_locators.update(self.locators_module.lex_locators) @property def builtin(self): return BuiltIn() @property def cumulusci(self): return self.builtin.get_library_instance("cumulusci.robotframework.CumulusCI") def initialize_location_strategies(self): """Initialize the Salesforce custom location strategies Note: This keyword is called automatically from *Open Test Browser* """ if not self.builtin.get_variable_value( "${LOCATION STRATEGIES INITIALIZED}", False ): # this manages strategies based on locators in a dictionary locator_manager.register_locators("sf", lex_locators) locator_manager.add_location_strategies() # these are more traditional location strategies based on keywords # or functions self.selenium.add_location_strategy( "text", "Salesforce.Locate Element by Text" ) self.selenium.add_location_strategy( "title", "Salesforce.Locate Element by Title" ) self.selenium.add_location_strategy("label", self.locate_element_by_label) self.builtin.set_suite_variable("${LOCATION STRATEGIES INITIALIZED}", True) @selenium_retry(False) def _jsclick(self, locator): """Use javascript to click an element on the page See https://help.salesforce.com/articleView?id=000352057&language=en_US&mode=1&type=1 """ self.selenium.wait_until_page_contains_element(locator) self.selenium.wait_until_element_is_enabled(locator) for should_retry in (True, False): try: # Setting the focus first seems to be required as of Spring'20 # (read: without it, tests started failing in that release). I # suspect it's because there is a focusOut handler on form # fields which need to be triggered for data to be accepted. element = self.selenium.get_webelement(locator) self.selenium.driver.execute_script( "arguments[0].focus(); arguments[0].click()", element ) return except StaleElementReferenceException: if should_retry: time.sleep(1) else: raise def set_faker_locale(self, locale): """Set the locale for fake data This sets the locale for all calls to the ``Faker`` keyword and ``${faker}`` variable. The default is en_US For a list of supported locales see [https://faker.readthedocs.io/en/master/locales.html|Localized Providers] in the Faker documentation. Example | Set Faker Locale fr_FR | ${french_address}= Faker address """ try: self._faker = faker.Faker(locale) except AttributeError: raise Exception(f"Unknown locale for fake data: '{locale}'") def get_fake_data(self, fake, *args, **kwargs): """Return fake data This uses the [https://faker.readthedocs.io/en/master/|Faker] library to provide fake data in a variety of formats (names, addresses, credit card numbers, dates, phone numbers, etc) and locales (en_US, fr_FR, etc). The _fake_ argument is the name of a faker property such as ``first_name``, ``address``, ``lorem``, etc. Additional arguments depend on type of data requested. For a comprehensive list of the types of fake data that can be generated see [https://faker.readthedocs.io/en/master/providers.html|Faker providers] in the Faker documentation. The return value is typically a string, though in some cases some other type of object will be returned. For example, the ``date_between`` fake returns a [https://docs.python.org/3/library/datetime.html#date-objects|datetime.date object]. Each time a piece of fake data is requested it will be regenerated, so that multiple calls will usually return different data. 
This keyword can also be called using robot's extended variable syntax using the variable ``${faker}``. In such a case, the data being asked for is a method call and arguments must be enclosed in parentheses and be quoted. Arguments should not be quoted when using the keyword. To generate fake data for a locale other than en_US, use the keyword ``Set Faker Locale`` prior to calling this keyword. Examples | # Generate a fake first name | ${first_name}= Get fake data first_name | # Generate a fake date in the default format | ${date}= Get fake data date | # Generate a fake date with an explicit format | ${date}= Get fake data date pattern=%Y-%m-%d | # Generate a fake date using extended variable syntax | Input text //input ${faker.date(pattern='%Y-%m-%d')} """ try: return self._faker.format(fake, *args, **kwargs) except AttributeError: raise Exception(f"Unknown fake data request: '{fake}'") def get_latest_api_version(self): return self.cumulusci.org.latest_api_version def create_webdriver_with_retry(self, *args, **kwargs): """Call the Create Webdriver keyword. Retry on connection resets which can happen if custom domain propagation is slow. """ # Get selenium without referencing selenium.driver which doesn't exist yet selenium = self.builtin.get_library_instance("SeleniumLibrary") for _ in range(12): try: return selenium.create_webdriver(*args, **kwargs) except ProtocolError: # Give browser some more time to start up time.sleep(5) raise Exception("Could not connect to remote webdriver after 1 minute") @capture_screenshot_on_error def click_modal_button(self, title): """Clicks a button in a Lightning modal.""" locator = lex_locators["modal"]["button"].format(title) self.selenium.wait_until_page_contains_element(locator) self.selenium.wait_until_element_is_enabled(locator) self._jsclick(locator) @capture_screenshot_on_error def click_object_button(self, title): """Clicks a button in an object's actions.""" locator = lex_locators["object"]["button"].format(title=title) self._jsclick(locator) self.wait_until_modal_is_open() @capture_screenshot_on_error def scroll_element_into_view(self, locator): """Scroll the element identified by 'locator' This is a replacement for the keyword of the same name in SeleniumLibrary. The SeleniumLibrary implementation uses an unreliable method on Firefox. This keyword uses a more reliable technique. For more info see https://stackoverflow.com/a/52045231/7432 """ element = self.selenium.get_webelement(locator) self.selenium.driver.execute_script("arguments[0].scrollIntoView()", element) @capture_screenshot_on_error def load_related_list(self, heading, tries=10): """Scrolls down until the specified related list loads. If the related list isn't found, the keyword will scroll down in 100 pixel increments to trigger lightning into loading the list. This process of scrolling will be repeated until the related list has been loaded or we've tried several times (the default is 10 tries) """ locator = lex_locators["record"]["related"]["card"].format(heading) for i in range(tries): try: self.selenium.scroll_element_into_view(locator) return except (ElementNotFound, JavascriptException, WebDriverException): self.builtin.log( f"related list '{heading}' not found; scrolling...", "DEBUG" ) self.selenium.execute_javascript("window.scrollBy(0, 100)") self.wait_for_aura() raise AssertionError(f"Timed out waiting for related list '{heading}' to load.") def click_related_list_button(self, heading, button_title): """Clicks a button in the heading of a related list. 
Waits for a modal to open after clicking the button. """ self.load_related_list(heading) locator = lex_locators["record"]["related"]["button"].format( heading, button_title ) self._jsclick(locator) self.wait_until_modal_is_open() @capture_screenshot_on_error def
(self, heading, title): """Clicks a link in the related list with the specified heading. This keyword will automatically call *Wait until loading is complete*. """ self.load_related_list(heading) locator = lex_locators["record"]["related"]["link"].format(heading, title) try: self._jsclick(locator) except Exception as e: self.builtin.log(f"Exception: {e}", "DEBUG") raise Exception( f"Unable to find related link under heading '{heading}' with the text '{title}'" ) self.wait_until_loading_is_complete() def click_related_item_popup_link(self, heading, title, link): """Clicks a link in the popup menu for a related list item. heading specifies the name of the list, title specifies the name of the item, and link specifies the name of the link """ self.load_related_list(heading) locator = lex_locators["record"]["related"]["popup_trigger"].format( heading, title ) self.selenium.wait_until_page_contains_element(locator) self._jsclick(locator) locator = lex_locators["popup"]["link"].format(link) self._jsclick(locator) self.wait_until_loading_is_complete() def close_modal(self): """Closes the open modal""" locator = lex_locators["modal"]["close"] self._jsclick(locator) def current_app_should_be(self, app_name): """Validates the currently selected Salesforce App""" locator = lex_locators["app_launcher"]["current_app"].format(app_name) elem = self.selenium.get_webelement(locator) assert app_name == elem.text, "Expected app to be {} but found {}".format( app_name, elem.text ) def delete_session_records(self): """Deletes records that were created while running this test case. (Only records specifically recorded using the Store Session Record keyword are deleted.) """ self._session_records.reverse() self.builtin.log("Deleting {} records".format(len(self._session_records))) for record in self._session_records[:]: self.builtin.log(" Deleting {type} {id}".format(**record)) try: self.salesforce_delete(record["type"], record["id"]) except SalesforceResourceNotFound: self.builtin.log(" {type} {id} is already deleted".format(**record)) except Exception as e: self.builtin.log( " {type} {id} could not be deleted:".format(**record), level="WARN", ) self.builtin.log(" {}".format(e), level="WARN") def get_active_browser_ids(self): """Return the id of all open browser ids""" # This relies on some private data structures, but presently # there is no other way. There's been a discussion in the # robot slack channels about adding a new keyword that does # what this keyword does. When that happens, we can remove # this keyword. driver_ids = [] try: driver_cache = self.selenium._drivers except NoOpenBrowser: return [] for index, driver in enumerate(driver_cache._connections): if driver not in driver_cache._closed: # SeleniumLibrary driver ids start at one rather than zero driver_ids.append(index + 1) return driver_ids def get_current_record_id(self): """Parses the current url to get the object id of the current record. 
Expects url format like: [a-zA-Z0-9]{15,18} """ url = self.selenium.get_location() for part in url.split("/"): oid_match = re.match(OID_REGEX, part) if oid_match is not None: return oid_match.group(2) raise AssertionError("Could not parse record id from url: {}".format(url)) def field_value_should_be(self, label, expected_value): """Verify that the form field for the given label is the expected value Example: | Field value should be Account Name ACME Labs """ value = self.get_field_value(label) self.builtin.should_be_equal(value, expected_value) def get_field_value(self, label): """Return the current value of a form field based on the field label""" api_version = int(float(self.get_latest_api_version())) locator = self._get_input_field_locator(label) if api_version >= 51: # this works for both First Name (input) and Account Name (picklist) value = self.selenium.get_value(locator) else: # older releases it's a bit more complex element = self.selenium.get_webelement(locator) if element.get_attribute("role") == "combobox": value = self.selenium.get_text(f"sf:object.field_lookup_value:{label}") else: value = self.selenium.get_value(f"sf:object.field:{label}") return value def get_locator(self, path, *args, **kwargs): """Returns a rendered locator string from the Salesforce lex_locators dictionary. This can be useful if you want to use an element in a different way than the built in keywords allow. """ locator = lex_locators for key in path.split("."): locator = locator[key] return locator.format(*args, **kwargs) def get_record_type_id(self, obj_type, developer_name): """Returns the Record Type Id for a record type name""" soql = "SELECT Id FROM RecordType WHERE SObjectType='{}' and DeveloperName='{}'".format( obj_type, developer_name ) res = self.cumulusci.sf.query_all(soql) return res["records"][0]["Id"] def get_related_list_count(self, heading): """Returns the number of items indicated for a related list.""" locator = lex_locators["record"]["related"]["count"].format(heading) count = self.selenium.get_webelement(locator).text count = count.replace("(", "").replace(")", "") return int(count) def go_to_object_home(self, obj_name): """Navigates to the Home view of a Salesforce Object""" url = self.cumulusci.org.lightning_base_url url = "{}/lightning/o/{}/home".format(url, obj_name) self.selenium.go_to(url) self.wait_until_loading_is_complete(lex_locators["actions"]) def go_to_object_list(self, obj_name, filter_name=None): """Navigates to the Home view of a Salesforce Object""" url = self.cumulusci.org.lightning_base_url url = "{}/lightning/o/{}/list".format(url, obj_name) if filter_name: url += "?filterName={}".format(filter_name) self.selenium.go_to(url) self.wait_until_loading_is_complete(lex_locators["actions"]) def go_to_record_home(self, obj_id): """Navigates to the Home view of a Salesforce Object""" url = self.cumulusci.org.lightning_base_url url = "{}/lightning/r/{}/view".format(url, obj_id) self.selenium.go_to(url) self.wait_until_loading_is_complete(lex_locators["actions"]) def go_to_setup_home(self): """Navigates to the Home tab of Salesforce Setup""" url = self.cumulusci.org.lightning_base_url self.selenium.go_to(url + "/lightning/setup/SetupOneHome/home") self.wait_until_loading_is_complete() def go_to_setup_object_manager(self): """Navigates to the Object Manager tab of Salesforce Setup""" url = self.cumulusci.org.lightning_base_url self.selenium.go_to(url + "/lightning/setup/ObjectManager/home") self.wait_until_loading_is_complete() def header_field_should_have_value(self, 
                                           label):
        """Validates that a field in the record header has a text value.

        NOTE: Use other keywords for non-string value types
        """
        locator = lex_locators["record"]["header"]["field_value"].format(label)
        self.selenium.page_should_contain_element(locator)

    def header_field_should_not_have_value(self, label):
        """Validates that a field in the record header does not have a value.

        NOTE: Use other keywords for non-string value types
        """
        locator = lex_locators["record"]["header"]["field_value"].format(label)
        self.selenium.page_should_not_contain_element(locator)

    def header_field_should_have_link(self, label):
        """Validates that a field in the record header has a link as its value"""
        locator = lex_locators["record"]["header"]["field_value_link"].format(label)
        self.selenium.page_should_contain_element(locator)

    def header_field_should_not_have_link(self, label):
        """Validates that a field in the record header does not have a link as its value"""
        locator = lex_locators["record"]["header"]["field_value_link"].format(label)
        self.selenium.page_should_not_contain_element(locator)

    def click_header_field_link(self, label):
        """Clicks a link in record header."""
        locator = lex_locators["record"]["header"]["field_value_link"].format(label)
        self._jsclick(locator)

    def header_field_should_be_checked(self, label):
        """Validates that a checkbox field in the record header is checked"""
        locator = lex_locators["record"]["header"]["field_value_checked"].format(label)
        self.selenium.page_should_contain_element(locator)

    def header_field_should_be_unchecked(self, label):
        """Validates that a checkbox field in the record header is unchecked"""
        locator = lex_locators["record"]["header"]["field_value_unchecked"].format(
            label
        )
        self.selenium.page_should_contain_element(locator)

    def log_browser_capabilities(self, loglevel="INFO"):
        """Logs all of the browser capabilities as reported by selenium"""
        output = "selenium browser capabilities:\n"
        output += pformat(self.selenium.driver.capabilities, indent=4)
        self.builtin.log(output, level=loglevel)

    @capture_screenshot_on_error
    def open_app_launcher(self, retry=True):
        """Opens the Salesforce App Launcher Modal

        Note: starting with Spring '20 the app launcher button opens a
        menu rather than a modal. To maintain backwards compatibility,
        this keyword will continue to open the modal rather than the
        menu. If you need to interact with the app launcher menu, you
        will need to create a custom keyword.

        If the retry parameter is true, the keyword will close and then
        re-open the app launcher if it times out while waiting for the
        dialog to open.
        """
        self._jsclick("sf:app_launcher.button")
        self.selenium.wait_until_element_is_visible("sf:app_launcher.view_all")
        self._jsclick("sf:app_launcher.view_all")
        self.wait_until_modal_is_open()
        try:
            # the modal may be open, but not yet fully rendered
            # wait until at least one link appears. We've seen that sometimes
            # the dialog hangs prior to any links showing up
            self.selenium.wait_until_element_is_visible(
                "xpath://ul[contains(@class, 'al-modal-list')]//li"
            )
        except Exception as e:
            # This should never happen, yet it does. Experience has
            # shown that sometimes (at least in spring '20) the modal
            # never renders. Refreshing the modal seems to fix it.
if retry: self.builtin.log( f"caught exception {e} waiting for app launcher; retrying", "DEBUG" ) self.selenium.press_keys("sf:modal.is_open", "ESCAPE") self.wait_until_modal_is_closed() self.open_app_launcher(retry=False) else: self.builtin.log( "caught exception waiting for app launcher; not retrying", "DEBUG" ) raise def populate_field(self, name, value): """Enters a value into an input or textarea field. 'name' represents the label on the page (eg: "First Name"), and 'value' is the new value. Any existing value will be replaced. """ locator = self._get_input_field_locator(name) self._populate_field(locator, value) def populate_lookup_field(self, name, value): """Enters a value into a lookup field.""" input_locator = self._get_input_field_locator(name) menu_locator = lex_locators["object"]["field_lookup_link"].format(value) self._populate_field(input_locator, value) for x in range(3): self.wait_for_aura() try: self.selenium.get_webelement(menu_locator) except ElementNotFound: # Give indexing a chance to catch up time.sleep(2) field = self.selenium.get_webelement(input_locator) field.send_keys(Keys.BACK_SPACE) else: break self.selenium.set_focus_to_element(menu_locator) self._jsclick(menu_locator) self.wait_for_aura() def _get_input_field_locator(self, name): """Given an input field label, return a locator for the related input field This looks for a <label> element with the given text, or a label with a span with the given text. The value of the 'for' attribute is then extracted from the label and used to create a new locator with that id. For example, the locator 'abc123' will be returned for the following html: <label for='abc123'>First Name</label> -or- <label for='abc123'><span>First Name</span> """ try: # we need to make sure that if a modal is open, we only find # the input element inside the modal. Otherwise it's possible # that the xpath could pick the wrong element. self.selenium.get_webelement(lex_locators["modal"]["is_open"]) modal_prefix = "//div[contains(@class, 'modal-container')]" except ElementNotFound: modal_prefix = "" locator = modal_prefix + lex_locators["object"]["field_label"].format( name, name ) input_element_id = self.selenium.get_element_attribute(locator, "for") return input_element_id def _populate_field(self, locator, value): self.builtin.log(f"value: {value}' locator: '{locator}'", "DEBUG") field = self.selenium.get_webelement(locator) self._focus(field) if field.get_attribute("value"): self._clear(field) field.send_keys(value) def _focus(self, element): """Set focus to an element In addition to merely setting the focus, we click the mouse to the field in case there are functions tied to that event. """ actions = ActionChains(self.selenium.driver) actions.move_to_element(element).click().perform() self.selenium.set_focus_to_element(element) def _clear(self, element): """Clear the field, using any means necessary This is surprisingly hard to do with a generic solution. Some methods work for some components and/or on some browsers but not others. Therefore, several techniques are employed. """ element.clear() self.selenium.driver.execute_script("arguments[0].value = '';", element) # Select all and delete just in case the element didn't get cleared element.send_keys(Keys.HOME + Keys.SHIFT + Keys.END) element.send_keys(Keys.BACKSPACE) if element.get_attribute("value"): # Give the UI a chance to settle down. The sleep appears # necessary. Without it, this keyword sometimes fails to work # properly. 
With it, I was able to run 700+ tests without a single # failure. time.sleep(0.25) # Even after all that, some elements refuse to be cleared out. # I'm looking at you, currency fields on Firefox. if element.get_attribute("value"): self._force_clear(element) def _force_clear(self, element): """Use brute-force to clear an element This moves the cursor to the end of the input field and then issues a series of backspace keys to delete the data in the field. """ value = element.get_attribute("value") actions = ActionChains(self.selenium.driver) actions.move_to_element(element).click().send_keys(Keys.END) for character in value: actions.send_keys(Keys.BACKSPACE) actions.perform() def populate_form(self, **kwargs): """Enters multiple values from a mapping into form fields.""" for name, value in kwargs.items(): self.populate_field(name, value) def remove_session_record(self, obj_type, obj_id): """Remove a record from the list of records that should be automatically removed.""" try: self._session_records.remove({"type": obj_type, "id": obj_id}) except ValueError: self.builtin.log( "Did not find record {} {} in the session records list".format( obj_type, obj_id ) ) def select_record_type(self, label): """Selects a record type while adding an object.""" self.wait_until_modal_is_open() locator = lex_locators["object"]["record_type_option"].format(label) self._jsclick(locator) self.selenium.click_button("Next") @capture_screenshot_on_error def select_app_launcher_app(self, app_name): """Navigates to a Salesforce App via the App Launcher""" locator = lex_locators["app_launcher"]["app_link"].format(app_name) self.open_app_launcher() self.selenium.wait_until_page_contains_element(locator, timeout=30) self.selenium.set_focus_to_element(locator) elem = self.selenium.get_webelement(locator) link = elem.find_element_by_xpath("../../..") self.selenium.set_focus_to_element(link) link.click() self.wait_until_modal_is_closed() @capture_screenshot_on_error def select_app_launcher_tab(self, tab_name): """Navigates to a tab via the App Launcher""" locator = lex_locators["app_launcher"]["tab_link"].format(tab_name) self.open_app_launcher() self.selenium.wait_until_page_contains_element(locator) self.selenium.set_focus_to_element(locator) self._jsclick(locator) self.wait_until_modal_is_closed() def salesforce_delete(self, obj_name, obj_id): """Deletes a Salesforce object by object name and Id. Example: The following example assumes that ``${contact id}`` has been previously set. The example deletes the Contact with that Id. | Salesforce Delete Contact ${contact id} """ self.builtin.log("Deleting {} with Id {}".format(obj_name, obj_id)) obj_class = getattr(self.cumulusci.sf, obj_name) obj_class.delete(obj_id) self.remove_session_record(obj_name, obj_id) def salesforce_get(self, obj_name, obj_id): """Gets a Salesforce object by Id and returns the result as a dict. Example: The following example assumes that ``${contact id}`` has been previously set. The example retrieves the Contact object with that Id and then logs the Name field. | &{contact}= Salesforce Get Contact ${contact id} | log Contact name: ${contact['Name']} """ self.builtin.log(f"Getting {obj_name} with Id {obj_id}") obj_class = getattr(self.cumulusci.sf, obj_name) return obj_class.get(obj_id) def salesforce_insert(self, obj_name, **kwargs): """Creates a new Salesforce object and returns the Id. The fields of the object may be defined with keyword arguments where the keyword name is the same as the field name. 
The object name and Id is passed to the *Store Session Record* keyword, and will be deleted when the keyword *Delete Session Records* is called. As a best practice, either *Delete Session Records* or *Delete Records and Close Browser* from Salesforce.robot should be called as a suite teardown. Example: The following example creates a new Contact with the first name of "Eleanor" and the last name of "Rigby". | ${contact id}= Salesforce Insert Contact | ... FirstName=Eleanor | ... LastName=Rigby """ self.builtin.log("Inserting {} with values {}".format(obj_name, kwargs)) obj_class = getattr(self.cumulusci.sf, obj_name) res = obj_class.create(kwargs) self.store_session_record(obj_name, res["id"]) return res["id"] def _salesforce_generate_object(self, obj_name, **fields): obj = {"attributes": {"type": obj_name}} # Object type to create obj.update(fields) return obj def generate_test_data(self, obj_name, number_to_create, **fields): """Generate bulk test data This returns an array of dictionaries with template-formatted arguments which can be passed to the *Salesforce Collection Insert* keyword. You can use ``{{number}}`` to represent the unique index of the row in the list of rows. If the entire string consists of a number, Salesforce API will treat the value as a number. Example: The following example creates three new Contacts: | @{objects} = Generate Test Data Contact 3 | ... Name=User {{number}} | ... Age={{number}} The example code will generate Contact objects with these fields: | [{'Name': 'User 0', 'Age': '0'}, | {'Name': 'User 1', 'Age': '1'}, | {'Name': 'User 2', 'Age': '2'}] Python Expression Syntax is allowed so computed templates like this are also allowed: ``{{1000 + number}}`` Python operators can be used, but no functions or variables are provided, so mostly you just have access to mathematical and logical operators. The Python operators are described here: https://www.digitalocean.com/community/tutorials/how-to-do-math-in-python-3-with-operators Contact the CCI team if you have a use-case that could benefit from more expression language power. Templates can also be based on faker patterns like those described here: https://faker.readthedocs.io/en/master/providers.html Most examples can be pasted into templates verbatim: | @{objects}= Generate Test Data Contact 200 | ... Name={{fake.first_name}} {{fake.last_name}} | ... MailingStreet={{fake.street_address}} | ... MailingCity=New York | ... MailingState=NY | ... MailingPostalCode=12345 | ... Email={{fake.email(domain="salesforce.com")}} """ objs = [] for i in range(int(number_to_create)): formatted_fields = { name: format_str(value, {"number": i}) for name, value in fields.items() } newobj = self._salesforce_generate_object(obj_name, **formatted_fields) objs.append(newobj) return objs def salesforce_collection_insert(self, objects): """Inserts records that were created with *Generate Test Data*. _objects_ is a list of data, typically generated by the *Generate Test Data* keyword. A 200 record limit is enforced by the Salesforce APIs. The object name and Id is passed to the *Store Session Record* keyword, and will be deleted when the keyword *Delete Session Records* is called. As a best practice, either *Delete Session Records* or **Delete Records and Close Browser* from Salesforce.robot should be called as a suite teardown. Example: | @{objects}= Generate Test Data Contact 200 | ... FirstName=User {{number}} | ... 
LastName={{fake.last_name}} | Salesforce Collection Insert ${objects} """ assert ( not obj.get("id", None) for obj in objects ), "Insertable objects should not have IDs" assert len(objects) <= SF_COLLECTION_INSERTION_LIMIT, ( "Cannot insert more than %s objects with this keyword" % SF_COLLECTION_INSERTION_LIMIT ) records = self.cumulusci.sf.restful( "composite/sobjects", method="POST", json={"allOrNone": True, "records": objects}, ) for idx, (record, obj) in enumerate(zip(records, objects)): if record["errors"]: raise AssertionError( "Error on Object {idx}: {record} : {obj}".format(**vars()) ) self.store_session_record(obj["attributes"]["type"], record["id"]) obj["id"] = record["id"] obj[STATUS_KEY] = record return objects def salesforce_collection_update(self, objects): """Updates records described as Robot/Python dictionaries. _objects_ is a dictionary of data in the format returned by the *Salesforce Collection Insert* keyword. A 200 record limit is enforced by the Salesforce APIs. Example: The following example creates ten accounts and then updates the Rating from "Cold" to "Hot" | ${data}= Generate Test Data Account 10 | ... Name=Account #{{number}} | ... Rating=Cold | ${accounts}= Salesforce Collection Insert ${data} | | FOR ${account} IN @{accounts} | Set to dictionary ${account} Rating Hot | END | Salesforce Collection Update ${accounts} """ for obj in objects: assert obj[ "id" ], "Should be a list of objects with Ids returned by Salesforce Collection Insert" if STATUS_KEY in obj: del obj[STATUS_KEY] assert len(objects) <= SF_COLLECTION_INSERTION_LIMIT, ( "Cannot update more than %s objects with this keyword" % SF_COLLECTION_INSERTION_LIMIT ) records = self.cumulusci.sf.restful( "composite/sobjects", method="PATCH", json={"allOrNone": True, "records": objects}, ) for record, obj in zip(records, objects): obj[STATUS_KEY] = record for idx, (record, obj) in enumerate(zip(records, objects)): if record["errors"]: raise AssertionError( "Error on Object {idx}: {record} : {obj}".format(**vars()) ) def salesforce_query(self, obj_name, **kwargs): """Constructs and runs a simple SOQL query and returns a list of dictionaries. By default the results will only contain object Ids. You can specify a SOQL SELECT clause via keyword arguments by passing a comma-separated list of fields with the ``select`` keyword argument. You can supply keys and values to match against in keyword arguments, or a full SOQL where-clause in a keyword argument named ``where``. If you supply both, they will be combined with a SOQL "AND". ``order_by`` and ``limit`` keyword arguments are also supported as shown below. Examples: The following example searches for all Contacts where the first name is "Eleanor". It returns the "Name" and "Id" fields and logs them to the robot report: | @{records}= Salesforce Query Contact select=Id,Name | ... FirstName=Eleanor | FOR ${record} IN @{records} | log Name: ${record['Name']} Id: ${record['Id']} | END Or with a WHERE-clause, we can look for the last contact where the first name is NOT Eleanor. | @{records}= Salesforce Query Contact select=Id,Name | ... where=FirstName!='Eleanor' | ... order_by=LastName desc | ... 
        |  ...  limit=1
        """
        query = self._soql_query_builder(obj_name, **kwargs)
        self.builtin.log("Running SOQL Query: {}".format(query))
        return self.cumulusci.sf.query_all(query).get("records", [])

    def _soql_query_builder(
        self, obj_name, select=None, order_by=None, limit=None, where=None, **kwargs
    ):
        query = "SELECT "
        if select:
            query += select
        else:
            query += "Id"
        query += " FROM {}".format(obj_name)
        where_clauses = []
        if where:
            where_clauses = [where]
        for key, value in kwargs.items():
            where_clauses.append("{} = '{}'".format(key, value))
        if where_clauses:
            query += " WHERE " + " AND ".join(where_clauses)
        if order_by:
            query += " ORDER BY " + order_by
        if limit:
            assert int(limit), "Limit should be an integer"
            query += f" LIMIT {limit}"

        return query

    def salesforce_update(self, obj_name, obj_id, **kwargs):
        """Updates a Salesforce object by Id.

        The keyword returns the result from the underlying
        simple_salesforce ``update`` method, which is an HTTP
        status code. As with `Salesforce Insert`, field values
        are specified as keyword arguments.

        The following example assumes that ${contact id} has been
        previously set, and adds a Description to the given contact.

        | ${result}=  Salesforce Update  Contact  ${contact id}
        | ...  Description=This Contact created during a test
        | Should be equal as numbers  ${result}  204
        """
        self.builtin.log(
            "Updating {} {} with values {}".format(obj_name, obj_id, kwargs)
        )
        obj_class = getattr(self.cumulusci.sf, obj_name)
        return obj_class.update(obj_id, kwargs)

    def soql_query(self, query):
        """Runs a simple SOQL query and returns the dict results

        The _query_ parameter must be a properly quoted SOQL query statement.

        The return value is a dictionary. The dictionary contains the keys
        as documented for the raw API call. The most useful key is ``records``,
        which contains a list of records which were matched by the query.

        Example

        The following example searches for all Contacts with a first name
        of "Eleanor" and a last name of "Rigby", and then prints the name
        of the first record found.

        | ${result}=  SOQL Query
        | ...  SELECT Name, Id FROM Contact WHERE FirstName='Eleanor' AND LastName='Rigby'
        | Run keyword if  len($result['records']) == 0  Fail  No records found
        |
        | ${contact}=  Get from list  ${result['records']}  0
        | Should be equal  ${contact['Name']}  Eleanor Rigby
        """
        self.builtin.log("Running SOQL Query: {}".format(query))
        return self.cumulusci.sf.query_all(query)

    def store_session_record(self, obj_type, obj_id):
        """Stores a Salesforce record's Id for use in the *Delete Session Records* keyword.

        This keyword is automatically called by *Salesforce Insert*.
        """
        self.builtin.log("Storing {} {} to session records".format(obj_type, obj_id))
        self._session_records.append({"type": obj_type, "id": obj_id})

    @capture_screenshot_on_error
    def wait_until_modal_is_open(self):
        """Wait for modal to open"""
        self.selenium.wait_until_page_contains_element(
            lex_locators["modal"]["is_open"],
            timeout=15,
            error="Expected to see a modal window, but didn't",
        )

    def wait_until_modal_is_closed(self):
        """Wait for modal to close"""
        self.selenium.wait_until_page_does_not_contain_element(
            lex_locators["modal"]["is_open"], timeout=15
        )

    def wait_until_loading_is_complete(self, locator=None):
        """Wait for LEX page to load.

        (We're actually waiting for the actions ribbon to appear.)
        """
        locator = lex_locators["body"] if locator is None else locator
        try:
            self.selenium.wait_until_page_contains_element(locator)
            self.wait_for_aura()
            # this knowledge article recommends waiting a second. I don't
            # like it, but it seems to help. We should do a wait instead,
            # but I can't figure out what to wait on.
            # https://help.salesforce.com/articleView?id=000352057&language=en_US&mode=1&type=1
            time.sleep(1)

        except Exception:
            try:
                self.selenium.capture_page_screenshot()
            except Exception as e:
                self.builtin.warn("unable to capture screenshot: {}".format(str(e)))
            raise

    @capture_screenshot_on_error
    def wait_until_salesforce_is_ready(self, locator=None, timeout=None, interval=5):
        """Waits until we are able to render the initial salesforce landing page

        It will continue to refresh the page until we land on a
        lightning page or until a timeout has been reached. The
        timeout can be specified in any time string supported by
        robot (eg: number of seconds, "3 minutes", etc.). If not
        specified, the default selenium timeout will be used.

        This keyword will wait a few seconds between each refresh, as
        well as wait after each refresh for the page to fully render
        (ie: it calls wait_for_aura())
        """
        # Note: we can't just ask selenium to wait for an element,
        # because the org might not be available due to infrastructure
        # issues (eg: the domain not being propagated). In such a case
        # the element will never come. Instead, what we need to do is
        # repeatedly refresh the page until the org responds.
        #
        # This assumes that any lightning page is a valid stopping
        # point. If salesforce starts rendering error pages with
        # lightning, or an org's default home page is not a lightning
        # page, we may have to rethink that strategy.
        interval = 5  # seconds between each refresh.
        timeout = timeout if timeout else self.selenium.get_selenium_timeout()
        timeout_seconds = timestr_to_secs(timeout)
        start_time = time.time()
        login_url = self.cumulusci.login_url()
        locator = lex_locators["body"] if locator is None else locator
        while True:
            try:
                self.selenium.wait_for_condition(
                    "return (document.readyState == 'complete')"
                )
                self.wait_for_aura()
                # If the following doesn't throw an error, we're good to go.
                self.selenium.get_webelement(locator)
                break

            except Exception as e:
                self.builtin.log(
                    "caught exception while waiting: {}".format(str(e)), "DEBUG"
                )
                if time.time() - start_time > timeout_seconds:
                    self.selenium.log_location()
                    raise Exception("Timed out waiting for a lightning page")

            # known edge cases that can be worked around
            if self._check_for_login_failure():
                continue
            elif self._check_for_classic():
                continue

            # not a known edge case; take a deep breath and
            # try again.
            time.sleep(interval)
            self.selenium.go_to(login_url)

    def breakpoint(self):
        """Serves as a breakpoint for the robot debugger

        Note: this keyword is a no-op unless the debug option for
        the task has been set to True. Unless the option has been
        set, this keyword will have no effect on a running test.
        """
        return None

    def _check_for_classic(self):
        """Switch to lightning if we land on a classic page

        This seems to happen randomly, causing tests to fail
        catastrophically. The idea is to detect such a case and
        auto-click the "switch to lightning" link
        """
        try:
            # we don't actually want to wait here, but if we don't
            # explicitly wait, we'll implicitly wait longer than
            # necessary. This needs to be a quick-ish check.
            self.selenium.wait_until_element_is_visible(
                "class:switch-to-lightning", timeout=2
            )
            self.builtin.log(
                "It appears we are on a classic page; attempting to switch to lightning",
                "WARN",
            )
            # this screenshot should be removed at some point,
            # but for now I want to make sure we see what the
            # page looks like if we get here.
            self.selenium.capture_page_screenshot()

            # just in case there's a modal present we'll try simulating
            # the escape key. Then, click on the switch-to-lightning link
            self.selenium.press_keys(None, "ESC")
            self.builtin.sleep("1 second")
            self.selenium.click_link("class:switch-to-lightning")
            return True

        except (NoSuchElementException, AssertionError):
            return False

    def _check_for_login_failure(self):
        """Handle the case where we land on a login screen

        Sometimes we get redirected to a login URL rather than
        being logged in, and we've yet to figure out precisely why
        that happens. Experimentation shows that authentication has
        already happened, so in this case we'll try going back to
        the instance url rather than the front door servlet.

        Admittedly, this is a bit of a hack, but it's better than
        never getting past this redirect.
        """
        location = self.selenium.get_location()
        if "//test.salesforce.com" in location or "//login.salesforce.com" in location:
            login_url = self.cumulusci.org.config["instance_url"]
            self.builtin.log(f"setting login_url temporarily to {login_url}", "DEBUG")
            self.selenium.go_to(login_url)
            return True
        return False

    def elapsed_time_for_last_record(
        self, obj_name, start_field, end_field, order_by, **kwargs
    ):
        """For records representing jobs or processes, compare the
        record's start-time to its end-time to see how long a process took.

        Arguments:
            obj_name:     SObject to look for last record
            start_field:  Name of the datetime field that represents the process start
            end_field:    Name of the datetime field that represents the process end
            order_by:     Field name to order by. Should be a datetime field,
                          and usually is just the same as end_field.
            where:        Optional Where-clause to use for filtering

        Other keywords are used for filtering as in the Salesforce Query keyword.

        The last matching record is queried and summarized.

        Example:

        ${time_in_seconds} =    Elapsed Time For Last Record
        ...             obj_name=AsyncApexJob
        ...             where=ApexClass.Name='BlahBlah'
        ...             start_field=CreatedDate
        ...             end_field=CompletedDate
        ...             order_by=CompletedDate
        """
        if len(order_by.split()) != 1:
            raise Exception("order_by should be a simple field name")
        query = self._soql_query_builder(
            obj_name,
            select=f"{start_field}, {end_field}",
            order_by=order_by + " DESC NULLS LAST",
            limit=1,
            **kwargs,
        )
        response = self.soql_query(query)
        results = response["records"]
        if results:
            record = results[0]
            return _duration(record[start_field], record[end_field], record)
        else:
            raise Exception(f"Matching record not found: {query}")

    def start_performance_timer(self):
        """Start an elapsed time stopwatch for performance tests.

        See the documentation for **Stop Performance Timer** for more
        information.

        Example:

        Start Performance Timer
        Do Something
        Stop Performance Timer
        """
        BuiltIn().set_test_variable("${__start_time}", datetime.now())

    def stop_performance_timer(self):
        """Record the results of a stopwatch. For perf testing.

        This keyword uses Set Test Elapsed Time internally and therefore
        outputs in all of the ways described there.

        Example:

        Start Performance Timer
        Do Something
        Stop Performance Timer
        """
        builtins = BuiltIn()
        start_time = builtins.get_variable_value("${__start_time}")
        if start_time:
            seconds = (datetime.now() - start_time).seconds
            assert seconds is not None
            self.set_test_elapsed_time(seconds)
        else:
            raise Exception(
                "Elapsed time clock was not started. "
                "Use the Start Elapsed Time keyword to do so."
            )

    def set_test_elapsed_time(self, elapsedtime):
        """This keyword captures a computed rather than measured
        elapsed time for performance tests.
For example, if you were performance testing a Salesforce batch process, you might want to store the Salesforce-measured elapsed time of the batch process instead of the time measured in the CCI client process. The keyword takes a single argument which is either a number of seconds or a Robot time string (https://robotframework.org/robotframework/latest/libraries/DateTime.html#Time%20formats). Using this keyword will automatically add the tag cci_metric_elapsed_time to the test case and ${cci_metric_elapsed_time} to the test's variables. cci_metric_elapsed_time is not included in Robot's html statistical roll-ups. Example: Set Test Elapsed Time 11655.9 Performance test times are output in the CCI logs and are captured in MetaCI instead of the "total elapsed time" measured by Robot Framework. The Robot "test message" is also updated.""" builtins = BuiltIn() try: seconds = float(elapsedtime) except ValueError: seconds = timestr_to_secs(elapsedtime) assert seconds is not None builtins.set_test_message(f"Elapsed time set by test : {seconds}") builtins.set_tags("cci_metric_elapsed_time") builtins.set_test_variable("${cci_metric_elapsed_time}", seconds) def set_test_metric(self, metric: str, value=None): """This keyword captures any metric for performance monitoring. For example: number of queries, rows processed, CPU usage, etc. The keyword takes a metric name, which can be any string, and a value, which can be any number. Using this keyword will automatically add the tag cci_metric to the test case and ${cci_metric_<metric_name>} to the test's variables. These permit downstream processing in tools like CCI and MetaCI. cci_metric is not included in Robot's html statistical roll-ups. Example: | Set Test Metric Max_CPU_Percent 30 Performance test metrics are output in the CCI logs, log.html and output.xml. MetaCI captures them but does not currently have a user interface for displaying them.""" builtins = BuiltIn() value = float(value) builtins.set_tags("cci_metric") builtins.set_test_variable("${cci_metric_%s}" % metric, value) @capture_screenshot_on_error def input_form_data(self, *args): """Fill in one or more labeled input fields fields with data Arguments should be pairs of field labels and values. Labels for required fields should not include the asterisk. Labels must be exact, including case. This keyword uses the keyword *Locate Element by Label* to locate elements. More details about how elements are found are in the documentation for that keyword. For most input form fields the actual value string will be used. For a checkbox, passing the value "checked" will check the checkbox and any other value will uncheck it. Using "unchecked" is recommended for clarity. Example: | Input form data | ... Opportunity Name The big one # required text field | ... Amount 1b # currency field | ... Close Date 4/01/2022 # date field | ... Private checked # checkbox | ... Type New Customer # combobox | ... Primary Campaign Source The Big Campaign # picklist This keyword will eventually replace the "populate form" keyword once it has been more thoroughly tested in production. 
""" it = iter(args) errors = [] for label, value in list(zip(it, it)): # this uses our custom "label" locator strategy locator = f"label:{label}" # FIXME: we should probably only wait for the first label; # after that we can assume the fields have been rendered # so that we fail quickly if we can't find the element element = self.selenium.get_webelement(locator) handler = get_form_handler(element, locator) try: if handler: handler.set(value) else: raise Exception( f"No form handler found for tag '{element.tag_name}'" ) except Exception as e: errors.append(f"{label}: {str(e)}") if errors: message = "There were errors with the following fields:\n" message += "\n".join(errors) raise Exception(message) # FIXME: maybe we should automatically set the focus to some # other element to trigger any event handlers on the last # element? But what should we set the focus to? def locate_element_by_label(self, browser, locator, tag, constraints): """Find a lightning component, input, or textarea based on a label If the component is inside a fieldset, the fieldset label can be prefixed to the label with a double colon in order to disambiguate the label. (eg: Other address::First Name) If the label is inside nested ligntning components (eg: ``<lightning-input>...<lightning-combobox>...<label>``), the lightning component closest to the label will be returned (in this case, ``lightning-combobox``). If a lightning component cannot be found for the label, an attempt will be made to find an input or textarea associated with the label. This is registered as a custom locator strategy named "label" Example: The following example is for a form with a formset named "Expected Delivery Date", and inside of that a date input field with a label of "Date". These examples produce identical results: | ${element}= Locate element by label Expected Delivery Date::Date | ${element}= Get webelement label:Expected Delivery Date::Date """ if "::" in locator: fieldset, label = [x.strip() for x in locator.split("::", 1)] fieldset_prefix = f'//fieldset[.//*[.="{fieldset}"]]' else: label = locator fieldset_prefix = "" xpath = fieldset_prefix + ( # a label with the given text, optionally with a leading # or trailing "*" (ie: required field) f'//label[.="{label}" or .="*{label}" or .="{label}*"]' # then find the nearest ancestor lightning component '/ancestor::*[starts-with(local-name(), "lightning-")][1]' ) elements = browser.find_elements_by_xpath(xpath) if not elements: # fall back to finding an input or textarea based on the 'for' # attribute of a label xpath = fieldset_prefix + ( "//*[self::input or self::textarea]" f'[@id=string(//label[.="{label}" or .="*{label}" or .="{label}*"]/@for)]' ) elements = browser.find_elements_by_xpath(xpath) return elements def _duration(start_date: str, end_date: str, record: dict): try: start_date = parse_date(start_date) end_date = parse_date(end_date) except (ParserError, TypeError) as e: raise Exception(f"Date parse error: {e} in record {record}") duration = end_date - start_date return duration.total_seconds()
click_related_item_link
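# A standalone sketch of the WHERE-clause assembly performed by
# _soql_query_builder above; values are interpolated unescaped here,
# exactly as in the original, so inputs must be trusted.
def soql_query_builder(obj_name, select="Id", where=None, order_by=None,
                       limit=None, **filters):
    clauses = ([where] if where else []) + [
        "{} = '{}'".format(key, value) for key, value in filters.items()
    ]
    query = "SELECT {} FROM {}".format(select, obj_name)
    if clauses:
        query += " WHERE " + " AND ".join(clauses)
    if order_by:
        query += " ORDER BY " + order_by
    if limit:
        query += " LIMIT {}".format(int(limit))
    return query

print(soql_query_builder("Contact", select="Id,Name",
                         FirstName="Eleanor", order_by="LastName"))
# SELECT Id,Name FROM Contact WHERE FirstName = 'Eleanor' ORDER BY LastName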
usersusernameorganizations_api_test.go
package userorganization

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestFilterOrgs(t *testing.T) {
	type testcase struct {
		scopes   []string
		orgs     []string
		expected []string
	}

	testcases := []testcase{
{orgs: []string{"parentorg.suborg1", "parentorg.suborg2"}, scopes: []string{"user:organizations:parentorg"}, expected: []string{"parentorg.suborg1", "parentorg.suborg2"}}, {orgs: []string{"parentorg.suborg1.child", "parentorg.suborg2.child"}, scopes: []string{"user:organizations:parentorg.suborg1", "user:organizations:parentorg.suborg2"}, expected: []string{"parentorg.suborg1.child", "parentorg.suborg2.child"}}, } for _, test := range testcases { assert.Equal(t, test.expected, filterOrgs(test.orgs, test.scopes), "Failed") } }
		{orgs: []string{}, scopes: []string{}, expected: []string{}},
		{orgs: []string{"parentorg", "parentorg.suborg1", "parentorg.suborg2"},
			scopes:   []string{"user:organizations:parentorg"},
			expected: []string{"parentorg", "parentorg.suborg1", "parentorg.suborg2"}},
		{orgs: []string{"parentorg", "parentorg.suborg1", "parentorg.suborg2"},
			scopes:   []string{"user:organizations:parent"},
			expected: []string{}},
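# One reading of the behavior pinned down by TestFilterOrgs, sketched
# in Python: an org is kept when some scope names it or an ancestor.
# This is inferred from the test table, not from the actual filterOrgs
# implementation.
SCOPE_PREFIX = "user:organizations:"

def filter_orgs(orgs, scopes):
    roots = [s[len(SCOPE_PREFIX):] for s in scopes if s.startswith(SCOPE_PREFIX)]
    return [o for o in orgs
            if any(o == r or o.startswith(r + ".") for r in roots)]

print(filter_orgs(["parentorg", "parentorg.suborg1"],
                  ["user:organizations:parentorg"]))
# ['parentorg', 'parentorg.suborg1']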
1566862777462-Initial.ts
// Imports
import {
  MigrationInterface,
  QueryRunner,
  Table,
  TableForeignKey,
} from 'typeorm';

// Migration
export default class Initial1566862777462 implements MigrationInterface {
  public async up(queryRunner: QueryRunner): Promise<void> {
    // Tables
    const userTable = new Table({
      name: 'users',
      columns: [
        {
          type: 'varchar',
          name: 'id',
          isPrimary: true,
          isNullable: false,
          length: '16',
        },
        {
          // Column name
          name: 'createdAt',
          // Column type
          type: 'datetime',
          // Non-nullable
          isNullable: false,
          // Default value
          default: "datetime('now')",
        },
        {
          // Column name
          name: 'lastModifiedAt',
          // Column type
          type: 'datetime',
          // Non-nullable
          isNullable: false,
          // Default value
          default: "datetime('now')",
        },
        {
          type: 'varchar',
          name: 'firstName',
          isNullable: false,
          length: '96',
        },
        {
          type: 'varchar',
          name: 'lastName',
          isNullable: false,
          length: '96',
        },
        {
          type: 'varchar',
          name: 'emailAddress',
          isUnique: true,
          isNullable: false,
          length: '127',
        },
        {
          type: 'varchar',
          name: 'password',
          isNullable: false,
        },
const courseTable = new Table({ name: 'courses', columns: [ { type: 'varchar', name: 'id', isPrimary: true, isNullable: false, length: '16', }, { // Column name name: 'createdAt', // Column type type: 'datetime', // Non-nullable isNullable: false, // Default value default: "datetime('now')", }, { // Column name name: 'lastModifiedAt', // Column type type: 'datetime', // Non-nullable isNullable: false, // Default value default: "datetime('now')", }, { type: 'varchar', name: 'title', isNullable: false, length: '127', }, { type: 'text', name: 'description', isNullable: false, }, { type: 'varchar', name: 'estimatedTime', isNullable: true, }, { type: 'varchar', name: 'materialsNeeded', isNullable: true, }, { type: 'varchar', name: 'creatorId', isNullable: false, length: '16', }, ], }); // Foreign keys const creatorFk = new TableForeignKey({ columnNames: ['creatorId'], referencedColumnNames: ['id'], referencedTableName: 'users', onDelete: 'CASCADE', }); // Create tables await queryRunner.createTable(userTable); await queryRunner.createTable(courseTable); // Create foreign key await queryRunner.createForeignKey(courseTable, creatorFk); } public async down(queryRunner: QueryRunner): Promise<void> { // Get user and course tables const userTable = await queryRunner.getTable('users'); const courseTable = await queryRunner.getTable('courses'); // Get foreign key const creatorFk = courseTable?.foreignKeys.find((fk) => fk.columnNames.includes('creatorId') ); // Drop foreign key if present if (courseTable && creatorFk) await queryRunner.dropForeignKey(courseTable, creatorFk); // Drop tables if present if (courseTable) await queryRunner.dropTable(courseTable); if (userTable) await queryRunner.dropTable(userTable); } }
], });
packs.client.config.js
(function () { 'use strict'; angular .module('packs') .run(menuConfig); menuConfig.$inject = ['menuService']; function
(menuService) { // Set top bar menu items /*menuService.addMenuItem('topbar', { title: 'Packs', state: 'packs', type: 'dropdown', roles: ['admin'] }); // Add the dropdown list item menuService.addSubMenuItem('topbar', 'packs', { title: 'List Packs', state: 'packs.list' }); // Add the dropdown create item menuService.addSubMenuItem('topbar', 'packs', { title: 'Create Pack', state: 'packs.create' });*/ } }());
menuConfig
manualDel.py
from cs50 import SQL db = SQL("sqlite:///immuns.db") global currentUser def
(number, curUser): stem = db.execute("SELECT * FROM :dataBase WHERE id=:ids", dataBase=curUser, ids=number) for stoop in stem: comm = stoop["committee"] db.execute("UPDATE generalList SET delegate_name = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"]) db.execute("UPDATE generalList SET delegate_school = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"]) if comm[-2:] == "MS": db.execute("UPDATE msen SET delegate_name = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"]) db.execute("UPDATE mssp SET delegate_name = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"]) db.execute("UPDATE msen SET delegate_school = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"]) db.execute("UPDATE mssp SET delegate_school = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"]) elif comm[-2:] == "HS": db.execute("UPDATE hsen SET delegate_name = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"]) db.execute("UPDATE hssp SET delegate_name = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"]) db.execute("UPDATE hsen SET delegate_school = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"]) db.execute("UPDATE hssp SET delegate_school = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"]) else: db.execute("UPDATE hsen SET delegate_name = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"]) db.execute("UPDATE hsen SET delegate_school = '' WHERE committee=:com AND country=:con", com=stoop["committee"] ,con=stoop["country"]) db.execute("DELETE FROM :dataBase WHERE id=:ids", dataBase=curUser, ids=number) currentUser = "GuillermoLopezIndividualGuillermo" numberList = [1] for number in numberList: manualDel(number, currentUser)
manualDel
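The branching in manualDel repeats the same pair of UPDATEs for each committee table. As a readability aid only, here is a hedged sketch of the same behaviour with the table choice factored out (clear_delegate and tables_for_committee are hypothetical names, not part of the original script):

def tables_for_committee(committee):
    # "MS" committees touch the middle-school tables, "HS" the high-school
    # ones; the original's else-branch only touches hsen, mirrored here.
    if committee[-2:] == "MS":
        return ["msen", "mssp"]
    elif committee[-2:] == "HS":
        return ["hsen", "hssp"]
    return ["hsen"]

def clear_delegate(db, committee, country):
    for table in ["generalList"] + tables_for_committee(committee):
        for column in ("delegate_name", "delegate_school"):
            # table/column names come from the fixed lists above, never from
            # user input, so interpolating them here is safe
            db.execute(f"UPDATE {table} SET {column} = '' WHERE committee=:com AND country=:con",
                       com=committee, con=country)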
VtkRenderer.py
import vtk from numpy import random import numpy as np import vtk.util.numpy_support as converter import time import cv2 import itertools class VtkText: def __init__(self, text, pos): self.text = text self.pos = pos def get_vtk_text(self): txt = vtk.vtkTextActor() txt.SetInput(self.text) txtprop=txt.GetTextProperty() txtprop.SetFontFamilyToArial() txtprop.SetFontSize(18) txtprop.SetColor(1,1,1) txt.SetDisplayPosition(*self.pos) return txt class VtkLine: def __init__(self, p0, p1): self.p0 = p0 self.p1 = p1 def get_vtk_line(self): source = vtk.vtkLineSource() source.SetPoint1(self.p0) source.SetPoint2(self.p1) # mapper mapper = vtk.vtkPolyDataMapper() mapper.SetInputConnection(source.GetOutputPort()) # actor actor = vtk.vtkActor() actor.SetMapper(mapper) # assign actor to the renderer return actor class VtkPlane: def __init__(self, norm, xyz): self.norm = norm self.xyz = xyz def get_vtk_plane(self, side_len=25): # cube = vtk.vtkCubeSource() # cube.SetXLength(side_len) # cube.SetYLength(side_len) # cube.SetZLength(side_len) # cube.SetCenter(*self.pos) cube = vtk.vtkSphereSource() cube.SetThetaResolution(100) cube.SetPhiResolution(100) cube.SetRadius(side_len) cube.SetCenter(*self.xyz) cubeMapper = vtk.vtkPolyDataMapper() cubeMapper.SetInputConnection(cube.GetOutputPort()) plane = vtk.vtkPlane() plane.SetOrigin(*self.xyz) plane.SetNormal(*self.norm) #create cutter cutter = vtk.vtkCutter() cutter.SetCutFunction(plane) cutter.SetInputConnection(cube.GetOutputPort()) cutter.Update() cutStrips = vtk.vtkStripper() cutStrips.SetInputConnection(cutter.GetOutputPort()) cutStrips.Update() cutPoly = vtk.vtkPolyData() cutPoly.SetPoints((cutStrips.GetOutput()).GetPoints()) cutPoly.SetPolys((cutStrips.GetOutput()).GetLines()) cutMapper = vtk.vtkPolyDataMapper() cutMapper.SetInput(cutPoly) cutActor = vtk.vtkActor() cutActor.GetProperty().SetColor(1, 1, 1) cutActor.SetMapper(cutMapper) return cutActor class VtkImage: def __init__(self, im): self.im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) def get_vtk_image(self): importer = vtk.vtkImageImport() importer.SetDataSpacing(1,1,1) importer.SetDataOrigin(0,0,0) importer.SetWholeExtent(0, self.im.shape[1] - 1, 0, self.im.shape[0] - 1, 0, 0) importer.SetDataExtentToWholeExtent() importer.SetDataScalarTypeToUnsignedChar() importer.SetNumberOfScalarComponents(self.im.shape[2]) importer.SetImportVoidPointer(self.im) importer.Update() flipY = vtk.vtkImageFlip() flipY.SetFilteredAxis(1) flipY.SetInputConnection(importer.GetOutputPort()) flipY.Update() yActor = vtk.vtkImageActor() yActor.SetInput(flipY.GetOutput()) return yActor class VtkEllipsoid: def __init__(self, T): self.T = (T[0,0], T[0,1], T[0,2], T[0,3], T[1,0], T[1,1], T[1,2], T[1,3], T[2,0], T[2,1], T[2,2], T[2,3], T[3,0], T[3,1], T[3,2], T[3,3]) def get_vtk_ellipsoid(self): self.transformMatrix = vtk.vtkMatrix4x4() self.transformMatrix.DeepCopy(self.T) transform = vtk.vtkTransform() transform.SetMatrix(self.transformMatrix) self.source = vtk.vtkSphereSource() self.source.SetRadius(1.0) self.source.SetCenter(0.0,0.0,0.0) transformFilter = vtk.vtkTransformPolyDataFilter() transformFilter.SetTransform(transform) transformFilter.SetInputConnection(self.source.GetOutputPort()) transformFilter.Update() mapper = vtk.vtkPolyDataMapper() mapper.SetInputConnection(transformFilter.GetOutputPort()) actor = vtk.vtkActor() actor.GetProperty().SetOpacity(0.8) actor.SetMapper(mapper) # assign actor to the renderer return actor class VtkBoundingBox: def __init__(self, properties): # (x, y) is the center-back of the car (x, y, z, 
l, w) = tuple(properties[:5]) h = 1 x = [x, x+l] y = [y-w/2., y+w/2.] z = [z-h/2., z+h/2.] self.bounds = (x[0], x[1], y[0], y[1], z[0], z[1]) self.actor = None self.source = None def get_vtk_box(self, rot = 0): # create source source = vtk.vtkCubeSource() source.SetBounds(self.bounds) self.source = source # mapper mapper = vtk.vtkPolyDataMapper() mapper.SetInputConnection(source.GetOutputPort()) # actor actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetRepresentationToWireframe() actor.GetProperty().SetLineWidth(1) actor.GetProperty().LightingOff() actor.SetOrigin(source.GetCenter()) actor.RotateZ(rot) self.actor = actor # assign actor to the renderer return actor """ Uses VTK to render a point cloud based on intensities, which might be floating point numbers or RGB values. Disclaimer: This function passes points to vtk, so make sure that your data does not get deallocated by python. Some things are copied too. It's not really an efficient function and it's a slight miracle that it even works. For internal VTK debugging, here's the layout of things: - vtkPoints consist of the x,y,z coordinates. Pass in an array of size m x 3 (where m is the number of points) - vtkCells tells vtk how to render the points. It is formatted as "1 1 1 2 1 3 ... 1 m" where the 1 tells how many points it should consider in a surface and the element that follows says which point id to use. The function build_vtk_polydata will do the assembling magic. Then you can call get_vtk_color_cloud or get_vtk_cloud based on how you want to color map each point. """ class VtkPointCloud: def
(self, xyz, intensity): self.xyz = np.ascontiguousarray(xyz) self.intensity = np.ascontiguousarray(intensity) num_points = self.xyz.shape[0] np_cells_A = np.ones(num_points,dtype=np.int64) np_cells_B = np.arange(0,num_points,dtype=np.int64) self.np_cells = np.empty(2*num_points,dtype=np.int64) self.np_cells[::2] = np_cells_A self.np_cells[1::2] = np_cells_B self.actor = None def build_vtk_polydata(self): vtkPolyData = vtk.vtkPolyData() vtkPoints = vtk.vtkPoints() vtkCells = vtk.vtkCellArray() vtkPolyData.SetPoints(vtkPoints) vtkPolyData.SetVerts(vtkCells) num_points = self.xyz.shape[0] vtk_data = converter.numpy_to_vtk(self.xyz) vtkPoints.SetNumberOfPoints(num_points) vtkPoints.SetData(vtk_data) vtkCells.SetCells(num_points, converter.numpy_to_vtkIdTypeArray(self.np_cells, deep=1)) return (vtkPolyData, vtkPoints, vtkCells) def get_vtk_color_cloud(self): assert(self.intensity.shape[1] == 3) (vtkPolyData, vtkPoints, vtkCells) = self.build_vtk_polydata() self.intensity = self.intensity.astype(np.uint8) vtk_color_data = converter.numpy_to_vtk(self.intensity) vtk_color_data.SetName('ColorArray') vtkPolyData.GetPointData().SetScalars(vtk_color_data) vtkPolyData.GetPointData().SetActiveScalars('ColorArray') mapper = vtk.vtkPolyDataMapper() mapper.SetInput(vtkPolyData) vtkActor = vtk.vtkActor() vtkActor.SetMapper(mapper) self.actor = vtkActor return vtkActor def get_vtk_cloud(self, zMin=-10.0,zMax=10.0): assert( len(self.intensity.shape) == 1) (vtkPolyData, vtkPoints, vtkCells) = self.build_vtk_polydata() self.intensity = self.intensity.astype(np.float32) vtk_intensity_data = converter.numpy_to_vtk(self.intensity) vtk_intensity_data.SetName('DepthArray') vtkPolyData.GetPointData().SetScalars(vtk_intensity_data) num_points = self.xyz.shape[0] #vtkDepth = vtk.vtkFloatArray() #vtkDepth.SetName('DepthArray') #vtkPolyData.GetPointData().SetScalars(vtkDepth) #vtkDepth.SetVoidArray(self.intensity, num_points, 1) vtkPolyData.GetPointData().SetActiveScalars('DepthArray') mapper = vtk.vtkPolyDataMapper() mapper.SetInput(vtkPolyData) mapper.SetColorModeToDefault() mapper.SetScalarRange(zMin, zMax) mapper.SetScalarVisibility(1) vtkActor = vtk.vtkActor() vtkActor.SetMapper(mapper) self.actor = vtkActor return vtkActor ############# sample callback setup ############### class vtkTimerCallback(): def __init__(self): pass def execute(self,obj,event): t = time.time() data = 40*(random.random((60000,3))-0.5) pointCloud = VtkPointCloud(data, data[:,2]) iren = obj iren.GetRenderWindow().GetRenderers().GetFirstRenderer().RemoveActor(self.actor) self.actor = pointCloud.get_vtk_cloud() iren.GetRenderWindow().GetRenderers().GetFirstRenderer().AddActor(self.actor) iren.GetRenderWindow().Render() print time.time() - t if __name__ == '__main__': data = 40*(random.random((600,3))-0.5) pointCloud = VtkPointCloud(data, data[:,2]) actor = pointCloud.get_vtk_cloud() # Renderer renderer = vtk.vtkRenderer() renderer.AddActor(actor) renderer.SetBackground(0.0, 0.0, 0.) renderer.ResetCamera() # Render Window renderWindow = vtk.vtkRenderWindow() renderWindow.SetSize(600,600) renderWindow.AddRenderer(renderer) # Interactor renderWindowInteractor = vtk.vtkRenderWindowInteractor() renderWindowInteractor.SetRenderWindow(renderWindow) # Begin Interaction renderWindow.Render() renderWindowInteractor.Initialize() cb = vtkTimerCallback() cb.actor = actor renderWindowInteractor.AddObserver('TimerEvent', cb.execute) timerId = renderWindowInteractor.CreateRepeatingTimer(50) renderWindowInteractor.Start()
__init__
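The cell layout described in the VtkPointCloud docstring above is easiest to see with a tiny numpy example. Note the constructor actually uses zero-based point ids (np.arange(0, num_points)), so for m points the array reads "1 0 1 1 ... 1 m-1":

import numpy as np

m = 4
cells = np.empty(2 * m, dtype=np.int64)
cells[::2] = 1                # each cell is a single vertex
cells[1::2] = np.arange(m)    # the point id rendered by that cell
print(cells)                  # [1 0 1 1 1 2 1 3]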
test_parse_gctx.py
import logging import unittest import os import pandas as pd import numpy as np import h5py import pandas.util.testing as pandas_testing import cmapPy.pandasGEXpress.setup_GCToo_logger as setup_logger import cmapPy.pandasGEXpress.GCToo as GCToo import cmapPy.pandasGEXpress.parse_gctx as parse_gctx import cmapPy.pandasGEXpress.mini_gctoo_for_testing as mini_gctoo_for_testing import cmapPy.pandasGEXpress.subset_gctoo as subset_gctoo import cmapPy.pandasGEXpress.write_gctx as write_gctx __author__ = "Oana Enache" __email__ = "[email protected]" FUNCTIONAL_TESTS_PATH = "cmapPy/pandasGEXpress/tests/functional_tests/" logger = logging.getLogger(setup_logger.LOGGER_NAME) version_node = "version" rid_node = "/0/META/ROW/id" cid_node = "/0/META/COL/id" data_node = "/0/DATA/0/matrix" row_meta_group_node = "/0/META/ROW" col_meta_group_node = "/0/META/COL" class MockHdf5Dset(object): def __init__(self, data_list, dtype): self.data_list = data_list self.shape = (len(data_list),) self.dtype = dtype def read_direct(self, dest): for i in range(len(dest)): dest[i] = self.data_list[i] class TestParseGctx(unittest.TestCase): def test_parse(self): # parse whole thing mg1 = mini_gctoo_for_testing.make() mg2 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx") pandas_testing.assert_frame_equal(mg1.data_df, mg2.data_df) pandas_testing.assert_frame_equal(mg1.row_metadata_df, mg2.row_metadata_df) pandas_testing.assert_frame_equal(mg1.col_metadata_df, mg2.col_metadata_df) # test with string rid/cid test_rids = ['LJP007_MCF10A_24H:TRT_CP:BRD-K93918653:3.33', 'LJP007_MCF7_24H:CTL_VEHICLE:DMSO:-666'] test_cids = ['LJP007_MCF7_24H:TRT_POSCON:BRD-A61304759:10'] mg3 = subset_gctoo.subset_gctoo(mg1, rid=test_rids, cid=test_cids) mg4 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx", rid=test_rids, cid=test_cids) pandas_testing.assert_frame_equal(mg3.data_df, mg4.data_df) pandas_testing.assert_frame_equal(mg3.row_metadata_df, mg4.row_metadata_df) pandas_testing.assert_frame_equal(mg3.col_metadata_df, mg4.col_metadata_df) # first, make & write out temp version of mini_gctoo with int rids/cids new_mg = mini_gctoo_for_testing.make(convert_neg_666=False) int_indexed_data_df = new_mg.data_df.copy() int_indexed_data_df.index = [str(i) for i in range(0, 6)] int_indexed_data_df.columns = [str(i) for i in range(10, 16)] int_indexed_row_meta = new_mg.row_metadata_df.copy() int_indexed_row_meta.index = int_indexed_data_df.index int_indexed_col_meta = new_mg.col_metadata_df.copy() int_indexed_col_meta.index = int_indexed_data_df.columns int_indexed_gctoo = GCToo.GCToo(data_df=int_indexed_data_df, row_metadata_df=int_indexed_row_meta, col_metadata_df=int_indexed_col_meta) write_gctx.write(int_indexed_gctoo, "int_indexed_mini_gctoo.gctx") # test with numeric (repr as string) rid/cid mg5 = GCToo.GCToo(data_df=int_indexed_data_df, row_metadata_df=int_indexed_row_meta, col_metadata_df=int_indexed_col_meta) mg5 = subset_gctoo.subset_gctoo(mg5, row_bool=[True, False, True, False, True, False], col_bool=[True, False, False, True, True, True]) mg5.data_df.index.name = "rid" mg5.data_df.columns.name = "cid" mg5.row_metadata_df.index.name = "rid" mg5.row_metadata_df.columns.name = "rhd" mg5.col_metadata_df.index.name = "cid" mg5.col_metadata_df.columns.name = "chd" mg6 = parse_gctx.parse("int_indexed_mini_gctoo.gctx", rid=["0", "2", "4"], cid=["10", "13", "14", "15"], convert_neg_666=False) os.remove("int_indexed_mini_gctoo.gctx") 
pandas_testing.assert_frame_equal(mg5.data_df, mg6.data_df) pandas_testing.assert_frame_equal(mg5.row_metadata_df, mg6.row_metadata_df) pandas_testing.assert_frame_equal(mg5.col_metadata_df, mg6.col_metadata_df) # test with ridx/cidx mg7 = subset_gctoo.subset_gctoo(mg1, rid=['LJP007_MCF7_24H:CTL_VEHICLE:DMSO:-666'], cid=['LJP007_MCF7_24H:CTL_VEHICLE:DMSO:-666']) mg8 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx", ridx=[4], cidx=[4]) pandas_testing.assert_frame_equal(mg7.data_df, mg8.data_df) pandas_testing.assert_frame_equal(mg7.row_metadata_df, mg8.row_metadata_df) pandas_testing.assert_frame_equal(mg7.col_metadata_df, mg8.col_metadata_df) # test with rid/cidx mg9 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx", rid=['LJP007_MCF7_24H:CTL_VEHICLE:DMSO:-666'], cidx=[4]) pandas_testing.assert_frame_equal(mg7.data_df, mg9.data_df) pandas_testing.assert_frame_equal(mg7.row_metadata_df, mg9.row_metadata_df) pandas_testing.assert_frame_equal(mg7.col_metadata_df, mg9.col_metadata_df) # test with ridx/cid mg10 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx", ridx=[4], cid=['LJP007_MCF7_24H:CTL_VEHICLE:DMSO:-666']) pandas_testing.assert_frame_equal(mg7.data_df, mg10.data_df) pandas_testing.assert_frame_equal(mg7.row_metadata_df, mg10.row_metadata_df) pandas_testing.assert_frame_equal(mg7.col_metadata_df, mg10.col_metadata_df) # test with row_meta_only mg11 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx", row_meta_only=True) pandas_testing.assert_frame_equal(mg11, mg1.row_metadata_df) # test with col_meta_only mg12 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx", col_meta_only=True) pandas_testing.assert_frame_equal(mg12, mg1.col_metadata_df) # test with sort_col_meta False and cidx mg13 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx", cidx = [4,1,3], sort_col_meta= False) pandas_testing.assert_frame_equal(mg13.data_df, mg1.data_df.iloc[:, [4,1,3]]) pandas_testing.assert_frame_equal(mg13.col_metadata_df, mg1.col_metadata_df.iloc[[4,1,3],:]) pandas_testing.assert_frame_equal(mg13.row_metadata_df, mg1.row_metadata_df) # test with sort_row_meta False and ridx mg14 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx", ridx = [3,0,1], sort_row_meta= False) pandas_testing.assert_frame_equal(mg14.data_df, mg1.data_df.iloc[[3,0,1],:]) pandas_testing.assert_frame_equal(mg14.col_metadata_df, mg1.col_metadata_df) pandas_testing.assert_frame_equal(mg14.row_metadata_df, mg1.row_metadata_df.iloc[[3,0,1],:]) # test with sort_col_meta False and cidx and col_meta_only mg15 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx", cidx = [4,1,3], sort_col_meta= False, col_meta_only=True) pandas_testing.assert_frame_equal(mg15, mg1.col_metadata_df.iloc[[4,1,3],:]) # test with sort_row_meta False and ridx and row_meta_only mg16 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx", ridx = [3,0,1], sort_row_meta= False, row_meta_only=True) pandas_testing.assert_frame_equal(mg16, mg1.row_metadata_df.iloc[[3,0,1],:]) # test with sort_col_meta False and cid cid_unsorted = ['LJP007_MCF7_24H:TRT_POSCON:BRD-K81418486:10','LJP007_MCF10A_24H:TRT_CP:BRD-K93918653:3.33'] mg17 = 
parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx", cid = cid_unsorted, sort_col_meta= False) pandas_testing.assert_frame_equal(mg17.data_df, mg1.data_df.iloc[:, [2,0]]) pandas_testing.assert_frame_equal(mg17.col_metadata_df, mg1.col_metadata_df.iloc[[2,0],:]) pandas_testing.assert_frame_equal(mg17.row_metadata_df, mg1.row_metadata_df) # test with sort_row_meta False and rid rid_unsorted = ['LJP007_MCF7_24H:TRT_CP:BRD-K64857848:10', 'MISC003_A375_24H:TRT_CP:BRD-K93918653:3.33'] mg18 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests/mini_gctoo_for_testing.gctx", rid = rid_unsorted, sort_row_meta=False) pandas_testing.assert_frame_equal(mg18.data_df, mg1.data_df.iloc[[5,1], :]) pandas_testing.assert_frame_equal(mg18.col_metadata_df, mg1.col_metadata_df) pandas_testing.assert_frame_equal(mg18.row_metadata_df, mg1.row_metadata_df.iloc[[5,1],:]) def test_parse_rid_as_entrez_id(self): input_file = "cmapPy/pandasGEXpress/tests/functional_tests//test_parse_gctx_rid_entrez_id.gctx" g = parse_gctx.parse(input_file) self.assertEqual((5, 5), g.data_df.shape) logger.debug("g.data_df.index: {}".format(g.data_df.index)) my_rids = ["5720", "55847", "7416"] g = parse_gctx.parse(input_file, rid=my_rids) self.assertEqual((3, 5), g.data_df.shape) logger.debug("g.data_df.index: {}".format(g.data_df.index)) my_rids = [str(x) for x in my_rids] logger.debug("using rid as str (mismatched type) - my_rids: {}".format(my_rids)) g = parse_gctx.parse(input_file, rid=my_rids) self.assertEqual((3, 5), g.data_df.shape) logger.debug("g.data_df.index: {}".format(g.data_df.index)) def test_check_and_order_id_inputs(self): ridx = [0, 1] cidx = [2, 1] rid = ["a", "b", "c"] cid = ["l", "m", "n", "o"] row_meta = pd.DataFrame(index=["b", "c", "a", "d"]) col_meta = pd.DataFrame(index=["l", "m", "n", "o", "p", "q"]) # case 1: row and col lists are populated and same type self.assertEqual((sorted(ridx), sorted(cidx)), parse_gctx.check_and_order_id_inputs(None, ridx, None, cidx, row_meta, col_meta, sort_row_meta = True, sort_col_meta = True)) # case 2: row & col lists are populated, but of different types self.assertEqual((sorted(ridx), [0, 1, 2, 3]), parse_gctx.check_and_order_id_inputs(None, ridx, cid, None, row_meta, col_meta, sort_row_meta = True, sort_col_meta = True)) # case 3: row list and col lists are both None self.assertEqual(([0, 1, 2, 3], [0, 1, 2, 3, 4, 5]), parse_gctx.check_and_order_id_inputs(None, None, None, None, row_meta, col_meta, sort_row_meta = True, sort_col_meta = True)) # case 4: row list is populated, col list is None self.assertEqual(([0, 1, 2], [0, 1, 2, 3, 4, 5]), parse_gctx.check_and_order_id_inputs(rid, None, None, None, row_meta, col_meta, sort_row_meta = True, sort_col_meta = True)) def test_check_id_idx_exclusivity(self): ids = ["a", "b", "c"] idx = [0, 1, 2] # case 1: id != None and idx != None with self.assertRaises(Exception) as context: parse_gctx.check_id_idx_exclusivity(ids, idx) self.assertTrue("'id' and 'idx' fields can't both not be None" in str(context.exception)) # case 2: id != None self.assertEqual(("id", ids), parse_gctx.check_id_idx_exclusivity(ids, None)) # case 3: idx != None self.assertEqual(("idx", idx), parse_gctx.check_id_idx_exclusivity(None, idx)) # case 4: id == None & idx == None self.assertEqual((None, []), parse_gctx.check_id_idx_exclusivity(None, None)) def test_parse_metadata_df(self): mini_gctoo = mini_gctoo_for_testing.make() # convert row_metadata to np.nan mini_row_meta = 
mini_gctoo.row_metadata_df.replace([-666, "-666", -666.0], [np.nan, np.nan, np.nan]) logger.debug("mini_row_meta.shape: {}".format(mini_row_meta.shape)) logger.debug("mini_row_meta.index: {}".format(mini_row_meta.index)) logger.debug("mini_row_meta.columns: {}".format(mini_row_meta.columns)) logger.debug("mini_row_meta.dtypes: {}".format(mini_row_meta.dtypes)) gctx_file = h5py.File("cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx", "r") row_dset = gctx_file[row_meta_group_node] col_dset = gctx_file[col_meta_group_node] # with convert_neg_666 row_df = parse_gctx.parse_metadata_df("row", row_dset, True) logger.debug("row_df.dtypes: {}".format(row_df.dtypes)) pandas_testing.assert_frame_equal(mini_row_meta, row_df) # no convert_neg_666 mini_gctoo_with_neg_666 = mini_gctoo_for_testing.make(convert_neg_666=False) col_df = parse_gctx.parse_metadata_df("col", col_dset, False) pandas_testing.assert_frame_equal(mini_gctoo_with_neg_666.col_metadata_df, col_df) # test that ID's are not converted to numeric expected_rids = [str(i) for i in range(3)] row_dset = {"id": MockHdf5Dset(expected_rids, str), "other_meta": MockHdf5Dset(range(3, 6), str)} r = parse_gctx.parse_metadata_df("row", row_dset, True) logger.debug("test that ID's are not converted to numeric - r: {}".format(r)) logger.debug("r.index: {}".format(r.index)) self.assertEqual(set(expected_rids), set(r.index)) def test_replace_666(self): # convert_neg_666 is True row_df = pd.DataFrame([[3, "a"], [-666, "c"], ["-666", -666.0]], index=["r1", "r2", "r3"], columns=["rhd1", "rhd2"]) e_df = pd.DataFrame([[3, "a"], [np.nan, "c"], [np.nan, np.nan]], index=["r1", "r2", "r3"], columns=["rhd1", "rhd2"]) out_df = parse_gctx.replace_666(row_df, convert_neg_666=True) self.assertTrue(e_df.equals(out_df)) # convert_neg_666 is False e_df2 = pd.DataFrame([[3, "a"], ["-666", "c"], ["-666", "-666"]], index=["r1", "r2", "r3"], columns=["rhd1", "rhd2"]) out_df2 = parse_gctx.replace_666(row_df, convert_neg_666=False) self.assertTrue(e_df2.equals(out_df2)) # edge case: if row meta is 1 column of floats row_df3 = pd.DataFrame([[3], [-666], [-666.0]], index=["r1", "r2", "r3"], columns=["rhd3"]) e_df3 = pd.DataFrame([[3], [np.nan], [np.nan]], index=["r1", "r2", "r3"], columns=["rhd3"]) out_df3 = parse_gctx.replace_666(row_df3, convert_neg_666=True) self.assertTrue(e_df3.equals(out_df3)) def test_set_metadata_index_and_column_names(self): mini_gctoo = mini_gctoo_for_testing.make() mini_gctoo.row_metadata_df.index.name = None mini_gctoo.row_metadata_df.columns.name = None mini_gctoo.col_metadata_df.index.name = None mini_gctoo.col_metadata_df.columns.name = None # case 1: dim == "row" parse_gctx.set_metadata_index_and_column_names("row", mini_gctoo.row_metadata_df) self.assertEqual(mini_gctoo.row_metadata_df.index.name, "rid") self.assertEqual(mini_gctoo.row_metadata_df.columns.name, "rhd") # case 2: dim == "col" parse_gctx.set_metadata_index_and_column_names("col", mini_gctoo.col_metadata_df) self.assertEqual(mini_gctoo.col_metadata_df.index.name, "cid") self.assertEqual(mini_gctoo.col_metadata_df.columns.name, "chd") def test_get_ordered_idx(self): mg = mini_gctoo_for_testing.make() # case 1: id_type == None case1 = parse_gctx.get_ordered_idx(None, [], mg.row_metadata_df, sort_idx = True) self.assertEqual(case1, list(range(0, 6)), "Expected ordered idx to be {} but got {}".format(list(range(0, 6)), case1)) # case 2: id_type == "id" case2 = parse_gctx.get_ordered_idx("id", ['LJP007_MCF7_24H:CTL_VEHICLE:DMSO:-666'], mg.col_metadata_df, sort_idx 
= True) self.assertEqual(case2, [4], "Expected ordered idx to be {} but got {}".format([4], case2)) # case 3: id_type == ridx case3 = parse_gctx.get_ordered_idx("idx", [5, 1, 3], mg.col_metadata_df, sort_idx = True) self.assertEqual(case3, [1, 3, 5], "Expected ordered idx to be {} but got {}".format([1, 3, 5], case3)) def test_parse_data_df(self): mini_data_df = pd.DataFrame([[-0.283359, 0.011270], [0.304119, 1.921061], [0.398655, -0.144652]], index=["200814_at", "218597_s_at", "217140_s_at"], columns=["LJP005_A375_24H:DMSO:-666", "LJP005_A375_24H:BRD-K76908866:10"]) mini_data_df = mini_data_df.astype(np.float32) mini_data_df.index.name = "rid" mini_data_df.columns.name = "cid" # create h5py File instance mini_gctx = h5py.File("cmapPy/pandasGEXpress/tests/functional_tests//mini_gctx_with_metadata_n2x3.gctx", "r") data_dset = mini_gctx[data_node] # get relevant metadata fields col_meta = parse_gctx.get_column_metadata("cmapPy/pandasGEXpress/tests/functional_tests//mini_gctx_with_metadata_n2x3.gctx") row_meta = parse_gctx.get_row_metadata("cmapPy/pandasGEXpress/tests/functional_tests//mini_gctx_with_metadata_n2x3.gctx") # case 1: no subsetting data_df1 = parse_gctx.parse_data_df(data_dset, [0, 1, 2], [0, 1], row_meta, col_meta) # note: checks to 3 decimal places pandas_testing.assert_frame_equal(mini_data_df, data_df1, check_exact=False, check_less_precise=True) # case 2: subset; ridx < cidx data_df2 = parse_gctx.parse_data_df(data_dset, [0], [0, 1], row_meta, col_meta) pandas_testing.assert_frame_equal(mini_data_df.iloc[[0], [0, 1]], data_df2, check_exact=False, check_less_precise=True) # case 3: subset; ridx == cidx data_df3 = parse_gctx.parse_data_df(data_dset, [0], [0], row_meta, col_meta) pandas_testing.assert_frame_equal(mini_data_df.iloc[[0], [0]], data_df3, check_exact=False, check_less_precise=True) # case 4: subset; ridx > cidx data_df4 = parse_gctx.parse_data_df(data_dset, [0, 1, 2], [0], row_meta, col_meta) pandas_testing.assert_frame_equal(mini_data_df.iloc[[0, 1, 2], [0]], data_df4, check_exact=False, check_less_precise=True) mini_gctx.close() def test_convert_ids_to_meta_type(self): # happy path id_list = [0, 1, 2] self.assertEqual(int, type(id_list[0])) df = pd.DataFrame({}, index=pd.Series(range(1, 4)).astype(np.int64)) r = parse_gctx.convert_ids_to_meta_type(id_list, df) logger.debug("conversion from regular int to numpy int64 - type(r[0]): {}".format(type(r[0]))) self.assertEqual(np.int64, type(r[0])) id_list = [str(i) for i in range(3)] r = parse_gctx.convert_ids_to_meta_type(id_list, df) logger.debug("conversion from str to numpy int64 - type(r[0]): {}".format(type(r[0]))) self.assertEqual(np.int64, type(r[0])) # unhappy path id_list[0] = "a" with self.assertRaises(Exception) as context: parse_gctx.convert_ids_to_meta_type(id_list, df) logger.debug("context.exception: {}".format(context.exception)) self.assertIn( "The type of the id_list (rid or cid) being used to subset the data is not compatible with the metadata id's in the file", str(context.exception)) def test_check_idx_validity(self):
def test_check_id_validity(self): id_list = ["a", "b", "c"] df = pd.DataFrame({}, index=["a", "b", "c", "d"]) parse_gctx.check_id_validity(id_list, df) id_list[0] = "z" with self.assertRaises(Exception) as context: parse_gctx.check_id_validity(id_list, df) logger.debug("context.exception: {}".format(context.exception)) self.assertIn( "some of the ids being used to subset the data are not present in the metadata for the file being parsed", str(context.exception)) if __name__ == "__main__": setup_logger.setup(verbose=True) unittest.main()
id_list = [0,1,2] df = pd.DataFrame({}, index=range(5)) logger.debug("df.shape: {}".format(df.shape)) parse_gctx.check_idx_validity(id_list, df, sort_id = True) id_list[0] = -1 with self.assertRaises(Exception) as context: parse_gctx.check_idx_validity(id_list, df, sort_id = True) logger.debug("context.exception: {}".format(context.exception)) self.assertIn("some of indexes being used to subset the data are not valid", str(context.exception)) self.assertIn("[-1]", str(context.exception)) invalid_high = df.shape[0] + 1 id_list[0] = invalid_high with self.assertRaises(Exception) as context: parse_gctx.check_idx_validity(id_list, df, sort_id = True) logger.debug("context.exception: {}".format(context.exception)) self.assertIn("some of indexes being used to subset the data are not valid", str(context.exception)) self.assertIn("[{}]".format(invalid_high), str(context.exception))
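The last test above pins down check_idx_validity: every positional index must lie in [0, len(df)). A minimal sketch of that check (the real implementation lives in parse_gctx and may differ in details; only the error-message substrings asserted by the test are reproduced here):

def check_idx_validity_sketch(id_list, df):
    # Collect out-of-range indexes so the message can name them, matching
    # the "[-1]" / "[N]" substrings the test asserts on.
    bad = [i for i in id_list if i < 0 or i >= df.shape[0]]
    if bad:
        raise Exception(
            "some of indexes being used to subset the data are not valid {}".format(bad))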
find-subnets.py
#!/usr/bin/env python3 import sys import json import urllib.request def find_subnet_rules(env, product, subnets): all_subnets = get_all_subnets(env, product, subnets) rule_names = [x['rule_name'] for x in all_subnets] subnet_ids = [x['subnet_id'] for x in all_subnets] result = {} result['subnets'] = ';'.join(subnet_ids) result['rule_names'] = ';'.join(rule_names) return result def get_all_subnets(env, product, subnets): environments = subnets['environments'] env_subnets_list_of_lists = [environment['subnets'] for environment in environments if environment['name'] == env] applications = subnets['applications'] app_subnets_list_of_lists = [application['subnets'] for application in applications if application['name'] == product] if len(env_subnets_list_of_lists) == 0 and len(app_subnets_list_of_lists) == 0: # terraform will say "command "python3" failed with no error message" # still better to fail here I think print('No subnets found') sys.exit(1) env_subnets = env_subnets_list_of_lists[0] if len( env_subnets_list_of_lists) > 0 else [] app_subs = app_subnets_list_of_lists[0] if len( app_subnets_list_of_lists) > 0 else [] all_subnets = env_subnets + app_subs return all_subnets # always only one line from terraform # {"env":"idam-aat","product":"idam-idm-aat", "github_token": "example"} line = sys.stdin.readline() query = json.loads(line) subnets_filename = query['subnets_filename'] github_token = query['github_token'] url = 'https://raw.githubusercontent.com/hmcts/cnp-database-subnet-whitelisting/master/%s' % subnets_filename req = urllib.request.Request( url=url, headers={'Authorization': 'Bearer ' + github_token}) with urllib.request.urlopen(req) as f: subnets_str = f.read().decode('utf-8') subnets = json.loads(subnets_str)
env = query['env'] product = query['product'] result = find_subnet_rules(env, product, subnets) print(json.dumps(result))
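find-subnets.py follows the stdin/stdout contract of a Terraform external data source: one JSON object on a single stdin line, a flat string-to-string JSON map on stdout. An illustrative driver only (it would really fetch the whitelist from GitHub, so the token here is a placeholder and the filename is an assumption):

import json, subprocess

query = {"env": "idam-aat", "product": "idam-idm-aat",
         "subnets_filename": "subnets.json", "github_token": "example"}
proc = subprocess.run(["python3", "find-subnets.py"],
                      input=json.dumps(query), capture_output=True, text=True)
print(json.loads(proc.stdout))  # {"subnets": "a;b", "rule_names": "x;y"}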
tvmi.rs
use std::{env, fs, process::exit}; use tinyvm::context::Program; fn main()
fn read_to_string_with_possible_extension( filename: &str, extension: &str, ) -> Result<String, std::io::Error> { match fs::read_to_string(filename) { Ok(s) => return Ok(s), Err(error) => match error.kind() { std::io::ErrorKind::NotFound => (), _ => return Err(error), }, }; fs::read_to_string(filename.to_owned() + extension) }
{ let mut args = env::args(); if args.len() != 2 { println!("Usage: `tvmi file`"); exit(1); } let filename = args.nth(1).unwrap(); let source = match read_to_string_with_possible_extension(&filename, ".vm") { Ok(s) => s, Err(_) => { println!("Error reading file {}", filename); exit(1); } }; let program = match Program::load(source) { Ok(p) => p, Err(e) => { println!("Error {:?}", e); exit(1); } }; match program.run() { Ok(_) => {}, Err(e) => { println!("Error executing program: {:?}", e); exit(1); } } }
Editor.py
########################################################################## # # Copyright (c) 2011-2012, John Haddon. All rights reserved. # Copyright (c) 2012-2013, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import types import IECore import Gaffer import GafferUI from Qt import QtCore from Qt import QtWidgets class _EditorMetaclass( Gaffer.Trackable.__class__ ) : def __call__( cls, *args, **kw ) : instance = type.__call__( cls, *args, **kw ) while hasattr( cls, "instanceCreatedSignal" ) : cls.instanceCreatedSignal()( instance ) cls = cls.__bases__[0] return instance ## Base class for UI components which display or manipulate a ScriptNode # or its children. These make up the tabs in the UI layout. class Editor( GafferUI.Widget ) : __metaclass__ = _EditorMetaclass def __init__( self, topLevelWidget, scriptNode, **kw ) : GafferUI.Widget.__init__( self, topLevelWidget, **kw ) self._qtWidget().setFocusPolicy( QtCore.Qt.ClickFocus ) assert( isinstance( scriptNode, Gaffer.ScriptNode ) ) self.__scriptNode = scriptNode self.__context = None self.__title = "" self.__titleChangedSignal = GafferUI.WidgetSignal() self.enterSignal().connect( Gaffer.WeakMethod( self.__enter ), scoped = False ) self.leaveSignal().connect( Gaffer.WeakMethod( self.__leave ), scoped = False ) self.__setContextInternal( scriptNode.context(), callUpdate=False ) def scriptNode( self ) : return self.__scriptNode ## May be called to explicitly set the title for this editor. The # editor itself is not responsible for displaying the title - this # is left to the enclosing ui. def setTitle( self, title ) : if title == self.__title : return self.__title = title self.titleChangedSignal()( self ) ## May be overridden to provide sensible default behaviour for # the title, but must return BaseClass.getTitle() if it is non-empty. 
def getTitle( self ) : if self.__title : return self.__title # if there's no explicit title and a derived class # has overridden getTitle() then we return the empty # string to signify that the derived class is free # to return what it wants c = self.__class__ while c is not Editor : if "getTitle" in c.__dict__ : return "" c = c.__bases__[0] # otherwise we default to using the classname return IECore.CamelCase.toSpaced( self.__class__.__name__ ) ## A signal emitted whenever the title changes. def titleChangedSignal( self ) : return self.__titleChangedSignal ## By default Editors operate in the main context held by the script node. This function # allows an alternative context to be provided, making it possible for an editor to # display itself at a custom frame (or with any other context modification). def setContext( self, context ) : self.__setContextInternal( context, callUpdate=True ) def getContext( self ) : return self.__context def __setContextInternal( self, context, callUpdate ) : assert( isinstance( context, ( Gaffer.Context, types.NoneType ) ) ) previousContext = self.__context self.__context = context if self.__context is not None :
else : ## \todo I'm not sure why this code allows a None context - surely we # should always have a valid one? self.__contextChangedConnection = None if callUpdate : modifiedItems = set() if previousContext is not None : modifiedItems |= set( previousContext.names() ) if self.__context is not None : modifiedItems |= set( self.__context.names() ) self._updateFromContext( modifiedItems ) ## May be implemented by derived classes to update state based on a change of context. # To temporarily suspend calls to this function, use Gaffer.BlockedConnection( self._contextChangedConnection() ). def _updateFromContext( self, modifiedItems ) : pass def _contextChangedConnection( self ) : return self.__contextChangedConnection ## This must be implemented by all derived classes as it is used for serialisation of layouts. # It is not expected that the script being edited is also serialised as part of this operation - # instead the new script will be provided later as a variable named scriptNode. So a suitable # serialisation will look like "GafferUI.Editor( scriptNode )". def __repr__( self ) : raise NotImplementedError def __contextChanged( self, context, key ) : assert( context.isSame( self.getContext() ) ) self._updateFromContext( set( [ key ] ) ) @classmethod def types( cls ) : return cls.__namesToCreators.keys() @classmethod def create( cls, name, scriptNode ) : return cls.__namesToCreators[name]( scriptNode = scriptNode ) @classmethod def registerType( cls, name, creator ) : cls.__namesToCreators[name] = creator __namesToCreators = {} @classmethod def instanceCreatedSignal( cls ) : s = cls.__dict__.get( "__instanceCreatedSignal", None ) if s is not None : return s s = Gaffer.Signal1() setattr( cls, "__instanceCreatedSignal", s ) return s def __enter( self, widget ) : if not isinstance( QtWidgets.QApplication.focusWidget(), ( QtWidgets.QLineEdit, QtWidgets.QPlainTextEdit ) ) : self._qtWidget().setFocus() def __leave( self, widget ) : self._qtWidget().clearFocus()
self.__contextChangedConnection = self.__context.changedSignal().connect( Gaffer.WeakMethod( self.__contextChanged ) )
rotation.py
''' This code comes from the internet; for the original, see https://my.oschina.net/hechunc/blog/3020284 ''' import RPi.GPIO as GPIO import time # This class represents a single SG90 servo module class Rotation: frequency=50 # pulse frequency (Hz) delta_theta=0.2 # step rotation increment (degrees) min_delay=0.0006 # theoretical time to rotate by delta_theta (s) max_delay=0.4 # time to rotate from 0 to 180 degrees (s) def __init__(self,channel,min_theta,max_theta,init_theta=0): ''' Constructor: channel: Raspberry Pi pin number (BCM numbering) that the servo signal line is connected to min_theta: minimum rotation angle of the servo max_theta: maximum rotation angle of the servo init_theta: initial angle of the servo ''' self.channel=channel if(min_theta<0 or min_theta>180): self.min_theta=0 else: self.min_theta=min_theta if(max_theta<0 or max_theta>180): self.max_theta=180 else: self.max_theta=max_theta if(init_theta<min_theta or init_theta>max_theta): self.init_theta=(self.min_theta+self.max_theta)/2 else: self.init_theta=init_theta # initial angle # compute the duty cycles for the minimum and maximum angles self.min_dutycycle=2.5+self.min_theta*10/180 self.max_dutycycle=2.5+self.max_theta*10/180 def setup(self): ''' Initialization ''' GPIO.setmode(GPIO.BCM) GPIO.setwarnings(False) GPIO.setup(self.channel,GPIO.OUT) self.pwm=GPIO.PWM(self.channel,Rotation.frequency) #PWM self.dutycycle=2.5+self.init_theta*10/180 # initial pulse duty cycle self.pwm.start(self.dutycycle) # rotate the servo to its initial position time.sleep(Rotation.max_delay) def positiveRotation(self): ''' Forward step rotation; each call rotates by only delta_theta degrees ''' self.dutycycle=self.dutycycle+Rotation.delta_theta*10/180 if self.dutycycle>self.max_dutycycle: self.dutycycle=self.max_dutycycle self.pwm.ChangeDutyCycle(self.dutycycle) time.s
time.sleep(Rotation.min_delay) def specifyRotation(self,theta): ''' Rotate to the specified angle ''' if(theta<0 or theta>180): return self.dutycycle=2.5+theta*10/180 self.pwm.ChangeDutyCycle(self.dutycycle) time.sleep(Rotation.max_delay) def cleanup(self): self.pwm.stop() time.sleep(Rotation.min_delay) GPIO.cleanup()
leep(Rotation.min_delay) def reverseRotation(self): ''' Reverse step rotation; each call rotates by only delta_theta degrees ''' self.dutycycle=self.dutycycle-Rotation.delta_theta*10/180 if self.dutycycle<self.min_dutycycle: self.dutycycle=self.min_dutycycle self.pwm.ChangeDutyCycle(self.dutycycle)
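The duty-cycle arithmetic above encodes the standard SG90 mapping: at 50 Hz one PWM period is 20 ms, and 2.5 + theta*10/180 percent corresponds to a 0.5 ms pulse at 0 degrees up to 2.5 ms at 180 degrees. A quick check:

for theta in (0, 90, 180):
    duty = 2.5 + theta * 10 / 180   # percent, same formula as Rotation
    pulse_ms = duty / 100 * 20      # 20 ms period at 50 Hz
    print(theta, duty, pulse_ms)    # 0.5 ms, 1.5 ms, 2.5 ms pulses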
pages-admin-custom_date-index.3311f547.js
(window["webpackJsonp"]=window["webpackJsonp"]||[]).push([["pages-admin-custom_date-index"],{"0cae":function(n,e,t){"use strict";var a=t("e8ed"),r=t.n(a);r.a},"141e":function(n,e,t){"use strict";t.d(e,"b",(function(){return r})),t.d(e,"c",(function(){return c})),t.d(e,"a",(function(){return a}));var a={uniCalendar:t("aefa").default},r=function(){var n=this,e=n.$createElement,t=n._self._c||e;return t("v-uni-view",[t("uni-calendar",{attrs:{range:!0},on:{change:function(e){arguments[0]=e=n.$handleEvent(e),n.change.apply(void 0,arguments)}}}),t("v-uni-navigator",{staticClass:"back",attrs:{"open-type":"navigateBack","hover-class":"none"}},[n._v("取消")])],1)},c=[]},"29cf":function(n,e,t){"use strict";t.r(e);var a=t("141e"),r=t("3c6f");for(var c in r)"default"!==c&&function(n){t.d(e,n,(function(){return r[n]}))}(c);t("0cae");var i,o=t("f0c5"),u=Object(o["a"])(r["default"],a["b"],a["c"],!1,null,"43372164",null,!1,a["a"],i);e["default"]=u.exports},"3c6f":function(n,e,t){"use strict";t.r(e);var a=t("9ac1"),r=t.n(a);for(var c in a)"default"!==c&&function(n){t.d(e,n,(function(){return a[n]}))}(c);e["default"]=r.a},"9ac1":function(n,e,t){"use strict";var a=t("4ea4");t("99af"),Object.defineProperty(e,"__esModule",{value:!0}),e.default=void 0;var r=a(t("aefa")),c={components:{uniCalendar:r.default},data:function(){return{type:""}},onLoad:function(n){this.type=n.type},methods:{change:function(n){var e=n.range,t=e.before,a=e.after;t&&a&&uni.navigateTo({url:"/pages/admin/statistics/index?type=".concat(this.type,"&before=").concat(t,"&after=").concat(a,"&time=date")})}}};e.default=c},af7c:function(n,e,t){var a=t("24fb");e=a(!1),e.push([n.i,'@charset "UTF-8";\n/**\n * 这里是uni-app内置的常用样式变量\n *\n * uni-app 官方扩展插件及插件市场(https://ext.dcloud.net.cn)上很多三方插件均使用了这些样式变量\n * 如果你是插件开发者,建议你使用scss预处理,并在插件代码中直接使用这些变量(无需 import 这个文件),方便用户通过搭积木的方式开发整体风格一致的App\n *\n */\n/**\n * 如果你是App开发者(插件使用者),你可以通过修改这些变量来定制自己的插件主题,实现自定义主题功能\n *\n * 如果你的项目同样使用了scss预处理,你也可以直接在你的 scss 代码中使用如下变量,同时无需 import 这个文件\n */\n/* 颜色变量 */\n/* 行为相关颜色 */\n/* crmeb颜色变量 */\n/* 背景颜色 */\n/* 边框颜色 */\n/* 尺寸变量 */\n/* 文字尺寸 */\n/* 图片尺寸 */\n/* Border Radius */\n/* 水平间距 */\n/* 垂直间距 */\n/* 透明度 */\n/* 文章场景相关 */.back[data-v-43372164]{height:%?86?%;border:%?1?% solid #e93323;border-radius:%?43?%;margin:%?60?% %?30?%;font-size:%?30?%;line-height:%?84?%;text-align:center;color:#e93323}',""]),n.exports=e},e8ed:function(n,e,t){var a=t("af7c");"string"===typeof a&&(a=[[n.i,a,""]]),a.locals&&(n.exports=a.locals);var r=t("4f06").default;r("19303e0e",a,!0,{sourceMap:!1,shadowMode:!1})}}]);
googleMap.js
// When the window has finished loading create our google map below google.maps.event.addDomListener(window, 'load', init); function init() { // Basic options for a simple Google Map // For more options see: https://developers.google.com/maps/documentation/javascript/reference#MapOptions var mapOptions = { // How zoomed in you want the map to start at (always required) zoom: 14, // The latitude and longitude to center the map (always required) center: new google.maps.LatLng(33.4465154, -86.7318209), // Birmingham, AL // How you would like to style the map. // This is where you would paste any style found on Snazzy Maps. styles: [] }; // Get the HTML DOM element that will contain your map // We are using a div with id="gMap" seen below in the <body>
// Let's also add a marker while we're at it marker = new google.maps.Marker({ map:map, draggable:true, animation: google.maps.Animation.DROP, position: new google.maps.LatLng(33.4465154, -86.7318209), // Change these coordinates to yours to place the marker at your location. icon: '' // empty string = default icon }); }
var mapElement = document.getElementById('gMap'); var map = new google.maps.Map(mapElement, mapOptions);
__init__.py
from .ilsvrc12_dataset import * __all__ =[ "CelebaDataset", "Ilsvrc12Dataset" ]
from .celeba_dataset import *
m2fpn.py
''' This code is based on pytorch_ssd and RFBNet. Details about the modules: TUM - Thinned U-shaped Module MLFPN - Multi-Level Feature Pyramid Network M2Det - Multi-level Multi-scale single-shot object Detector Author: Qijie Zhao ([email protected]) Finished Date: 01/17/2019 ''' import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import xavier_init import warnings warnings.filterwarnings('ignore') from ..registry import NECKS from ..utils import ConvModule class TUM(nn.Module): def __init__(self, first_level=True, input_planes=128, is_smooth=True, side_channel=512, scales=6, conv_cfg=None, norm_cfg=None ): super(TUM, self).__init__() self.is_smooth = is_smooth self.side_channel = side_channel self.input_planes = input_planes self.planes = 2 * self.input_planes self.first_level = first_level self.scales = scales self.in1 = input_planes + side_channel if not first_level else input_planes self.layers = nn.Sequential() self.layers.add_module('{}'.format(len(self.layers)), ConvModule(self.in1, self.planes, 3, 2, 1,conv_cfg=conv_cfg,norm_cfg=norm_cfg)) for i in range(self.scales - 2): if not i == self.scales - 3: self.layers.add_module( '{}'.format(len(self.layers)), ConvModule(self.planes, self.planes, 3, 2, 1,conv_cfg=conv_cfg,norm_cfg=norm_cfg) ) else: self.layers.add_module( '{}'.format(len(self.layers)), ConvModule(self.planes, self.planes, 3, 1, 0,conv_cfg=conv_cfg,norm_cfg=norm_cfg) ) self.toplayer = nn.Sequential(ConvModule(self.planes, self.planes, 1, 1, 0,conv_cfg=conv_cfg,norm_cfg=norm_cfg)) self.latlayer = nn.Sequential() for i in range(self.scales - 2): self.latlayer.add_module( '{}'.format(len(self.latlayer)), ConvModule(self.planes, self.planes, 3, 1, 1,conv_cfg=conv_cfg,norm_cfg=norm_cfg) ) self.latlayer.add_module('{}'.format(len(self.latlayer)), ConvModule(self.in1, self.planes, 3, 1, 1,conv_cfg=conv_cfg,norm_cfg=norm_cfg)) if self.is_smooth: smooth = list() for i in range(self.scales - 1): smooth.append( ConvModule(self.planes, self.planes, 1, 1, 0,conv_cfg=conv_cfg,norm_cfg=norm_cfg) ) self.smooth = nn.Sequential(*smooth) def _upsample_add(self, x, y, fuse_type='interp'): _, _, H, W = y.size() if fuse_type == 'interp': return F.interpolate(x, size=(H, W), mode='nearest') + y else: raise NotImplementedError # return nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1) def forward(self, x, y): if not self.first_level: x = torch.cat([x, y], 1) conved_feat = [x] for i in range(len(self.layers)): x = self.layers[i](x) conved_feat.append(x) deconved_feat = [self.toplayer[0](conved_feat[-1])] for i in range(len(self.latlayer)): deconved_feat.append( self._upsample_add( deconved_feat[i], self.latlayer[i](conved_feat[len(self.layers) - 1 - i]) ) ) if self.is_smooth: smoothed_feat = [deconved_feat[0]] for i in range(len(self.smooth)): smoothed_feat.append( self.smooth[i](deconved_feat[i + 1]) ) return smoothed_feat return deconved_feat class SFAM(nn.Module): def __init__(self, planes, num_levels, num_scales, compress_ratio=16): super(SFAM, self).__init__() self.planes = planes self.num_levels = num_levels self.num_scales = num_scales self.compress_ratio = compress_ratio self.fc1 = nn.ModuleList([nn.Conv2d(self.planes * self.num_levels, self.planes * self.num_levels // 16, 1, 1, 0)] * self.num_scales) self.relu = nn.ReLU(inplace=True) self.fc2 = nn.ModuleList([nn.Conv2d(self.planes * self.num_levels // 16, self.planes * self.num_levels, 1, 1, 0)] * self.num_scales) self.sigmoid = nn.Sigmoid() self.avgpool = nn.AdaptiveAvgPool2d(1) def forward(self, x): 
attention_feat = [] for i, _mf in enumerate(x): _tmp_f = self.avgpool(_mf) _tmp_f = self.fc1[i](_tmp_f) _tmp_f = self.relu(_tmp_f) _tmp_f = self.fc2[i](_tmp_f) _tmp_f = self.sigmoid(_tmp_f) attention_feat.append(_mf * _tmp_f) return attention_feat @NECKS.register_module class M2FPN(nn.Module): def __init__(self, num_levels = 8, num_scales = 5, sfam=False, smooth=True, in_channels = [512,2048], out_channels=256, conv_cfg=None, norm_cfg=None): ''' M2Det: Multi-level Multi-scale single-shot object Detector ''' super(M2FPN,self).__init__() self.planes = out_channels self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.num_levels = num_levels self.num_scales = num_scales self.sfam = sfam self.smooth = smooth self.in_channels = in_channels self.shallow_out =256 self.deep_out =512 self.construct_modules() def construct_modules(self,): # construct tums for i in range(self.num_levels): if i == 0: setattr(self, 'unet{}'.format(i+1),
side_channel=512)) #side channel isn't fixed. else: setattr(self, 'unet{}'.format(i+1), TUM(first_level=False, input_planes=self.planes//2, is_smooth=self.smooth, scales=self.num_scales, side_channel=self.planes)) self.reduce= ConvModule(self.in_channels[0], self.shallow_out, kernel_size=3, stride=1, padding=1) self.up_reduce_1= ConvModule(self.in_channels[2], self.in_channels[1], kernel_size=1, stride=1) self.up_reduce_2= ConvModule(self.in_channels[1], self.deep_out, kernel_size=1, stride=1) self.Norm = nn.BatchNorm2d(256*8) self.leach = nn.ModuleList([ConvModule( self.deep_out+self.shallow_out, self.planes//2, kernel_size=(1,1),stride=(1,1))]*self.num_levels) # construct localization and recognition layers conv_out = nn.ModuleList() for i in range(self.num_scales): conv_out.append(nn.Conv2d(self.planes*self.num_levels, self.planes, 3, 1, 1)) self.conv_out = nn.ModuleList(conv_out) # construct SFAM module if self.sfam: self.sfam_module = SFAM(self.planes, self.num_levels, self.num_scales, compress_ratio=16) def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): xavier_init(m, distribution='uniform') def forward(self,x): assert len(x)==len(self.in_channels) # loc,conf = list(),list() # base_feats = list() # if 'vgg' in self.net_family: # for k in range(len(self.base)): # x = self.base[k](x) # if k in self.base_out: # base_feats.append(x) # elif 'res' in self.net_family: # base_feats = self.base(x, self.base_out) up_feats = x[1] + F.interpolate(self.up_reduce_1(x[2]),scale_factor=2,mode='nearest') base_feature = torch.cat( (self.reduce(x[0]), F.interpolate(self.up_reduce_2(up_feats),scale_factor=2,mode='nearest')),1 ) # tum_outs is the multi-level multi-scale feature tum_outs = [getattr(self, 'unet{}'.format(1))(self.leach[0](base_feature), 'none')] for i in range(1,self.num_levels,1): tum_outs.append( getattr(self, 'unet{}'.format(i+1))( self.leach[i](base_feature), tum_outs[i-1][-1] ) ) # concat with same scales sources = [torch.cat([_fx[i-1] for _fx in tum_outs],1) for i in range(self.num_scales, 0, -1)] # forward_sfam if self.sfam: sources = self.sfam_module(sources) sources[0] = self.Norm(sources[0]) output = [] for (x,cout) in zip(sources, self.conv_out): output.append(cout(x)) return tuple(output)
TUM(first_level=True, input_planes=self.planes//2, is_smooth=self.smooth, scales=self.num_scales,
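One PyTorch subtlety in the SFAM fc1/fc2 and leach constructions above: nn.ModuleList([module] * n) repeats the same module object n times, so all n entries share one set of weights. Whether that sharing is intended here is not stated in the source; this sketch only demonstrates the difference:

import torch.nn as nn

conv = nn.Conv2d(8, 8, 1)
shared = nn.ModuleList([conv] * 3)
print(shared[0] is shared[2])                       # True: one conv, shared weights
independent = nn.ModuleList(nn.Conv2d(8, 8, 1) for _ in range(3))
print(independent[0] is independent[2])             # False: separate weights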
util.go
package msgpgen import ( "path/filepath" "strings" "github.com/pkg/errors" ) func findImportedName(name, originPkg string) string { name = filepath.Base(name) originPkg = filepath.Base(originPkg) + "." return strings.TrimPrefix(name, originPkg) } func splitType(t string) (pkg, name string, err error)
{ lidx := strings.LastIndex(t, ".") if lidx >= 0 { pkg, name = t[0:lidx], t[lidx+1:] } else { err = errors.Errorf("could not parse '%s', expected format full/pkg/path.Type", t) } return }
completions.py
# Standard Library from functools import reduce from glob import iglob from operator import concat from os import listdir, getenv from os.path import expanduser, isdir, isfile from typing import Dict, Generator, Iterable, List, Set, Tuple # 3rd Party from prompt_toolkit.completion import CompleteEvent, Completer, Completion, ThreadedCompleter, merge_completers from prompt_toolkit.document import Document from pygments.styles import STYLE_MAP class _MetaCmdCompleter(Completer): META: Dict[str, Tuple[str, str]] = {f'.{k}': (v[0], v[1] + '.') for k, v in { 'cd': ("[DIR]", 'Change directory to DIR or $HOME if DIR is not provided'), 'dump': ("[FILE]", 'Dump the database as SQL commands to FILE, or to STDOUT if FILE is not provided'), 'exit': ("", 'Exit the REPL'), 'help': ("[PATTERN]", 'Display meta commands matching PATTERN or ALL if PATTERN is not provided'), 'mode': ("[STYLE]", 'Change table style to STYLE or display current style if STYLE is not provided'), 'log': ("[FILE|off]", 'Redirect (implicitly enable) logging into FILE or disable logging with "off", shows current setting with no arg'), 'open': ( "[DATABASE]", 'Close this database and open DATABASE or show current database if DATABASE is not provided'), 'output': ("[FILE]", 'Redirect output of commands to FILE (or to STDOUT if FILE == "stdout"), shows current ' 'output stream if FILE is not provided'), 'print': ("[STRING, ...]", 'Display given STRING in the terminal'), 'prompt': ("[STRING]", 'Change prompt to STRING'), 'quit': ("", 'Exit the REPL'), 'read': ("[FILE]", 'Eval SQL from FILE'), 'save': ("<FILE>", 'Save in-memory database to FILE'), 'schema': ("[PATTERN]", 'Show schemas for tables in the database matching PATTERN'), 'shell': ("<CMD> [ARG, ...]", 'Run an OS command CMD'), 'show': ( "[PATTERN]", 'Display info about the REPL starting with PATTERN or all info if PATTERN is not provided'), 'style': ("[STYLE]", 'Change style to STYLE or show current style if STYLE is not provided'), 'system': ("<CMD> [ARG, ...]", 'Run an OS command CMD with ARGS'), 'tables': ( "[PATTERN]", 'Show tables in the database matching PATTERN or show all tables if PATTERN is not provided'), }.items()} def get_completions(self, doc: Document, event: CompleteEvent) -> Generator[Completion, None, None]: if len(doc.current_line.strip()) == 0 or (not doc.text.strip().startswith('.')): return curr_word = doc.get_word_before_cursor(WORD=True) curr_word_upper = curr_word.upper() curr_word_lower = curr_word.lower() start_position, _ = doc.find_boundaries_of_current_word(WORD=True) # only complete on the *first* word starting with a dot ('.') if curr_word.strip() == '' or (len(doc.text.strip().split(' ')) > 1 and curr_word.strip().startswith('.')): return for completion, pair in _MetaCmdCompleter.META.items(): syntax, descr = pair if completion.startswith(curr_word_lower) or completion.startswith(curr_word_upper): yield Completion(completion, start_position=start_position, display_meta=descr) return class _StyleCompleter(Completer): STYLES: Set[str] = set(STYLE_MAP.keys()) def get_completions(self, doc: Document, event: CompleteEvent) -> Generator[Completion, None, None]: if (len(doc.text.strip()) == 0) or (not doc.text.strip().startswith('.style')): return curr_word = doc.get_word_before_cursor(WORD=True) curr_word_upper = curr_word.upper() curr_word_lower = curr_word.lower() start_position, _ = doc.find_boundaries_of_current_word(WORD=True) for style in _StyleCompleter.STYLES: if style.startswith(curr_word_lower) or style.startswith(curr_word_upper): yield Completion(style,
start_position=start_position, display_meta='style') class _TableStyleCompleter(Completer): STYLES: Set[str] = { 'orgtbl', 'simple', 'plain', 'grid', 'fancy_grid', 'pipe', 'jira', 'presto', 'psql', 'rst', 'mediawiki', 'moinmoin', 'youtrack', 'html', 'latex', 'latex_raw', 'latex_booktabs', 'textile', } def get_completions(self, doc: Document, event: CompleteEvent) -> Generator[Completion, None, None]: if (len(doc.text.strip()) == 0) or (not doc.text.strip().startswith('.mode')): return curr_word = doc.get_word_before_cursor(WORD=True) curr_word_upper = curr_word.upper() curr_word_lower = curr_word.lower() start_position, _ = doc.find_boundaries_of_current_word(WORD=True) for style in _TableStyleCompleter.STYLES: if style.startswith(curr_word_lower) or style.startswith(curr_word_upper): yield Completion(style, start_position=start_position, display_meta='table style') class _ExecutablesCompleter(Completer): from sys import platform # the PATH scan is identical on every platform, only the separator differs CACHE: Set[str] = set( filter(lambda x: '.' not in x, reduce(concat, [listdir(d) for d in filter(isdir, filter(bool, getenv("PATH", "").split(';' if platform.startswith('win') else ':')))]))) def get_completions(self, doc: Document, event: CompleteEvent) -> Generator[Completion, None, None]: if len(doc.current_line.strip()) == 0: return curr_word = doc.get_word_before_cursor(WORD=True) pos, _ = doc.find_boundaries_of_current_word() if (doc.text.startswith('.shell') or doc.text.startswith('.system')) and len(doc.current_line.split(' ')) < 3: for binary in _ExecutablesCompleter.CACHE: if binary.startswith(curr_word): yield Completion(binary, start_position=pos, display_meta='executable') class _FileCompleter(Completer): def get_completions(self, doc: Document, event: CompleteEvent) -> Generator[Completion, None, None]: if (len(doc.current_line.strip()) == 0) or \ ((not (doc.text.strip().startswith('.dump'))) and (not doc.text.strip().startswith('.read')) and (not doc.text.strip().startswith('.open')) and (not doc.text.strip().startswith('.log')) and (not doc.text.strip().startswith('.output'))): return pos, _ = doc.find_boundaries_of_current_word(WORD=True) word_ = doc.get_word_under_cursor(WORD=True) word = expanduser(word_) for node in iglob(expanduser(word_) + '*'): if node.startswith(word) and isfile(node): yield Completion(node, start_position=pos, display_meta='file') class _CdCompleter(Completer): def get_completions(self, doc: Document, event: CompleteEvent) -> Generator[Completion, None, None]: if (len(doc.current_line.strip()) == 0) or (not doc.text.strip().startswith('.cd')): return pos, _ = doc.find_boundaries_of_current_word(WORD=True) word_ = doc.get_word_under_cursor(WORD=True) word = expanduser(word_) for node in iglob(expanduser(word_) + '*'): if node.startswith(word) and isdir(node): yield Completion(node, start_position=pos, display_meta='dir') yield Completion('..', start_position=pos, display_meta='dir (parent dir)') class _FileSystemCompleter(Completer): def get_completions(self, doc: Document, event: CompleteEvent) -> Generator[Completion, None, None]: # .cd handled by the _CdCompleter if (len(doc.current_line.strip()) == 0) or doc.text.strip().startswith('.cd'): return patterns: List[str] = ['./', '/', '~/'] pos, _ = doc.find_boundaries_of_current_word(WORD=True) word_ = doc.get_word_under_cursor(WORD=True) word = expanduser(word_) for pattern in patterns: if
word_.startswith(pattern): for node in iglob(expanduser(word_) + '*'): if node.startswith(word): yield Completion(node, start_position=pos, display_meta=('file' if isfile(node) else 'dir')) return class _SQLCompleter(Completer): KEYWORDS: Iterable[str] = [ 'ABORT', 'ACTION', 'ADD COLUMN', 'ADD', 'ADD', 'AFTER', 'ALL', 'ALTER DATABASE', 'ALTER TABLE', 'ANALYZE', 'AND', 'ASC', 'ATTACH DATABASE', 'AUTOINCREMENT', 'BEFORE', 'BEGIN DEFERRED TRANSACTION', 'BEGIN EXCLUSIVE TRANSACTION', 'BEGIN IMMEDIATE TRANSACTION', 'BEGIN TRANSACTION', 'BETWEEN', 'BY', 'CASCADE', 'CASE', 'CAST', 'CHECK', 'COLLATE', 'COLUMN', 'COMMIT TRANSACTION', 'CONFLICT', 'CONSTRAINT', 'CREATE INDEX', 'CREATE TABLE', 'CREATE TRIGGER', 'CREATE VIEW', 'CREATE VIRTUAL TABLE', 'CREATE TEMPORARY VIEW', 'CREATE TEMPORARY TABLE', 'CREATE TEMPORARY TRIGGER', 'CREATE TEMPORARY INDEX', 'CREATE TEMPORARY VIRTUAL TABLE', 'CREATE TEMPORARY VIEW IF NOT EXISTS', 'CREATE TEMPORARY TABLE IF NOT EXISTS', 'CREATE TEMPORARY TRIGGER IF NOT EXISTS', 'CREATE TEMPORARY INDEX IF NOT EXISTS', 'CREATE TEMPORARY VIRTUAL TABLE IF NOT EXISTS', 'CROSS', 'CURRENT_DATE', 'CURRENT_TIME', 'CURRENT_TIMESTAMP', 'DATABASE', 'DEFAULT', 'DEFAULT VALUES', 'DEFERRABLE', 'DEFERRED', 'DELETE', 'DESC', 'DETACH DATABASE', 'DISTINCT', 'DROP INDEX', 'DROP TABLE', 'DROP TRIGGER', 'DROP VIEW', 'ELSE', 'END TRANSACTION', 'ESCAPE', 'EXCEPT', 'EXCLUSIVE', 'EXISTS', 'EXPLAIN', 'FAIL', 'FOR EACH ROW', 'FOR', 'FOREIGN', 'FROM', 'FULL', 'GROUP BY', 'HAVING', 'IF EXISTS', 'IF NOT EXISTS', 'IF', 'IGNORE', 'IMMEDIATE', 'IN', 'INDEX', 'INDEXED BY', 'INITIALLY', 'INNER', 'INSERT INTO', 'INSERT OR ABORT INTO', 'INSERT OR FAIL INTO', 'INSERT OR IGNORE INTO', 'INSERT OR REPLACE INTO', 'INSERT OR ROLLBACK INTO', 'INSTEAD OF', 'INTERSECT', 'INTO', 'IS NOT', 'IS', 'ISNULL', 'JOIN', 'KEY', 'LEFT', 'LIKE', 'LIMIT', 'MATCH', 'NATURAL', 'NOT BETWEEN', 'NOT EXISTS', 'NOT GLOB', 'NOT IN', 'NOT INDEXED', 'NOT LIKE', 'NOT MATCH', 'NOT NULL', 'NOT REGEXP', 'NOT', 'NOTNULL', 'OF', 'OFFSET', 'ON CONFLICT ABORT', 'ON CONFLICT FAIL', 'ON CONFLICT IGNORE', 'ON CONFLICT REPLACE', 'ON CONFLICT ROLLBACK', 'ON CONFLICT', 'ON', 'OR', 'ORDER BY', 'OUTER', 'OVER', 'PLAN', 'PRAGMA', 'PRIMARY KEY', 'QUERY PLAN', 'QUERY', 'RAISE', 'RECURSIVE', 'REFERENCES', 'REGEXP', 'REINDEX', 'RELEASE SAVEPOINT', 'RENAME COLUMN', 'RENAME TO', 'RENAME', 'REPLACE', 'RESTRICT', 'RIGHT', 'ROLLBACK TO SAVEPOINT', 'ROLLBACK TRANSACTION TO SAVEPOINT', 'ROLLBACK TRANSACTION', 'ROW', 'SAVEPOINT', 'SELECT * FROM', 'SELECT', 'SET', 'TABLE', 'TEMPORARY', 'THEN', 'TO', 'TRANSACTION', 'TRIGGER', 'UNION', 'UNIQUE', 'UPDATE OR ABORT', 'UPDATE OR FAIL', 'UPDATE OR IGNORE', 'UPDATE OR REPLACE', 'UPDATE OR ROLLBACK', 'UPDATE', 'USING', 'VACUUM', 'VALUES', 'VIEW', 'VIRTUAL', 'WHEN', 'WHERE', 'WITH', 'WITHOUT', ] PRAGMAS: Iterable[str] = [ 'application_id', 'auto_vacuum', 'automatic_index', 'busy_timeout', 'cache_size', 'cache_spill', 'case_sensitive_like', 'cell_size_check', 'checkpoint_fullfsync', 'collation_list', 'compile_options', 'data_version', 'database_list', 'encoding', 'foreign_key_check', 'foreign_key_list', 'foreign_keys', 'freelist_count', 'fullfsync', 'function_list', 'ignore_check_constraints', 'incremental_vacuum', 'index_info', 'index_list', 'index_xinfo', 'integrity_check', 'journal_mode', 'journal_size_limit', 'legacy_alter_table', 'legacy_file_format', 'locking_mode', 'max_page_count', 'mmap_size', 'module_list', 'optimize', 'page_count', 'page_size', 'parser_trace', 'pragma_list', 'query_only', 'quick_check', 'read_uncommitted', 
'recursive_triggers', 'reverse_unordered_selects', 'shrink_memory', 'soft_heap_limit', 'stats', 'synchronous', 'table_info', 'temp_store', 'threads', 'user_version', 'vdbe_addoptrace', 'vdbe_debug', 'vdbe_listing', 'vdbe_trace', 'wal_autocheckpoint', 'wal_checkpoint', 'writable_schema', ] AGGR_FUNCTS: Iterable[str] = [i + '(' for i in [ 'avg', 'count', 'group_concat', 'max', 'min', 'sum', ]] TABLES: Iterable[str] = [ 'sqlite_master', 'sqlite_sequence', ] FUNCTS: Iterable[str] = [i + '(' for i in [ 'abs', 'changes', 'char', 'coalesce', 'date', 'glob', 'hex', 'ifnull', 'instr', 'count', 'group_concat', 'last_insert_rowid', 'length', 'like', 'likelihood', 'likely', 'load_extension', 'lower', 'ltrim', 'max', 'min', 'nullif', 'printf', 'quote', 'random', 'julianday', 'datetime', 'randomblob', 'replace', 'round', 'rtrim', 'soundex', 'sqlite_compileoption_get', 'sqlite_compileoption_used', 'sqlite_source_id', 'sqlite_version', 'substr', 'strftime', 'total_changes', 'total', 'trim', 'typeof', 'unicode', 'unlikely', 'upper', 'zeroblob', ]] DTYPES: Iterable[str] = [ 'BLOB', 'INTEGER', 'NULL', 'REAL', 'TEXT', ] NUMERIC: Iterable[str] = [ 'BOOLEAN', 'DATE', 'DATETIME', 'DECIMAL(10,5)', 'NUMERIC', ] TEXT: Iterable[str] = [ 'CHARACTER(20)', 'CLOB', 'NATIVE CHARACTER(70)', 'NCHAR(255)', 'NVARCHAR(100)', 'VARCHAR(255)', 'VARYING CHARACTER(255)', ] REAL: Iterable[str] = [ 'DOUBLE PRECISION', 'DOUBLE', 'FLOAT', ] INTEGER: Iterable[str] = [ 'BIGINT', 'INT', 'INT2', 'INT8', 'MEDIUMINT', 'SMALLINT', 'TINYINT', 'UNSIGNED BIG INT', ] def get_completions(self, doc: Document, event: CompleteEvent) -> Generator[Completion, None, None]: if len(doc.current_line.strip()) == 0 or doc.text.strip().startswith('.'): return word = doc.get_word_before_cursor(WORD=True) word_upper = word.upper() word_lower = word.lower() pos = doc.find_boundaries_of_current_word(WORD=True)[0] def
matches(completion: str) -> bool: return completion.startswith(word_lower) or completion.startswith(word_upper) def from_iter(words: Iterable[str], meta_info: str) -> Generator[Completion, None, None]: for w in filter(matches, words): yield Completion(w, start_position=pos, display_meta=meta_info) yield from from_iter(_SQLCompleter.PRAGMAS, "pragma") yield from from_iter([f'pragma_{i}(' for i in _SQLCompleter.PRAGMAS], "pragma function") yield from from_iter(_SQLCompleter.AGGR_FUNCTS, "aggregate function") yield from from_iter(_SQLCompleter.KEYWORDS, "keyword") yield from from_iter(_SQLCompleter.TABLES, "table") yield from from_iter(_SQLCompleter.FUNCTS, "function") yield from from_iter(_SQLCompleter.DTYPES, "data type") yield from from_iter(_SQLCompleter.NUMERIC, "NUMERIC (alias)") yield from from_iter(_SQLCompleter.TEXT, "TEXT (alias)") yield from from_iter(_SQLCompleter.REAL, "REAL (alias)") yield from from_iter(_SQLCompleter.INTEGER, "INTEGER (alias)") def SQLiteCompleter() -> Completer: return ThreadedCompleter( merge_completers([ _MetaCmdCompleter(), _ExecutablesCompleter(), _FileSystemCompleter(), _TableStyleCompleter(), _FileCompleter(), _StyleCompleter(), _CdCompleter(), _SQLCompleter(), ]))
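For orientation, a minimal sketch of how SQLiteCompleter might be wired into a prompt_toolkit read loop; the `sqlite> ` prompt string and the `completions` module name are illustrative assumptions, not part of this file:

# Hypothetical usage sketch; assumes this file is importable as `completions`.
from prompt_toolkit import PromptSession

from completions import SQLiteCompleter

session = PromptSession(completer=SQLiteCompleter(), complete_while_typing=True)
while True:
    line = session.prompt('sqlite> ')  # meta commands, styles, paths and SQL all complete here
    if line.strip() in ('.exit', '.quit'):
        break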
proxy.go
// Package reverseproxy implements a simple reverse proxy.
package reverseproxy

import (
	"errors"
	"fmt"
	"net/http/httputil"
	"net/url"
	"strings"
	"sync"

	"github.com/sirupsen/logrus"
)

var (
	ErrorUpstreamNotValid = errors.New("upstream not valid")
	ErrorUpstreamExists   = errors.New("upstream exists")
	ErrorUpstreamNotFound = errors.New("upstream not found")
)

var m *Manager
var managerOnce sync.Once

type Manager struct {
	servers map[string]Upstream
	mu      sync.RWMutex
}

type Upstream struct {
	Proxy *httputil.ReverseProxy
}

func GetManager() *Manager {
	managerOnce.Do(func() {
		m = &Manager{
			servers: make(map[string]Upstream),
		}
	})
	return m
}

func (m *Manager) GetUpstream(path string) (up Upstream, err error) {
	logrus.Debugf("get upstream %s", path)
	m.mu.RLock()
	defer m.mu.RUnlock()
	up, ok := m.servers[path]
	if !ok {
		err = ErrorUpstreamNotFound
		logrus.Errorf("upstream %s not found", path)
		return
	}
	logrus.Debugf("upstream %s found", path)
	return
}

// RegisterUpstream registers a reverse-proxy backend.
// serverUrl accepts the forms :8090, 127.0.0.1:8090 and http://127.0.0.1:8090.
func (m *Manager) RegisterUpstream(path string, serverUrl string) (err error) {
	if len(path) == 0 || len(serverUrl) == 0 {
		err = ErrorUpstreamNotValid
		return
	}
	if strings.HasPrefix(serverUrl, ":") {
		// only a port was given
		serverUrl = fmt.Sprintf("http://127.0.0.1%s", serverUrl)
	} else if !strings.HasPrefix(serverUrl, "http") {
		serverUrl = fmt.Sprintf("http://%s", serverUrl)
	}
	su, err := url.Parse(serverUrl)
	if err != nil {
		return err
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	_, ok := m.servers[path]
	if ok {
		err = ErrorUpstreamExists
		return
	}
	up := Upstream{
		Proxy: httputil.NewSingleHostReverseProxy(su),
	}
	m.servers[path] = up
	return
}

func (m *Manager) UnregisterUpstream(path string) (err error) {
	if len(path) == 0 {
		err = ErrorUpstreamNotValid
		return
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	delete(m.servers, path)
	return
}

func RegisterUpstream(path string, serverUrl string) (err error) {
	return GetManager().RegisterUpstream(path, serverUrl)
}

func UnregisterUpstream(path string) (err error) {
	return GetManager().UnregisterUpstream(path)
}
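For orientation, a hedged sketch of how this package might be mounted into an HTTP server; the module import path, route, and backend address are illustrative assumptions:

// Hypothetical wiring sketch for the reverseproxy package above.
package main

import (
	"net/http"

	"example.com/yourmodule/reverseproxy" // assumed import path
)

func main() {
	_ = reverseproxy.RegisterUpstream("/api/", "127.0.0.1:8090") // backend address is illustrative

	http.HandleFunc("/api/", func(w http.ResponseWriter, r *http.Request) {
		up, err := reverseproxy.GetManager().GetUpstream("/api/")
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadGateway)
			return
		}
		up.Proxy.ServeHTTP(w, r) // hand the request to the registered ReverseProxy
	})

	_ = http.ListenAndServe(":8080", nil)
}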
pearson_model_param_pb2.py
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: pearson-model-param.proto from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='pearson-model-param.proto', package='com.welab.wefe.core.mlmodel.buffer', syntax='proto3', serialized_options=b'B\026PearsonModelParamProto', serialized_pb=b'\n\x19pearson-model-param.proto\x12\"com.welab.wefe.core.mlmodel.buffer\"\x16\n\x05Names\x12\r\n\x05names\x18\x01 \x03(\t\"/\n\x0c\x41nonymousMap\x12\x11\n\tanonymous\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"\x8a\x02\n\x11PearsonModelParam\x12\r\n\x05party\x18\x01 \x01(\t\x12\x0f\n\x07parties\x18\x02 \x03(\t\x12\r\n\x05shape\x18\x03 \x01(\x05\x12\x0e\n\x06shapes\x18\x04 \x03(\x05\x12\r\n\x05names\x18\x05 \x03(\t\x12G\n\ranonymous_map\x18\t \x03(\x0b\x32\x30.com.welab.wefe.core.mlmodel.buffer.AnonymousMap\x12\x0c\n\x04\x63orr\x18\x06 \x03(\x01\x12\x12\n\nlocal_corr\x18\x07 \x03(\x01\x12<\n\tall_names\x18\x08 \x03(\x0b\x32).com.welab.wefe.core.mlmodel.buffer.NamesB\x18\x42\x16PearsonModelParamProtob\x06proto3' ) _NAMES = _descriptor.Descriptor( name='Names', full_name='com.welab.wefe.core.mlmodel.buffer.Names', filename=None, file=DESCRIPTOR, containing_type=None,
fields=[ _descriptor.FieldDescriptor( name='names', full_name='com.welab.wefe.core.mlmodel.buffer.Names.names', index=0, number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=65, serialized_end=87, ) _ANONYMOUSMAP = _descriptor.Descriptor( name='AnonymousMap', full_name='com.welab.wefe.core.mlmodel.buffer.AnonymousMap', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='anonymous', full_name='com.welab.wefe.core.mlmodel.buffer.AnonymousMap.anonymous', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='name', full_name='com.welab.wefe.core.mlmodel.buffer.AnonymousMap.name', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=89, serialized_end=136, ) _PEARSONMODELPARAM = _descriptor.Descriptor( name='PearsonModelParam', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='party', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.party', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='parties', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.parties', index=1, number=2, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='shape', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.shape', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='shapes', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.shapes', index=3, number=4, type=5, cpp_type=1, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='names', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.names', index=4, number=5, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='anonymous_map', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.anonymous_map', index=5, number=9, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='corr', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.corr', index=6, number=6, type=1, cpp_type=5, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='local_corr', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.local_corr', index=7, number=7, type=1, cpp_type=5, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='all_names', full_name='com.welab.wefe.core.mlmodel.buffer.PearsonModelParam.all_names', index=8, number=8, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=139, serialized_end=405, ) _PEARSONMODELPARAM.fields_by_name['anonymous_map'].message_type = _ANONYMOUSMAP _PEARSONMODELPARAM.fields_by_name['all_names'].message_type = _NAMES DESCRIPTOR.message_types_by_name['Names'] = _NAMES DESCRIPTOR.message_types_by_name['AnonymousMap'] = _ANONYMOUSMAP DESCRIPTOR.message_types_by_name['PearsonModelParam'] = _PEARSONMODELPARAM _sym_db.RegisterFileDescriptor(DESCRIPTOR) Names = _reflection.GeneratedProtocolMessageType('Names', (_message.Message,), { 'DESCRIPTOR': _NAMES, '__module__': 'pearson_model_param_pb2' # @@protoc_insertion_point(class_scope:com.welab.wefe.core.mlmodel.buffer.Names) }) _sym_db.RegisterMessage(Names) AnonymousMap = _reflection.GeneratedProtocolMessageType('AnonymousMap', (_message.Message,), { 'DESCRIPTOR': _ANONYMOUSMAP, '__module__': 'pearson_model_param_pb2' # @@protoc_insertion_point(class_scope:com.welab.wefe.core.mlmodel.buffer.AnonymousMap) }) _sym_db.RegisterMessage(AnonymousMap) PearsonModelParam = _reflection.GeneratedProtocolMessageType('PearsonModelParam', (_message.Message,), { 'DESCRIPTOR': _PEARSONMODELPARAM, '__module__': 'pearson_model_param_pb2' # @@protoc_insertion_point(class_scope:com.welab.wefe.core.mlmodel.buffer.PearsonModelParam) }) _sym_db.RegisterMessage(PearsonModelParam) DESCRIPTOR._options = None # @@protoc_insertion_point(module_scope)
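This module is generated and must not be edited, but for orientation, a short sketch of how the registered message types might be used downstream; all field values are illustrative:

# Hypothetical usage sketch for the generated classes.
from pearson_model_param_pb2 import PearsonModelParam

param = PearsonModelParam(party='guest', parties=['guest', 'host'], shape=2, names=['x0', 'x1'])
param.anonymous_map.add(anonymous='host_0_x0', name='x0')  # repeated AnonymousMap field

blob = param.SerializeToString()               # proto3 wire-format bytes
restored = PearsonModelParam.FromString(blob)  # round-trip back into a message
assert restored.names == ['x0', 'x1']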
planets.model.js
// const planets = [];
// module.exports = planets;

const { parse } = require('csv-parse');
const fs = require('fs');
const path = require('path');

const results = [];

function isHabitable(planet) {
  return planet['koi_disposition'] === "CONFIRMED"
    && planet['koi_insol'] <= 1.11
    && planet['koi_insol'] >= 0.36
    && planet['koi_prad'] < 1.6;
}

function loadHabitablePlanets() {
  // Without wrapping the file-parsing code in a Promise, the stream would keep
  // running asynchronously in the background while execution continued, and a
  // caller that requires this module would see an empty planets array because
  // the CSV file has not finished parsing yet. The Promise lets callers wait
  // until 'results' holds the habitable planets before working with the data.
  return new Promise((resolve, reject) => {
    fs.createReadStream(path.join(__dirname, '..', '..', 'data', 'KeplerData.csv'))
      .pipe(parse({
        comment: '#',
        columns: true,
      }))
      .on('data', data => {
        if (isHabitable(data)) results.push(data);
      })
      .on('error', err => reject(err)) // on error, reject the promise
      .on('end', () => {
        resolve(); // on end, resolve the promise
      });
  });
}

function getAllPlanets() {
  return results;
}

module.exports = {
  loadHabitablePlanets,
  // planets: results,
  getAllPlanets,
};
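For orientation, a minimal sketch of how a caller (for example a server entry point) might consume this model; the require path is an illustrative assumption:

// Hypothetical usage sketch.
const { loadHabitablePlanets, getAllPlanets } = require('./models/planets.model');

loadHabitablePlanets()
  .then(() => console.log(`${getAllPlanets().length} habitable planets found`))
  .catch(err => console.error('could not load planet data', err));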
main.rs
use prelude::read_i64;

/// By considering the terms in the Fibonacci sequence whose
/// values do not exceed MAX, find the sum of the even-valued terms.
struct Fib {
    curr: i64,
    next: i64,
}

impl Fib {
    pub fn new() -> Fib {
        Fib { curr: 1, next: 1 }
    }

    pub fn even_sum_till(&mut self, max: i64) -> i64 {
        self.filter(|x| x % 2 == 0).take_while(|&x| x <= max).sum()
    }
}

impl Iterator for Fib {
    type Item = i64;

    fn next(&mut self) -> Option<i64> {
        let next = self.curr + self.next;
        self.curr = self.next;
        self.next = next;
        Some(self.curr)
    }
}

fn main() {
    let num = read_i64();
    for _ in 0..num {
        println!("{}", Fib::new().even_sum_till(read_i64()));
    }
}
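As a quick sanity check, the well-known Project Euler #2 result says the even-valued Fibonacci terms not exceeding four million sum to 4613732; a small test sketch:

#[cfg(test)]
mod tests {
    use super::Fib;

    #[test]
    fn even_sum_below_four_million() {
        // Known Project Euler #2 value for MAX = 4_000_000.
        assert_eq!(Fib::new().even_sum_till(4_000_000), 4_613_732);
    }
}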
service_test.py
# coding=utf-8 # import time import copy import pytest import logging import unittest import threading import concurrent.futures from multiprocessing import Manager from soocii_pubsub_lib import pubsub_client, sub_service # ========== Initial Logger ========== logging.basicConfig( level=logging.DEBUG, format='[%(asctime)-15s][%(thread)d][%(levelname)-5s][%(filename)s][%(funcName)s#%(lineno)d] %(message)s') logger = logging.getLogger(__name__) # ==================================== # normal subscribe @pytest.mark.usefixtures("start_emulator") class NormalSubscribeTests(unittest.TestCase): def setUp(self): self.project = 'fake-project' self.cred = None self.topic = 'fake-topic' self.published_message_id = None # self.received_message = None # self.received_message_counts = 0 self.service = None # shared variables due to multi-threading manager = Manager() self.lock = threading.Lock() self.received_message = manager.dict() self.received_message_counts = manager.Value('i', 0) def tearDown(self):
pass def __on_published(self, message_id): logger.info('message is published with message id: {}'.format(message_id)) self.published_message_id = message_id def __on_received(self, message): try: with self.lock: logger.info('message is received with payload: {}'.format(message)) self.received_message = copy.deepcopy(message) self.received_message_counts.value = self.received_message_counts.value + 1 logger.info('received_message: {}, received_message_counts: {}'.format(self.received_message, self.received_message_counts.value)) except Exception as e: logger.exception('unexpected exception was caught: {}'.format(e)) # ack message logger.info('ack message') return True def __publisher(self): # prepare publisher publisher = pubsub_client.PublisherClient(self.project, self.cred) # get configuration of the topic before sending request exception_caught = False try: publisher.get_topic(self.topic) except Exception as e: exception_caught = True logger.exception('unexpected exception was caught: {}'.format(e)) self.assertFalse(exception_caught) # publish bytes logger.info('start publishing message') for _ in range(5): publisher.publish(self.topic, b'bytes data', callback=lambda message_id: self.__on_published(message_id)) time.sleep(0.5) def __subscriber(self): # prepare subscriber self.subscription = pubsub_client.SubscribeClient(self.project, self.cred) self.subscription.create_subscription(self.topic, 'fake-subscription') self.service = sub_service.SubscriptionService(self.subscription) logger.info('start subscribing message') self.service.run(callback=lambda message: self.__on_received(message)) def __waiter(self): # wait for callback time.sleep(10) self.service.shutdown() # @pytest.mark.skip(reason="not reliable in travis CI") def test_subscribe_message(self): # prepare publisher publisher = pubsub_client.PublisherClient(self.project, self.cred) publisher.create_topic(self.topic) # prepare subscriber self.subscription = pubsub_client.SubscribeClient(self.project, self.cred) self.subscription.create_subscription(self.topic, 'fake-subscription') with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor: executor.submit(lambda: self.__waiter()) self.__publisher() # subscriber service MUST run in main thread self.__subscriber() # verify if message has been received assert self.received_message is not None assert self.received_message['data'] == b'bytes data' assert self.received_message['attributes'] == {} assert self.received_message_counts.value == 5
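For orientation, a stand-alone publish sketch built only from the client calls exercised above; the project and topic names are the same fakes used by the test:

# Hypothetical sketch of the publish side, using only calls that appear in this test.
from soocii_pubsub_lib import pubsub_client

publisher = pubsub_client.PublisherClient('fake-project', None)
publisher.create_topic('fake-topic')
publisher.publish('fake-topic', b'bytes data',
                  callback=lambda message_id: print('published:', message_id))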
host_infos_test.go
// Copyright (c) 2019 Uber Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package objects import ( "context" "errors" "testing" "time" "github.com/golang/mock/gomock" "github.com/stretchr/testify/suite" hostpb "github.com/uber/peloton/.gen/peloton/api/v0/host" pelotonpb "github.com/uber/peloton/.gen/peloton/api/v0/peloton" "github.com/uber/peloton/pkg/storage/objects/base" ormmocks "github.com/uber/peloton/pkg/storage/orm/mocks" ) type HostInfoObjectTestSuite struct { suite.Suite } func (s *HostInfoObjectTestSuite) SetupTest() { setupTestStore() } func TestHostInfoObjectSuite(t *testing.T) { suite.Run(t, new(HostInfoObjectTestSuite))
} // TestHostInfo tests ORM DB operations for HostInfo func (s *HostInfoObjectTestSuite) TestHostInfo() { db := NewHostInfoOps(testStore) testHostInfo := &hostpb.HostInfo{ Hostname: "hostname1", Ip: "1.2.3.4", State: hostpb.HostState_HOST_STATE_UP, GoalState: hostpb.HostState_HOST_STATE_DRAINING, CurrentPool: "pool1", DesiredPool: "pool2", } labels := make(map[string]string) for _, label := range testHostInfo.Labels { labels[label.Key] = label.Value } // Test Create err := db.Create( context.Background(), testHostInfo.Hostname, testHostInfo.Ip, testHostInfo.State, testHostInfo.GoalState, labels, testHostInfo.CurrentPool, testHostInfo.DesiredPool) s.NoError(err) // Test Get hostInfoGet, err := db.Get(context.Background(), testHostInfo.Hostname) s.NoError(err) s.Equal(testHostInfo, hostInfoGet) // Test GetAll testHostInfo2 := &hostpb.HostInfo{ Hostname: "hostname2", Ip: "5.6.7.8", State: hostpb.HostState_HOST_STATE_UP, GoalState: hostpb.HostState_HOST_STATE_DRAINING, CurrentPool: "pool1", DesiredPool: "pool2", } labels2 := make(map[string]string) for _, label := range testHostInfo2.Labels { labels2[label.Key] = label.Value } err = db.Create( context.Background(), testHostInfo2.Hostname, testHostInfo2.Ip, testHostInfo2.State, testHostInfo2.GoalState, labels2, testHostInfo2.CurrentPool, testHostInfo2.DesiredPool) s.NoError(err) hostInfosAll, err := db.GetAll(context.Background()) s.NoError(err) s.Len(hostInfosAll, 2) // fetched records in reverse created order / latest first s.Equal(testHostInfo2, hostInfosAll[0]) s.Equal(testHostInfo, hostInfosAll[1]) // Test Update testHostInfo.State = hostpb.HostState_HOST_STATE_DRAINING testHostInfo.GoalState = hostpb.HostState_HOST_STATE_DRAINED testHostInfo.Labels = []*pelotonpb.Label{ { Key: "label1", Value: "value1", }, } labels = make(map[string]string) for _, label := range testHostInfo.Labels { labels[label.Key] = label.Value } testHostInfo.DesiredPool = "pool1" err = db.Update( context.Background(), testHostInfo.Hostname, testHostInfo.State, testHostInfo.GoalState, labels, testHostInfo.CurrentPool, testHostInfo.DesiredPool) s.NoError(err) hostInfoGot, err := db.Get(context.Background(), testHostInfo.Hostname) s.NoError(err) s.Equal(testHostInfo, hostInfoGot) s.EqualValues(testHostInfo.DesiredPool, hostInfoGot.DesiredPool) // Test Delete err = db.Delete(context.Background(), testHostInfo.Hostname) s.NoError(err) hostInfo, err := db.Get(context.Background(), testHostInfo.Hostname) s.Error(err) s.Nil(hostInfo) hostInfosAll, err = db.GetAll(context.Background()) s.NoError(err) s.Len(hostInfosAll, 1) s.Equal(testHostInfo2, hostInfosAll[0]) } // TestCreateGetGetAllDeleteHostInfoFail tests failure cases due to ORM Client errors func (s *HostInfoObjectTestSuite) TestCreateGetGetAllDeleteHostInfoFail() { ctrl := gomock.NewController(s.T()) defer ctrl.Finish() mockClient := ormmocks.NewMockClient(ctrl) mockStore := &Store{oClient: mockClient, metrics: testStore.metrics} db := NewHostInfoOps(mockStore) mockClient.EXPECT().Create(gomock.Any(), gomock.Any()).Return(errors.New("Create failed")) mockClient.EXPECT().Get(gomock.Any(), gomock.Any()). Return(nil, errors.New("Get failed")) mockClient.EXPECT().GetAll(gomock.Any(), gomock.Any()). Return(nil, errors.New("GetAll failed")) mockClient.EXPECT().Delete(gomock.Any(), gomock.Any()). 
Return(errors.New("Delete failed")) ctx := context.Background() testHostInfo := &hostpb.HostInfo{ Hostname: "hostname1", Ip: "1.2.3.4", State: hostpb.HostState_HOST_STATE_UP, GoalState: hostpb.HostState_HOST_STATE_DRAINING, CurrentPool: "pool1", DesiredPool: "pool2", } labels := make(map[string]string) for _, label := range testHostInfo.Labels { labels[label.Key] = label.Value } err := db.Create( ctx, testHostInfo.Hostname, testHostInfo.Ip, testHostInfo.State, testHostInfo.GoalState, labels, testHostInfo.CurrentPool, testHostInfo.DesiredPool) s.Error(err) s.Equal("Create failed", err.Error()) _, err = db.Get(ctx, testHostInfo.Hostname) s.Error(err) s.Equal("Get failed", err.Error()) _, err = db.GetAll(ctx) s.Error(err) s.Equal("GetAll failed", err.Error()) err = db.Delete(ctx, testHostInfo.Hostname) s.Error(err) s.Equal("Delete failed", err.Error()) } func (s *HostInfoObjectTestSuite) TestNewHostInfoFromHostInfoObject() { hostInfoObject := &HostInfoObject{ Hostname: &base.OptionalString{Value: "hostname"}, IP: "1.2.3.4", State: "HOST_STATE_UP", GoalState: "HOST_STATE_DRAINING", Labels: "{}", UpdateTime: time.Now(), } info, err := newHostInfoFromHostInfoObject(hostInfoObject) s.NoError(err) s.Equal( &hostpb.HostInfo{ Hostname: "hostname", Ip: "1.2.3.4", State: hostpb.HostState_HOST_STATE_UP, GoalState: hostpb.HostState_HOST_STATE_DRAINING, }, info, ) }
ext-trigger.component.js
import _inheritsLoose from "@babel/runtime/helpers/inheritsLoose";
import Ext_field_trigger_Trigger from './Ext/field/trigger/Trigger.js';
import ElementParser from './common/ElementParser.js';

var EWCTrigger = /*#__PURE__*/function (_Ext_field_trigger_Tr) {
  _inheritsLoose(EWCTrigger, _Ext_field_trigger_Tr);

  function EWCTrigger() {
    var _this;
    _this = _Ext_field_trigger_Tr.call(this, [], []) || this;
    _this.xtype = 'trigger';
    return _this;
  }

  return EWCTrigger;
}(Ext_field_trigger_Trigger);

export { EWCTrigger as default };

try {
  if (window.customElements.get('ext-trigger') == undefined) {
    window.customElements.define('ext-trigger', ElementParser.withParsedCallback(EWCTrigger));
  }
} catch (e) {
  if (window.customElements.get('ext-trigger') == undefined) {
    window.customElements.define('ext-trigger', EWCTrigger);
  }
}
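Once this module has been imported, the tag can be instantiated like any other element; a minimal sketch (the attachment point is illustrative):

// Hypothetical usage sketch after loading ext-trigger.component.js.
import './ext-trigger.component.js';

const trigger = document.createElement('ext-trigger');
document.body.appendChild(trigger); // upgraded by the customElements registration above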
pixel.rs
use super::Authentication; use super::endpoint; use super::response; use super::response::{ApiRequestResult}; use super::http_client::{HttpClient, RequestContext, MethodType, HeaderType}; use super::error::PixelaClientError; use std::collections::HashMap; use failure::Error; use serde_json; use serde_json::{Number}; /// Data representing the quantity of each day. #[derive(Serialize, Deserialize, Debug)] pub struct Pixel { /// The date on which the quantity is to be recorded. It is specified in yyyyMMdd format. pub date: String, /// Specify the quantity to be registered on the specified date. /// Validation rule: int^\-?[0-9]+ float^\-?[0-9]+\.[0-9]+ pub quantity: String, } #[derive(Serialize, Deserialize, Debug)] pub struct PixelQuantity { pub quantity: Number, } pub(crate) fn create
<T>(auth: &Authentication, graph_id: &str, pixel: &Pixel) -> Result<(), Error> where T: HttpClient { let body = serde_json::to_string(pixel)?; let uri = endpoint::graph(&auth.username, graph_id); let context = RequestContext::new( &uri, MethodType::Post, Some(&body), Some(&auth.token), ); let body = T::do_request(&context)?; response::build_result(&body) } pub(crate) fn update<T>(auth: &Authentication, graph_id: &str, pixel: &Pixel) -> Result<(), Error> where T: HttpClient { let mut hash: HashMap<&str, &str> = HashMap::new(); hash.insert("quantity", &pixel.quantity); let body = serde_json::to_string(&hash)?; let uri = endpoint::pixel(&auth.username, graph_id, &pixel.date); let context = RequestContext::new( &uri, MethodType::Put, Some(&body), Some(&auth.token), ); let body = T::do_request(&context)?; response::build_result(&body) } pub(crate) fn delete<T>(auth: &Authentication, graph_id: &str, date: &str) -> Result<(), Error> where T: HttpClient { let uri = endpoint::pixel(&auth.username, graph_id, date); let context = RequestContext::new( &uri, MethodType::Delete, None, Some(&auth.token), ); let body = T::do_request(&context)?; response::build_result(&body) } pub(crate) fn get<T>(auth: &Authentication, graph_id: &str, date: &str) -> Result<Pixel, Error> where T: HttpClient { let uri = endpoint::pixel(&auth.username, graph_id, date); let context = RequestContext::new( &uri, MethodType::Get, None, Some(&auth.token), ); let body = T::do_request(&context)?; let res: Result<ApiRequestResult, _> = serde_json::from_str(&body); if let Ok(v) = res { return Err(PixelaClientError::RequestNotSuccess(v.message).into()); } let res: PixelQuantity = serde_json::from_str(&body)?; Ok(Pixel { date: date.to_owned(), quantity: res.quantity.to_string(), }) } pub(crate) fn increment<T>(auth: &Authentication, graph_id: &str) -> Result<(), Error> where T: HttpClient { let uri = endpoint::increment(&auth.username, graph_id); let mut context = RequestContext::new( &uri, MethodType::Put, None, Some(&auth.token), ); context.insert_header(HeaderType::ContentLength, "0"); let body = T::do_request(&context)?; response::build_result(&body) } pub(crate) fn decrement<T>(auth: &Authentication, graph_id: &str) -> Result<(), Error> where T: HttpClient { let uri = endpoint::decrement(&auth.username, graph_id); let mut context = RequestContext::new( &uri, MethodType::Put, None, Some(&auth.token), ); context.insert_header(HeaderType::ContentLength, "0"); let body = T::do_request(&context)?; response::build_result(&body) } #[cfg(test)] mod pixel_test { use super::*; #[test] fn pixel_quantity_deserialize_test() { let res: Result<PixelQuantity, _> = serde_json::from_str(r#"{"quantity":50}"#); if let Ok(v) = res { assert_eq!(v.quantity.to_string(), "50"); } else { panic!("failed serialize params"); }; } }
cmd.go
package cmd import ( "apim-rest-client/comm" "apim-rest-client/constants" "apim-rest-client/dcr" "apim-rest-client/persist" "apim-rest-client/token" "bytes" "flag" "fmt" "io/ioutil" "net/http" "os" ) type APIOptions struct { API string Method string Resource string Headers *FlagMap QueryParams *FlagMap FormData *FlagMap Body string IsVerbose bool } type BasePaths struct { PublisherAPI string StoreAPI string AdminAPI string } func Validate(apiOptions *APIOptions) { if apiOptions.IsVerbose { fmt.Printf("apiOptions: %+v\n", apiOptions) } if !persist.IsConfigExists() { fmt.Println("'config.json' file does not exist. Please execute 'arc init' to create the config file") os.Exit(1) } if stringInList(constants.UNDEFINED_STRING, []string{apiOptions.API, apiOptions.Method, apiOptions.Resource}) { fmt.Println("Mandatory arguments 'api', 'method' and 'resource' need to be provided.") fmt.Println() flag.Usage() os.Exit(1) } if !stringInList(apiOptions.API, []string{constants.PublisherAPI, constants.StoreAPI, constants.AdminAPI}) { fmt.Printf("Unsupported value %s provided for the 'api' argument", apiOptions.API) fmt.Println() fmt.Println() flag.Usage() os.Exit(1) } if !stringInList(apiOptions.Method, []string{constants.GET, constants.POST, constants.PUT, constants.DELETE}) { fmt.Printf("Unsupported value %s provided for the 'method' argument", apiOptions.Method) fmt.Println() fmt.Println() flag.Usage() os.Exit(1) } } func RefreshExistingTokens(confJSON *persist.Config, isVerbose bool) { if isVerbose { fmt.Println("Credentials already exist") } credentials := persist.ReadAppCredentials() tokenResp, error := token.RefreshToken(confJSON.TokenURL, credentials.ClientID, credentials.ClientSecret, credentials.RefreshToken, confJSON.Scope, isVerbose) if error != nil { fmt.Printf("Error returned when refreshing token. error : %s, error_description : %s\n", error.ErrorType, error.ErrorDescription) if error.ErrorType == "invalid_client" { fmt.Println("\nRegistered client does not exist, please execute `arc clear` and then rerun the desired command.") os.Exit(1) } tokenResp, error = token.RequestToken_PasswordGrant(confJSON.TokenURL, credentials.ClientID, credentials.ClientSecret, confJSON.UserName, confJSON.Password, confJSON.Scope, isVerbose) } if error == nil { // Store new access token and refresh token credentials.AccessToken = tokenResp.AccessToken credentials.RefreshToken = tokenResp.RefreshToken
persist.SaveAppCredentials(&credentials) } else { fmt.Printf("Error returned when requesting new token. error : %s, error_description : %s\n", error.ErrorType, error.ErrorDescription) } } func RegisterClient(confJSON *persist.Config, isVerbose bool) persist.OAuthCredentials { if isVerbose { fmt.Println("Credentials do not exist") } var dcrRequest dcr.DCRRequest dcr.SetDCRParameters(&dcrRequest, confJSON.UserName) dcrResp := dcr.Register(confJSON.DcrURL, confJSON.UserName, confJSON.Password, dcrRequest, isVerbose) var credentials persist.OAuthCredentials credentials.ClientID = dcrResp.ClientId credentials.ClientSecret = dcrResp.ClientSecret return credentials } func GetTokens(credentials *persist.OAuthCredentials, confJSON *persist.Config, isVerbose bool) { tokenResp, error := token.RequestToken_PasswordGrant(confJSON.TokenURL, credentials.ClientID, credentials.ClientSecret, confJSON.UserName, confJSON.Password, confJSON.Scope, isVerbose) if error == nil { credentials.AccessToken = tokenResp.AccessToken credentials.RefreshToken = tokenResp.RefreshToken persist.SaveAppCredentials(credentials) } else { fmt.Printf("Error returned when requesting new token. error : %s, error_description : %s\n", error.ErrorType, error.ErrorDescription) } } func InvokeAPI(apiOptions *APIOptions, basePaths *BasePaths, token string) { var basePath string switch apiOptions.API { case constants.PublisherAPI: basePath = basePaths.PublisherAPI case constants.StoreAPI: basePath = basePaths.StoreAPI case constants.AdminAPI: basePath = basePaths.AdminAPI default: fmt.Println("Unsupported API base path") return } fullPath := basePath + apiOptions.Resource var req *http.Request var body *bytes.Buffer var contentType string switch apiOptions.Method { case constants.GET: req = comm.CreateGet(fullPath) case constants.DELETE: req = comm.CreateDelete(fullPath) case constants.POST: body, contentType = getBodyContent(apiOptions) if body == nil { req = comm.CreatePostEmptyBody(fullPath) } else { req = comm.CreatePost(fullPath, body) } case constants.PUT: body, contentType = getBodyContent(apiOptions) if body == nil { req = comm.CreatePutEmptyBody(fullPath) } else { req = comm.CreatePut(fullPath, body) } } comm.SetDefaultRestAPIHeaders(token, contentType, req) headers := http.Header{} for k, v := range *apiOptions.Headers { headers.Add(k, v) } comm.AddHeaders(&headers, req) values := req.URL.Query() for k, v := range *apiOptions.QueryParams { values.Add(k, v) } comm.AddQueryParams(&values, req) if apiOptions.IsVerbose { comm.PrintRequest(constants.REST_API_REQUEST_LOG_STRING, req) } resp := comm.SendHTTPRequest(req) defer resp.Body.Close() comm.PrintResponse(constants.REST_API_RESPONSE_LOG_STRING, resp) } func getBodyContent(apiOptions *APIOptions) (body *bytes.Buffer, contentType string) { if apiOptions.Body != constants.UNDEFINED_STRING { return bytes.NewBuffer(readData(apiOptions.Body)), constants.UNDEFINED_STRING } data := map[string]string{} for k, v := range *apiOptions.FormData { data[k] = v } if len(*apiOptions.FormData) != 0 { return comm.CreateMultipartFormData(&data) } return nil, constants.UNDEFINED_STRING } func readData(data string) []byte { if data[0] == '@' { content, err := ioutil.ReadFile(data[1:]) if err != nil { panic(err) } return content } return []byte(data) } func stringInList(a string, list []string) bool { for _, b := range list { if b == a { return true } } return false }
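For orientation, a hedged driver sketch showing how these helpers compose; it assumes FlagMap iterates like a map[string]string (its definition lives elsewhere in this package), and the base path URL is illustrative:

// Hypothetical in-package driver sketch.
func run(token string, headers, queryParams, formData *FlagMap) {
	opts := &APIOptions{
		API:         constants.PublisherAPI,
		Method:      constants.GET,
		Resource:    "/apis",
		Headers:     headers, // presumably parsed from repeated CLI flags
		QueryParams: queryParams,
		FormData:    formData,
		Body:        constants.UNDEFINED_STRING,
	}
	Validate(opts)
	basePaths := &BasePaths{PublisherAPI: "https://localhost:9443/api/am/publisher/v1"} // illustrative
	InvokeAPI(opts, basePaths, token)
}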
table.component.ts
import { DecimalPipe, DOCUMENT } from '@angular/common'; import { AfterViewInit, ChangeDetectionStrategy, ChangeDetectorRef, Component, ElementRef, EventEmitter, Inject, Input, OnChanges, OnDestroy, Optional, Output, Renderer2, SimpleChange, SimpleChanges, TemplateRef, ViewEncapsulation, ViewChild, } from '@angular/core'; import { Router } from '@angular/router'; import { AlainI18NService, ALAIN_I18N_TOKEN, CNCurrencyPipe, DatePipe, DelonLocaleService, DrawerHelper, LocaleData, ModalHelper, YNPipe, } from '@delon/theme'; import { deepMerge, deepMergeKey, toBoolean, updateHostClass, InputBoolean, InputNumber } from '@delon/util'; import { of, Observable, Subject, from } from 'rxjs'; import { filter, takeUntil } from 'rxjs/operators'; import { STColumnSource } from './table-column-source'; import { STDataSource, STDataSourceResult, STDataSourceOptions } from './table-data-source'; import { STExport } from './table-export'; import { STRowSource } from './table-row.directive'; import { STConfig } from './table.config'; import { STChange, STChangeType, STColumn, STColumnButton, STColumnFilterMenu, STColumnSelection, STData, STError, STExportOptions, STLoadOptions, STMultiSort, STPage, STReq, STRes, STRowClassName, STSingleSort, STStatisticalResults, STWidthMode, STResetColumnsOption, } from './table.interfaces'; import { NzTableComponent } from 'ng-zorro-antd'; @Component({ selector: 'st', exportAs: 'st', templateUrl: './table.component.html', providers: [STDataSource, STRowSource, STColumnSource, STExport, CNCurrencyPipe, DatePipe, YNPipe, DecimalPipe], preserveWhitespaces: false, changeDetection: ChangeDetectionStrategy.OnPush, encapsulation: ViewEncapsulation.None, }) export class STComponent
implements AfterViewInit, OnChanges, OnDestroy { @ViewChild('table', { static: false }) orgTable: NzTableComponent; /** 请求体配置 */ @Input() get req() { return this._req; } set req(value: STReq) { this._req = deepMerge({}, this._req, this.cog.req, value); } /** 返回体配置 */ @Input() get res() { return this._res; } set res(value: STRes) { const item = deepMergeKey({}, true, this.cog.res, value); const reName = item.reName; if (!Array.isArray(reName.list)) reName.list = reName.list.split('.'); if (!Array.isArray(reName.total)) reName.total = reName.total.split('.'); this._res = item; } /** 分页器配置 */ @Input() get page() { return this._page; } set page(value: STPage) { this.clonePage = value; const item = deepMergeKey({}, true, this.cog.page, value); const { total } = item; if (typeof total === 'string' && total.length) { this.totalTpl = total; } else if (toBoolean(total)) { this.totalTpl = this.locale.total; } else { this.totalTpl = ''; } this._page = item; } /** 是否多排序,当 `sort` 多个相同值时自动合并,建议后端支持时使用 */ @Input() get multiSort() { return this._multiSort; } set multiSort(value: any) { if (typeof value === 'boolean' && !toBoolean(value)) { this._multiSort = null; return; } this._multiSort = { ...(typeof value === 'object' ? value : {}), }; } @Input() set widthMode(value: STWidthMode) { this._widthMode = { type: 'default', strictBehavior: 'truncate', ...value }; } get widthMode() { return this._widthMode; } // #endregion constructor( @Optional() @Inject(ALAIN_I18N_TOKEN) i18nSrv: AlainI18NService, private cdr: ChangeDetectorRef, private cog: STConfig, private router: Router, private el: ElementRef, private renderer: Renderer2, private exportSrv: STExport, private modalHelper: ModalHelper, private drawerHelper: DrawerHelper, @Inject(DOCUMENT) private doc: any, private columnSource: STColumnSource, private dataSource: STDataSource, private delonI18n: DelonLocaleService, ) { this.delonI18n.change.pipe(takeUntil(this.unsubscribe$)).subscribe(() => { this.locale = this.delonI18n.getData('st'); if (this._columns.length > 0) { this.page = this.clonePage; this.cd(); } }); this.copyCog = deepMergeKey(new STConfig(), true, cog); delete this.copyCog.multiSort; Object.assign(this, this.copyCog); if (cog.multiSort && cog.multiSort.global !== false) { this.multiSort = { ...cog.multiSort }; } i18nSrv.change .pipe( takeUntil(this.unsubscribe$), filter(() => this._columns.length > 0), ) .subscribe(() => this.refreshColumns()); } private get routerState() { const { pi, ps, total } = this; return { pi, ps, total }; } private unsubscribe$ = new Subject<void>(); private totalTpl = ``; private clonePage: STPage; private copyCog: STConfig; locale: LocaleData = {}; _data: STData[] = []; _statistical: STStatisticalResults = {}; _isPagination = true; _allChecked = false; _allCheckedDisabled = false; _indeterminate = false; _columns: STColumn[] = []; // #region fields @Input() data: string | STData[] | Observable<STData[]>; private _req: STReq; private _res: STRes; @Input() columns: STColumn[] = []; @Input() @InputNumber() ps = 10; @Input() @InputNumber() pi = 1; @Input() @InputNumber() total = 0; private _page: STPage; _loading = false; /** 是否显示Loading */ @Input() loading: boolean | null = null; /** 延迟显示加载效果的时间(防止闪烁) */ @Input() @InputNumber() loadingDelay = 0; @Input() loadingIndicator: TemplateRef<void>; /** 是否显示边框 */ @Input() @InputBoolean() bordered = false; /** table大小 */ @Input() size: 'small' | 'middle' | 'default'; /** 纵向支持滚动,也可用于指定滚动区域的高度:`{ y: '300px', x: '300px' }` */ @Input() scroll: { y?: string; x?: string }; @Input() 
@InputBoolean() virtualScroll = false; @Input() @InputNumber() virtualItemSize = 54; @Input() @InputNumber() virtualMaxBufferPx = 200; @Input() @InputNumber() virtualMinBufferPx = 100; /** * 单排序规则 * - 若不指定,则返回:`columnName=ascend|descend` * - 若指定,则返回:`sort=columnName.(ascend|descend)` */ @Input() singleSort: STSingleSort | null = null; private _multiSort: STMultiSort | null; @Input() rowClassName: STRowClassName; private _widthMode: STWidthMode; /** `header` 标题 */ @Input() header: string | TemplateRef<void>; /** `footer` 底部 */ @Input() footer: string | TemplateRef<void>; /** 额外 `body` 顶部内容 */ @Input() bodyHeader: TemplateRef<STStatisticalResults>; /** 额外 `body` 内容 */ @Input() body: TemplateRef<STStatisticalResults>; @Input() @InputBoolean() expandRowByClick = false; @Input() @InputBoolean() expandAccordion = false; /** `expand` 可展开,当数据源中包括 `expand` 表示展开状态 */ @Input() expand: TemplateRef<{ $implicit: {}; column: STColumn }>; @Input() noResult: string | TemplateRef<void>; @Input() widthConfig: string[]; /** 行单击多少时长之类为双击(单位:毫秒),默认:`200` */ @Input() @InputNumber() rowClickTime = 200; @Input() @InputBoolean() responsive: boolean = true; @Input() @InputBoolean() responsiveHideHeaderFooter: boolean; /** 请求异常时回调 */ @Output() readonly error = new EventEmitter<STError>(); /** * 变化时回调,包括:`pi`、`ps`、`checkbox`、`radio`、`sort`、`filter`、`click`、`dblClick` 变动 */ @Output() readonly change = new EventEmitter<STChange>(); private rowClickCount = 0; cd() { this.cdr.detectChanges(); return this; } renderTotal(total: string, range: string[]) { return this.totalTpl ? this.totalTpl .replace('{{total}}', total) .replace('{{range[0]}}', range[0]) .replace('{{range[1]}}', range[1]) : ''; } isTruncate(column: STColumn): boolean { return !!column.width && this.widthMode.strictBehavior === 'truncate'; } columnClass(column: STColumn): string | null { return column.className || (this.isTruncate(column) ? 
'text-truncate' : null); } private changeEmit(type: STChangeType, data?: any) { const res: STChange = { type, pi: this.pi, ps: this.ps, total: this.total, }; if (data != null) { res[type] = data; } this.change.emit(res); } // #region data /** * 获取过滤后所有数据 * - 本地数据:包含排序、过滤后不分页数据 * - 远程数据:不传递 `pi`、`ps` 两个参数 */ get filteredData(): Promise<STData[]> { return this.loadData({ paginator: false } as any).then(res => res.list); } private setLoading(val: boolean): void { if (this.loading == null) { this._loading = val; } } private loadData(options?: STDataSourceOptions): Promise<STDataSourceResult> { const { pi, ps, data, req, res, page, total, singleSort, multiSort, rowClassName } = this; return this.dataSource.process({ pi, ps, total, data, req, res, page, columns: this._columns, singleSort, multiSort, rowClassName, paginator: true, ...options, }); } private loadPageData(): Promise<this> { this.setLoading(true); return this.loadData() .then(result => { this.setLoading(false); if (typeof result.pi !== 'undefined') { this.pi = result.pi; } if (typeof result.ps !== 'undefined') { this.ps = result.ps; } if (typeof result.total !== 'undefined') { this.total = result.total; } if (typeof result.pageShow !== 'undefined') { this._isPagination = result.pageShow; } this._data = result.list as STData[]; this._statistical = result.statistical as STStatisticalResults; return this._data; }) .then(() => this._refCheck()) .catch(error => { this.setLoading(false); this.cdr.detectChanges(); this.error.emit({ type: 'req', error }); return this; }); } /** 清空所有数据 */ clear(cleanStatus = true): this { if (cleanStatus) { this.clearStatus(); } this._data = []; return this.cd(); } /** 清空所有状态 */ clearStatus(): this { return this.clearCheck() .clearRadio() .clearFilter() .clearSort(); } /** * 根据页码重新加载数据 * * @param pi 指定当前页码,默认:`1` * @param extraParams 重新指定 `extraParams` 值 * @param options 选项 */ load(pi = 1, extraParams?: {}, options?: STLoadOptions) { if (pi !== -1) this.pi = pi; if (typeof extraParams !== 'undefined') { this._req.params = options && options.merge ? 
{ ...this._req.params, ...extraParams } : extraParams; } this._change('pi'); return this; } /** * 重新刷新当前页 * @param extraParams 重新指定 `extraParams` 值 */ reload(extraParams?: {}, options?: STLoadOptions) { return this.load(-1, extraParams, options); } /** * 重置且重新设置 `pi` 为 `1`,包含以下值: * - `check` 数据 * - `radio` 数据 * - `sort` 数据 * - `fileter` 数据 * * @param extraParams 重新指定 `extraParams` 值 */ reset(extraParams?: {}, options?: STLoadOptions) { this.clearStatus().load(1, extraParams, options); return this; } private _toTop() { if (!this.page.toTop) return; const el = this.el.nativeElement as HTMLElement; if (this.scroll) { el.querySelector('.ant-table-body')!.scrollTo(0, 0); return; } el.scrollIntoView(); // fix header height this.doc.documentElement.scrollTop -= this.page.toTopOffset!; } _change(type: 'pi' | 'ps') { if (type === 'pi' || (type === 'ps' && this.pi <= Math.ceil(this.total / this.ps))) { this.loadPageData().then(() => this._toTop()); } this.changeEmit(type); } _click(e: Event, item: STData, col: STColumn) { e.preventDefault(); e.stopPropagation(); const res = col.click!(item, this); if (typeof res === 'string') { this.router.navigateByUrl(res, { state: this.routerState }); } return false; } private closeOtherExpand(item: STData) { if (this.expandAccordion === false) return; this._data.filter(i => i !== item).forEach(i => (i.expand = false)); } _rowClick(e: Event, item: STData, index: number) { if ((e.target as HTMLElement).nodeName === 'INPUT') return; const { expand, expandRowByClick, rowClickTime } = this; if (!!expand && item.showExpand !== false && expandRowByClick) { item.expand = !item.expand; this.closeOtherExpand(item); this.changeEmit('expand', item); return; } ++this.rowClickCount; if (this.rowClickCount !== 1) return; setTimeout(() => { const data = { e, item, index }; if (this.rowClickCount === 1) { this.changeEmit('click', data); } else { this.changeEmit('dblClick', data); } this.rowClickCount = 0; }, rowClickTime); } _expandChange(item: STData): void { this.closeOtherExpand(item); this.changeEmit('expand', item); } /** 移除某行数据 */ removeRow(data: STData | STData[]) { if (!Array.isArray(data)) { data = [data]; } (data as STData[]) .map(item => this._data.indexOf(item)) .filter(pos => pos !== -1) .forEach(pos => this._data.splice(pos, 1)); // recalculate no this._columns .filter(w => w.type === 'no') .forEach(c => this._data.forEach((i, idx) => (i._values[c.__point] = { text: this.dataSource.getNoIndex(i, c, idx), org: idx }))); return this.cd(); } // #endregion // #region sort sort(col: STColumn, idx: number, value: any) { if (this.multiSort) { col._sort.default = value; col._sort.tick = this.dataSource.nextSortTick; } else { this._columns.forEach((item, index) => (item._sort.default = index === idx ? 
value : null)); } this.loadPageData(); const res = { value, map: this.dataSource.getReqSortMap(this.singleSort, this.multiSort, this._columns), column: col, }; this.changeEmit('sort', res); } clearSort() { this._columns.forEach(item => (item._sort.default = null)); return this; } // #endregion // #region filter private handleFilter(col: STColumn) { this.columnSource.updateDefault(col.filter!); this.loadPageData(); this.changeEmit('filter', col); } _filterConfirm(col: STColumn) { this.handleFilter(col); } _filterRadio(col: STColumn, item: STColumnFilterMenu, checked: boolean) { col.filter!.menus!.forEach(i => (i.checked = false)); item.checked = checked; } _filterClear(col: STColumn) { this.columnSource.cleanFilter(col); this.handleFilter(col); } clearFilter() { this._columns.filter(w => w.filter && w.filter.default === true).forEach(col => this.columnSource.cleanFilter(col)); return this; } // #endregion // #region checkbox /** 清除所有 `checkbox` */ clearCheck(): this { return this._checkAll(false); } private _refCheck(): this { const validData = this._data.filter(w => !w.disabled); const checkedList = validData.filter(w => w.checked === true); this._allChecked = checkedList.length > 0 && checkedList.length === validData.length; const allUnChecked = validData.every(value => !value.checked); this._indeterminate = !this._allChecked && !allUnChecked; this._allCheckedDisabled = this._data.length === this._data.filter(w => w.disabled).length; return this.cd(); } _checkAll(checked?: boolean): this { checked = typeof checked === 'undefined' ? this._allChecked : checked; this._data.filter(w => !w.disabled).forEach(i => (i.checked = checked)); return this._refCheck()._checkNotify(); } _checkSelection(i: STData, value: boolean) { i.checked = value; return this._refCheck()._checkNotify(); } _rowSelection(row: STColumnSelection): this { row.select(this._data); return this._refCheck()._checkNotify(); } _checkNotify(): this { const res = this._data.filter(w => !w.disabled && w.checked === true); this.changeEmit('checkbox', res); return this; } // #endregion // #region radio /** 清除所有 `radio` */ clearRadio(): this { this._data.filter(w => w.checked).forEach(item => (item.checked = false)); this.changeEmit('radio', null); return this; } _refRadio(checked: boolean, item: STData): this { // if (item.disabled === true) return; this._data.filter(w => !w.disabled).forEach(i => (i.checked = false)); item.checked = checked; this.changeEmit('radio', item); return this; } // #endregion // #region buttons _btnClick(record: STData, btn: STColumnButton) { if (btn.type === 'modal' || btn.type === 'static') { const { modal } = btn; const obj = { [modal!.paramsName!]: record }; (this.modalHelper[btn.type === 'modal' ? 
'create' : 'createStatic'] as any)( modal!.component, { ...obj, ...(modal!.params && modal!.params!(record)) }, deepMergeKey({}, true, this.copyCog.modal, modal), ) .pipe(filter(w => typeof w !== 'undefined')) .subscribe(res => this.btnCallback(record, btn, res)); return; } else if (btn.type === 'drawer') { const { drawer } = btn; const obj = { [drawer!.paramsName!]: record }; this.drawerHelper .create( drawer!.title!, drawer!.component, { ...obj, ...(drawer!.params && drawer!.params!(record)) }, deepMergeKey({}, true, this.copyCog.drawer, drawer), ) .pipe(filter(w => typeof w !== 'undefined')) .subscribe(res => this.btnCallback(record, btn, res)); return; } else if (btn.type === 'link') { const clickRes = this.btnCallback(record, btn); if (typeof clickRes === 'string') { this.router.navigateByUrl(clickRes, { state: this.routerState }); } return; } this.btnCallback(record, btn); } private btnCallback(record: STData, btn: STColumnButton, modal?: any) { if (!btn.click) return; if (typeof btn.click === 'string') { switch (btn.click) { case 'load': this.load(); break; case 'reload': this.reload(); break; } } else { return btn.click(record, modal, this); } } _btnText(record: STData, btn: STColumnButton) { // tslint:disable-next-line: deprecation if (btn.format) { // tslint:disable-next-line: deprecation return btn.format(record, btn); } return typeof btn.text === 'function' ? btn.text(record, btn) : btn.text || ''; } _validBtns(btns: STColumnButton[], item: STData, col: STColumn): STColumnButton[] { return btns.filter(btn => { const result = btn.iif!(item, btn, col); const isRenderDisabled = btn.iifBehavior === 'disabled'; btn._result = result; btn._disabled = !result && isRenderDisabled; return result || isRenderDisabled; }); } // #endregion // #region export /** * Export the current page; make sure `XlsxModule` is registered * @param newData Re-specify the data; `true` means using the `filteredData` data * @param opt Extra options */ export(newData?: STData[] | true, opt?: STExportOptions) { (newData === true ? from(this.filteredData) : of(newData || this._data)).subscribe((res: STData[]) => this.exportSrv.export({ ...opt, _d: res, _c: this._columns, }), ); } // #endregion get cdkVirtualScrollViewport() { return this.orgTable.cdkVirtualScrollViewport; } resetColumns(options?: STResetColumnsOption) { if (options) { if (typeof options.columns !== 'undefined') { this.columns = options.columns; } if (typeof options.pi !== 'undefined') { this.pi = options.pi; } if (typeof options.ps !== 'undefined') { this.ps = options.ps; } } return this.refreshColumns().loadPageData(); } private refreshColumns(): this { this._columns = this.columnSource.process(this.columns); return this; } private setClass() { const { type, strictBehavior } = this.widthMode; updateHostClass(this.el.nativeElement, this.renderer, { [`st`]: true, [`st__p-${this.page.placement}`]: this.page.placement, [`st__width-${type}`]: true, [`st__width-strict-${strictBehavior}`]: type === 'strict', [`ant-table-rep`]: this.responsive, [`ant-table-rep__hide-header-footer`]: this.responsiveHideHeaderFooter, }); } ngAfterViewInit() { this.columnSource.restoreAllRender(this._columns); } ngOnChanges(changes: { [P in keyof this]?: SimpleChange } & SimpleChanges): void { if (changes.columns) { this.refreshColumns(); } if (changes.data && changes.data.currentValue) { this.loadPageData(); } if (changes.loading) { this._loading = changes.loading.currentValue; } this.setClass(); } ngOnDestroy(): void { const { unsubscribe$ } = this; unsubscribe$.next(); unsubscribe$.complete(); } }
STComponent
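// Illustrative usage sketch (not part of the original file; the host component and
// template reference are hypothetical): driving the table imperatively through the
// public methods shown above.
//
//   @ViewChild('st') private readonly st: STComponent;
//   this.st.reload({ q: 'keyword' }); // re-request the current page with extra params
//   this.st.reset();                  // clear check/radio/sort/filter state, back to `pi = 1`
//   this.st.removeRow(row);           // drop a row and renumber `no` columns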
interlace_test.go
package ffmpeg import ( "encoding/json" "fmt" "io/ioutil" "path/filepath" "testing" "github.com/mh-orange/cmd" ) func TestInterlaceRepeatedInfoUnmarshalText(t *testing.T) { tests := []struct { input string wantNeither int wantTop int wantBottom int wantFrames int wantErr bool }{ {"Fields: Neither: 1 Top: 2 Bottom: 3", 1, 2, 3, 6, false}, {"Fields: neither: 1 Top: 2 Bottom: 3", 0, 0, 0, 0, true}, {"foo bar Fields: Neither: 1 Top: 2 Bottom: 3", 1, 2, 3, 6, false}, {"foo bar: Neither: 1 Top: 2 Bottom: 3", 0, 0, 0, 0, true}, } for _, test := range tests { t.Run(test.input, func(t *testing.T) { iri := &InterlaceRepeatedInfo{} err := iri.parse([]byte(test.input)) if err == nil { if test.wantErr { t.Errorf("Want error got nil") } else { if test.wantNeither != iri.Neither { t.Errorf("Neither: want %d got %d", test.wantNeither, iri.Neither) } if test.wantTop != iri.Top { t.Errorf("Top: want %d got %d", test.wantTop, iri.Top) } if test.wantBottom != iri.Bottom { t.Errorf("Bottom: want %d got %d", test.wantBottom, iri.Bottom) } if test.wantFrames != iri.Frames() { t.Errorf("Frames: want %d got %d", test.wantFrames, iri.Frames()) } } } else if !test.wantErr { t.Errorf("Unexpected error: %v", err) } }) } } func TestInterlaceFieldInfoUnmarshalText(t *testing.T) { tests := []struct { input string wantTFF int wantBFF int wantProgressive int wantUndetermined int wantErr bool }{ {"detection: TFF: 1 BFF: 2 Progressive: 3 Undetermined: 4", 1, 2, 3, 4, false}, {"detection: tff: 1 BFF: 2 Progressive: 3 Undetermined: 4", 0, 0, 0, 0, true}, {"Detection: TFF: 1 BFF: 2 Progressive: 3 Undetermined: 4", 0, 0, 0, 0, true}, } for _, test := range tests { t.Run(test.input, func(t *testing.T) { iri := &InterlaceFieldInfo{} err := iri.parse([]byte(test.input)) if err == nil { if test.wantErr { t.Errorf("want error got nil") } else { if test.wantTFF != iri.TFF { t.Errorf("TFF: want %d got %d", test.wantTFF, iri.TFF) } if test.wantBFF != iri.BFF { t.Errorf("BFF: want %d got %d", test.wantBFF, iri.BFF) } if test.wantProgressive != iri.Progressive { t.Errorf("Progressive: want %d got %d", test.wantProgressive, iri.Progressive) } if test.wantUndetermined != iri.Undetermined { t.Errorf("Undetermined: want %d got %d", test.wantUndetermined, iri.Undetermined) } } } else if !test.wantErr { t.Errorf("Unexpected error: %v", err) } }) } } func testInfo(values ...int) InterlaceInfo { return InterlaceInfo{ InterlaceRepeatedInfo{values[0], values[1], values[2]}, InterlaceFieldInfo{values[3], values[4], values[5], values[6]}, InterlaceFieldInfo{values[7], values[8], values[9], values[10]}, } } func TestInterlaceInfo(t *testing.T) { tests := []struct { input InterlaceInfo wantTFF int wantBFF int wantInterlaced int wantProgressive int wantDetermined int wantUndetermined int wantFrames int }{ {testInfo(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), 12, 14, 26, 16, 42, 18, 6}, } for i, test := range tests { if test.input.TFF() != test.wantTFF { t.Errorf("tests[%d] TFF: want %d got %d", i, test.wantTFF, test.input.TFF()) } if test.input.BFF() != test.wantBFF { t.Errorf("tests[%d] BFF: want %d got %d", i, test.wantBFF, test.input.BFF()) } if test.input.Interlaced() != test.wantInterlaced { t.Errorf("tests[%d] Interlaced: want %d got %d", i, test.wantInterlaced, test.input.Interlaced()) } if test.input.Progressive() != test.wantProgressive { t.Errorf("tests[%d] Progressive: want %d got %d", i, test.wantProgressive, test.input.Progressive()) } if test.input.Determined() != test.wantDetermined { t.Errorf("tests[%d] Determined: want %d got %d", i, 
test.wantDetermined, test.input.Determined()) } if test.input.Undetermined() != test.wantUndetermined { t.Errorf("tests[%d] Undetermined: want %d got %d", i, test.wantUndetermined, test.input.Undetermined()) } if test.input.Frames() != test.wantFrames { t.Errorf("tests[%d] Frames: want %d got %d", i, test.wantFrames, test.input.Frames()) } } } func TestInterlaceInfoType(t *testing.T) { tests := []struct { input InterlaceInfo want InterlaceType wantErr error }{ {testInfo(1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0), Unknown, ErrShortStream}, {testInfo(1000, 0, 0, 0, 0, 1000, 0, 0, 0, 1000, 0), Progressive, nil}, {testInfo(1000, 0, 0, 1000, 0, 0, 0, 1000, 0, 0, 0), InterlacedTff, nil}, {testInfo(1000, 0, 0, 0, 1000, 0, 0, 0, 1000, 0, 0), InterlacedBff, nil}, {testInfo(1000, 0, 0, 500, 500, 0, 0, 500, 500, 0, 0), Interlaced, nil}, } for i, test := range tests { got, err := test.input.Type() if err == test.wantErr { if err == nil { if test.want != got { t.Errorf("tests[%d] want %v got %v", i, test.want, got) } } } else { t.Errorf("tests[%d] unexpected error: %v", i, err) } } } func TestInterlaceTranscode(t *testing.T)
{ names, err := filepath.Glob("testdata/interlace*.txt") if err != nil { t.Errorf("Unexpected error: %v", err) } for _, inputFile := range names { t.Run(inputFile, func(t *testing.T) { jsonFile := fmt.Sprintf("%s.json", inputFile[:len(inputFile)-len(".txt")]) inputTxt, err := ioutil.ReadFile(inputFile) if err != nil { t.Fatalf("Unexpected error: %v", err) } inputJSON, err := ioutil.ReadFile(jsonFile) if err != nil { t.Fatalf("Unexpected error: %v", err) } want := InterlaceInfo{} err = json.Unmarshal(inputJSON, &want) if err != nil { t.Fatalf("Unexpected error: %v", err) } c := &cmd.TestCmd{Stderr: inputTxt} Ffmpeg = c it := NewInterlaceTranscoder() got, err := it.transcode(Input()) if err == nil { if want != got { t.Errorf("want %v got %v", want, got) } } }) } }
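// Note on the expected values in TestInterlaceInfo above (an illustrative reading
// derived from testInfo's field order, not part of the original file): with inputs
// 1..11, TFF() appears to sum the TFF counts of both field-info blocks (4+8=12),
// BFF() likewise (5+9=14), Interlaced() is their sum (26), Progressive() is 6+10=16,
// Determined() is 26+16=42, Undetermined() is 7+11=18, and Frames() counts the
// repeated-field info (1+2+3=6).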
device_deployment.go
// Copyright 2016 Mender Software AS // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package deployments import ( "time" "github.com/asaskevich/govalidator" "github.com/mendersoftware/deployments/resources/images" "github.com/satori/go.uuid" ) // Deployment statuses const ( DeviceDeploymentStatusDownloading = "downloading" DeviceDeploymentStatusInstalling = "installing" DeviceDeploymentStatusRebooting = "rebooting" DeviceDeploymentStatusPending = "pending" DeviceDeploymentStatusSuccess = "success" DeviceDeploymentStatusFailure = "failure" DeviceDeploymentStatusNoArtifact = "noartifact" DeviceDeploymentStatusAlreadyInst = "already-installed" DeviceDeploymentStatusAborted = "aborted" ) type DeviceDeployment struct { // Internal field of initial creation of deployment Created *time.Time `json:"created" valid:"required"` // Update finish time Finished *time.Time `json:"finished,omitempty" valid:"-"` // Status Status *string `json:"status" valid:"required"` // Device id DeviceId *string `json:"id" valid:"required"` // Deployment id DeploymentId *string `json:"-" valid:"uuidv4,required"` // ID Id *string `json:"-" bson:"_id" valid:"uuidv4,required"` // Assigned software image Image *images.SoftwareImage `json:"-" valid:"-"` // Target device type DeviceType *string `json:"device_type,omitempty" valid:"-"` // Presence of deployment log IsLogAvailable bool `json:"log" valid:"-"` } func NewDeviceDeployment(deviceId, deploymentId string) *DeviceDeployment { now := time.Now() initStatus := DeviceDeploymentStatusPending id := uuid.NewV4().String() return &DeviceDeployment{ Status: &initStatus, DeviceId: &deviceId, DeploymentId: &deploymentId, Id: &id, Created: &now, IsLogAvailable: false, } } func (d *DeviceDeployment) Validate() error { _, err := govalidator.ValidateStruct(d) return err } // Deployment statistics wrapper, each value carries a count of deployments // aggregated by state. type Stats map[string]int func NewDeviceDeploymentStats() Stats { statuses := []string{ DeviceDeploymentStatusNoArtifact, DeviceDeploymentStatusFailure, DeviceDeploymentStatusSuccess, DeviceDeploymentStatusPending, DeviceDeploymentStatusRebooting, DeviceDeploymentStatusInstalling, DeviceDeploymentStatusDownloading, DeviceDeploymentStatusAlreadyInst, DeviceDeploymentStatusAborted, } s := make(Stats) // populate statuses with 0s for _, v := range statuses { s[v] = 0 } return s } func IsDeviceDeploymentStatusFinished(status string) bool
// ActiveDeploymentStatuses lists statuses that represent deployment in active state (not finished). func ActiveDeploymentStatuses() []string { return []string{ DeviceDeploymentStatusPending, DeviceDeploymentStatusDownloading, DeviceDeploymentStatusInstalling, DeviceDeploymentStatusRebooting, } } // InstalledDeviceDeployment describes a deployment currently installed on the // device, usually reported by a device type InstalledDeviceDeployment struct { Artifact string `valid:"required"` DeviceType string `valid:"required"` } func (i *InstalledDeviceDeployment) Validate() error { _, err := govalidator.ValidateStruct(i) return err }
{ if status == DeviceDeploymentStatusFailure || status == DeviceDeploymentStatusSuccess || status == DeviceDeploymentStatusNoArtifact || status == DeviceDeploymentStatusAlreadyInst || status == DeviceDeploymentStatusAborted { return true } return false }
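// Illustrative sketch (not part of the original package): Stats and
// IsDeviceDeploymentStatusFinished compose naturally when summarizing a
// deployment, e.g. counting how many devices have reached a final state.
func countFinished(s Stats) int {
	finished := 0
	for status, count := range s {
		if IsDeviceDeploymentStatusFinished(status) {
			finished += count
		}
	}
	return finished
}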
wk6.unwind.js
import express from 'express' const MongoClient = require('mongodb').MongoClient import { red, yellow } from '../logger' const router = express.Router() router.get('/', async (req, res) => { try { const url = 'mongodb://localhost:27017' const client = await MongoClient.connect(url, { useNewUrlParser: true }) const db = await client.db('crunchbase') const ret = await db.collection('companies').find({}).toArray() res.send(JSON.stringify({ret})) } catch (e) { red('ERROR: ', e) } }) const executeAggregate = async (query) => { const url = 'mongodb://localhost:27017' const client = await MongoClient.connect(url, { useNewUrlParser: true }) const db = await client.db('crunchbase') const ret = await db.collection('companies').aggregate(query).toArray() return ret } router.get('/unwind-m01s28', async (req, res) => { /* Why match twice? The first $match finds all companies that have a funding round where greylock was a funder in one or more rounds. This can and does bring back docs/rounds that don't have greylock as a funder. The second $match filters these funding rounds out. */ const match = { $match: {'funding_rounds.investments.financial_org.permalink': 'greylock' } } const unwind1 = { $unwind: '$funding_rounds'} const unwind2 = { $unwind: '$funding_rounds.investments'} const project = { $project: { _id: 0, name: 1, fundingOrganization: '$funding_rounds.investments.financial_org.permalink', amount: '$funding_rounds.raised_amount', year: '$funding_rounds.funded_year' } } const q = [ match, unwind1, unwind2, match, project, ] const ret = await executeAggregate(q) // const funder = 'greylock' const funder = 'sv-angel' // const ret2 = ret.filter(r => r.fundingOrganization.includes(funder)) res.send(JSON.stringify({ret})) }) router.get('/companies', async (req, res) => { const q = [ { $match: {'funding_rounds.investments.financial_org.permalink': 'greylock' } }, { $project: { _id: 0, name: 1, founded: { year: '$founded_year', month: '$founded_month', day: '$founded_day' } } } ]
try { // const url = 'mongodb://localhost:27017' // const client = await MongoClient.connect(url, { useNewUrlParser: true }) // const db = await client.db('crunchbase') // const ret = await db.collection('companies').aggregate(q).toArray() const ret = await executeAggregate(q) yellow('ret', ret) res.send(JSON.stringify({ret})) } catch (e) { red('ERROR: ', e) } }) export default router /* Reference https://stackoverflow.com/questions/47662220/db-collection-is-not-a-function-when-using-mongoclient-v3-0 */
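/*
  Illustrative sketch (not part of the original file): the pipeline that
  /unwind-m01s28 assembles and sends to MongoDB. The same $match stage is
  deliberately used twice -- once before the $unwind stages to narrow the
  scan to candidate companies, and once after them to drop the unwound
  investments whose funder is not 'greylock'.

  db.collection('companies').aggregate([
    { $match: { 'funding_rounds.investments.financial_org.permalink': 'greylock' } },
    { $unwind: '$funding_rounds' },
    { $unwind: '$funding_rounds.investments' },
    { $match: { 'funding_rounds.investments.financial_org.permalink': 'greylock' } },
    { $project: { _id: 0, name: 1, fundingOrganization: '$funding_rounds.investments.financial_org.permalink' } }
  ])
*/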
action-select.component.ts
import { filter, switchMap } from 'rxjs/operators'; import { Component, OnInit, OnDestroy } from '@angular/core'; import { ActivatedRoute, Router } from '@angular/router'; import { Observable, EMPTY, Subject, BehaviorSubject, Subscription } from 'rxjs'; import { Actions, Action, Connector, Step } from '@syndesis/ui/platform'; import { CurrentFlowService, FlowEvent, FlowPageService } from '@syndesis/ui/integration/edit-page'; import { ConnectorStore } from '@syndesis/ui/store'; @Component({ selector: 'syndesis-integration-action-select', templateUrl: 'action-select.component.html', styleUrls: ['../../integration-common.scss', './action-select.component.scss'] }) export class
implements OnInit, OnDestroy { flowSubscription: Subscription; actions$: Observable<Actions> = EMPTY; filteredActions$: Subject<Actions> = new BehaviorSubject(<Actions>{}); connector$: Observable<Connector>; loading$: Observable<boolean>; routeSubscription: Subscription; actionsSubscription: Subscription; position: number; step: Step; constructor( public connectorStore: ConnectorStore, public currentFlowService: CurrentFlowService, public flowPageService: FlowPageService, public route: ActivatedRoute, public router: Router ) { this.connector$ = connectorStore.resource; this.loading$ = connectorStore.loading; connectorStore.clear(); } onSelected(action: Action) { this.currentFlowService.events.emit({ kind: 'integration-set-action', position: this.position, action: action, onSave: () => { this.router.navigate(['action-configure', this.position], { relativeTo: this.route.parent }); } }); } goBack() { const step = this.currentFlowService.getStep(this.position); step.stepKind = undefined; this.currentFlowService.events.emit({ kind: 'integration-set-step', position: this.position, step: step, onSave: () => { this.flowPageService.goBack(['step-select', this.position], this.route); } }); } loadActions() { if (!this.currentFlowService.loaded) { return; } // filter the available connections based on where we are in the flow if (this.position === this.currentFlowService.getFirstPosition()) { this.actions$ = this.connector$.pipe( filter(connector => connector !== undefined), switchMap(connector => [ connector.actions.filter(action => action.pattern === 'From') ]) ); } if ( this.position > this.currentFlowService.getFirstPosition() && this.position <= this.currentFlowService.getLastPosition() ) { this.actions$ = this.connector$.pipe( filter(connector => connector !== undefined), switchMap(connector => [ connector.actions.filter(action => action.pattern === 'To') ]) ); } if ( this.position > this.currentFlowService.getFirstPosition() && this.position < this.currentFlowService.getLastPosition() ) { this.actions$ = this.connector$.pipe( filter(connector => connector !== undefined), switchMap(connector => [ connector.actions.filter( action => action.pattern === 'To' || action.pattern === 'Pipe' ) ]) ); } const step = (this.step = this.currentFlowService.getStep(this.position)); if (!step) { /* Safety net */ this.router.navigate(['save-or-add-step'], { relativeTo: this.route.parent }); return; } if (!step.connection) { this.router.navigate(['step-select', this.position], { relativeTo: this.route.parent }); return; } if (step.action) { this.router.navigate(['action-configure', this.position], { relativeTo: this.route.parent }); return; } this.connectorStore.load(step.connection.connectorId); } handleFlowEvent(event: FlowEvent) { switch (event.kind) { case 'integration-updated': this.loadActions(); break; case 'integration-cancel-clicked': this.flowPageService.maybeRemoveStep( this.router, this.route, this.position ); break; default: break; } } ngOnInit() { this.flowSubscription = this.currentFlowService.events.subscribe( (event: FlowEvent) => { this.handleFlowEvent(event); } ); this.actionsSubscription = this.actions$.subscribe(_ => this.currentFlowService.events.emit({ kind: 'integration-action-select', position: this.position }) ); this.route.paramMap.subscribe(params => { this.position = +params.get('position'); this.loadActions(); }); this.connector$.subscribe(connector => { if (connector && connector.id === 'api-provider') { this.router.navigate(['api-provider', 'create'], { relativeTo: 
this.route.parent }); } }); } ngOnDestroy() { if (this.flowSubscription) { this.flowSubscription.unsubscribe(); } if (this.actionsSubscription) { this.actionsSubscription.unsubscribe(); } if (this.routeSubscription) { this.routeSubscription.unsubscribe(); } } }
IntegrationSelectActionComponent
main.go
// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // callgraph: a tool for reporting the call graph of a Go program. // See Usage for details, or run with -help. package main // import "golang.org/x/tools/cmd/callgraph" // TODO(adonovan): // // Features: // - restrict graph to a single package // - output // - functions reachable from root (use digraph tool?) // - unreachable functions (use digraph tool?) // - dynamic (runtime) types // - indexed output (numbered nodes) // - JSON output // - additional template fields: // callee file/line/col import ( "bufio" "bytes" "flag" "fmt" "go/build" "go/token" "io" "log" "os" "runtime" "text/template" "golang.org/x/tools/go/buildutil" "golang.org/x/tools/go/callgraph" "golang.org/x/tools/go/callgraph/cha" "golang.org/x/tools/go/callgraph/rta" "golang.org/x/tools/go/callgraph/static" "golang.org/x/tools/go/packages" "golang.org/x/tools/go/pointer" "golang.org/x/tools/go/ssa" "golang.org/x/tools/go/ssa/ssautil" ) // flags var ( algoFlag = flag.String("algo", "rta", `Call graph construction algorithm (static, cha, rta, pta)`) testFlag = flag.Bool("test", false, "Loads test code (*_test.go) for imported packages") formatFlag = flag.String("format", "{{.Caller}}\t--{{.Dynamic}}-{{.Line}}:{{.Column}}-->\t{{.Callee}}", "A template expression specifying how to format an edge") ptalogFlag = flag.String("ptalog", "", "Location of the points-to analysis log file, or empty to disable logging.") ) func init() { flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc) } const Usage = `callgraph: display the call graph of a Go program. Usage: callgraph [-algo=static|cha|rta|pta] [-test] [-format=...] package... Flags: -algo Specifies the call-graph construction algorithm, one of: static static calls only (unsound) cha Class Hierarchy Analysis rta Rapid Type Analysis pta inclusion-based Points-To Analysis The algorithms are ordered by increasing precision in their treatment of dynamic calls (and thus also computational cost). RTA and PTA require a whole program (main or test), and include only functions reachable from main. -test Include the package's tests in the analysis. -format Specifies the format in which each call graph edge is displayed. One of: digraph output suitable for input to golang.org/x/tools/cmd/digraph. graphviz output in AT&T GraphViz (.dot) format. All other values are interpreted using text/template syntax. The default value is: {{.Caller}}\t--{{.Dynamic}}-{{.Line}}:{{.Column}}-->\t{{.Callee}} The structure passed to the template is (effectively): type Edge struct { Caller *ssa.Function // calling function Callee *ssa.Function // called function // Call site: Filename string // containing file Offset int // offset within file of '(' Line int // line number Column int // column number of call Dynamic string // "static" or "dynamic" Description string // e.g. "static method call" } Caller and Callee are *ssa.Function values, which print as "(*sync/atomic.Mutex).Lock", but other attributes may be derived from them, e.g. Caller.Pkg.Pkg.Path yields the import path of the enclosing package. Consult the go/ssa API documentation for details. 
Examples: Show the call graph of the trivial web server application: callgraph -format digraph $GOROOT/src/net/http/triv.go Same, but show only the packages of each function: callgraph -format '{{.Caller.Pkg.Pkg.Path}} -> {{.Callee.Pkg.Pkg.Path}}' \ $GOROOT/src/net/http/triv.go | sort | uniq Show functions that make dynamic calls into the 'fmt' test package, using the pointer analysis algorithm: callgraph -format='{{.Caller}} -{{.Dynamic}}-> {{.Callee}}' -test -algo=pta fmt | sed -ne 's/-dynamic-/--/p' | sed -ne 's/-->.*fmt_test.*$//p' | sort | uniq Show all functions directly called by the callgraph tool's main function: callgraph -format=digraph golang.org/x/tools/cmd/callgraph | digraph succs golang.org/x/tools/cmd/callgraph.main ` func init() { // If $GOMAXPROCS isn't set, use the full capacity of the machine. // For small machines, use at least 4 threads. if os.Getenv("GOMAXPROCS") == "" { n := runtime.NumCPU() if n < 4 { n = 4 } runtime.GOMAXPROCS(n) } } func main() { flag.Parse() if err := doCallgraph("", "", *algoFlag, *formatFlag, *testFlag, flag.Args()); err != nil { fmt.Fprintf(os.Stderr, "callgraph: %s\n", err) os.Exit(1) } } var stdout io.Writer = os.Stdout func doCallgraph(dir, gopath, algo, format string, tests bool, args []string) error { if len(args) == 0 { fmt.Fprintln(os.Stderr, Usage) return nil } cfg := &packages.Config{ Mode: packages.LoadAllSyntax, Tests: tests, Dir: dir, } if gopath != "" { cfg.Env = append(os.Environ(), "GOPATH="+gopath) // to enable testing } initial, err := packages.Load(cfg, args...) if err != nil { return err } if packages.PrintErrors(initial) > 0 { return fmt.Errorf("packages contain errors") } // Create and build SSA-form program representation. prog, pkgs := ssautil.AllPackages(initial, 0) prog.Build() // -- call graph construction ------------------------------------------ var cg *callgraph.Graph switch algo { case "static": cg = static.CallGraph(prog) case "cha": cg = cha.CallGraph(prog) case "pta": // Set up points-to analysis log file. var ptalog io.Writer if *ptalogFlag != "" { if f, err := os.Create(*ptalogFlag); err != nil { log.Fatalf("Failed to create PTA log file: %s", err) } else { buf := bufio.NewWriter(f) ptalog = buf defer func() { if err := buf.Flush(); err != nil { log.Printf("flush: %s", err) } if err := f.Close(); err != nil { log.Printf("close: %s", err) } }() } } mains, err := mainPackages(pkgs) if err != nil { return err } config := &pointer.Config{ Mains: mains, BuildCallGraph: true, Log: ptalog, } ptares, err := pointer.Analyze(config) if err != nil { return err // internal error in pointer analysis } cg = ptares.CallGraph case "rta": mains, err := mainPackages(pkgs) if err != nil { return err } var roots []*ssa.Function for _, main := range mains { roots = append(roots, main.Func("init"), main.Func("main")) } rtares := rta.Analyze(roots, true) cg = rtares.CallGraph // NB: RTA gives us Reachable and RuntimeTypes too. default: return fmt.Errorf("unknown algorithm: %s", algo)
cg.DeleteSyntheticNodes() // -- output------------------------------------------------------------ var before, after string // Pre-canned formats. switch format { case "digraph": format = `{{printf "%q %q" .Caller .Callee}}` case "graphviz": before = "digraph callgraph {\n" after = "}\n" format = ` {{printf "%q" .Caller}} -> {{printf "%q" .Callee}}` } tmpl, err := template.New("-format").Parse(format) if err != nil { return fmt.Errorf("invalid -format template: %v", err) } // Allocate these once, outside the traversal. var buf bytes.Buffer data := Edge{fset: prog.Fset} fmt.Fprint(stdout, before) if err := callgraph.GraphVisitEdges(cg, func(edge *callgraph.Edge) error { data.position.Offset = -1 data.edge = edge data.Caller = edge.Caller.Func data.Callee = edge.Callee.Func buf.Reset() if err := tmpl.Execute(&buf, &data); err != nil { return err } stdout.Write(buf.Bytes()) if len := buf.Len(); len == 0 || buf.Bytes()[len-1] != '\n' { fmt.Fprintln(stdout) } return nil }); err != nil { return err } fmt.Fprint(stdout, after) return nil } // mainPackages returns the main packages to analyze. // Each resulting package is named "main" and has a main function. func mainPackages(pkgs []*ssa.Package) ([]*ssa.Package, error) { var mains []*ssa.Package for _, p := range pkgs { if p != nil && p.Pkg.Name() == "main" && p.Func("main") != nil { mains = append(mains, p) } } if len(mains) == 0 { return nil, fmt.Errorf("no main packages") } return mains, nil } type Edge struct { Caller *ssa.Function Callee *ssa.Function edge *callgraph.Edge fset *token.FileSet position token.Position // initialized lazily } func (e *Edge) pos() *token.Position { if e.position.Offset == -1 { e.position = e.fset.Position(e.edge.Pos()) // called lazily } return &e.position } func (e *Edge) Filename() string { return e.pos().Filename } func (e *Edge) Column() int { return e.pos().Column } func (e *Edge) Line() int { return e.pos().Line } func (e *Edge) Offset() int { return e.pos().Offset } func (e *Edge) Dynamic() string { if e.edge.Site != nil && e.edge.Site.Common().StaticCallee() == nil { return "dynamic" } return "static" } func (e *Edge) Description() string { return e.edge.Description() }
}
status_active.rs
// THIS FILE IS AUTO-GENERATED use crate::characteristic::{Characteristic, Format, HapType, Inner, Perm}; /// Status Active Characteristic. pub type StatusActive = Characteristic<bool>; /// Creates a new Status Active Characteristic. pub fn
() -> StatusActive { Characteristic::new(Inner::<bool> { hap_type: HapType::StatusActive, format: Format::Bool, perms: vec![Perm::PairedRead, Perm::Events], ..Default::default() }) }
new
forgivingGrimTrigger.py
# Strategy known as "Forgiving Grim Trigger" or "Grudger". # We will cooperate repeatedly until our opponent betrays us twice. # Then, we will get angry and defect for the rest of time. # Memory is the number of times the strategy has been wronged def strategy(history, memory): wronged = memory
wronged += 1 if wronged >= 2: return 0, wronged else: return 1, wronged
if history.shape[1] == 0: wronged = 0 if history.shape[1] >= 1 and history[1, -1] == 0: # Just got wronged.
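# Illustrative sketch (not part of the original file): the prefix, middle, and
# suffix chunks above assemble into the function below. It assumes `history` is
# a 2xN numpy array whose row 1 holds the opponent's past moves (0 = defect,
# 1 = cooperate), which is how the chunks index it.
import numpy as np

def assembled_strategy(history, memory):
    wronged = memory
    if history.shape[1] == 0:
        wronged = 0
    if history.shape[1] >= 1 and history[1, -1] == 0:
        # Just got wronged.
        wronged += 1
    if wronged >= 2:
        return 0, wronged  # defect forever after the second betrayal
    else:
        return 1, wronged  # otherwise keep cooperating

# e.g. after the opponent has defected in both prior rounds:
# assembled_strategy(np.array([[1, 1], [0, 0]]), 1)  ->  (0, 2)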
main.rs
#![feature(proc_macro_hygiene, decl_macro)] use appsody_rocket; #[macro_use] extern crate rocket; #[get("/")] fn world() -> &'static str { "Hello from Appsody!" } fn rocket() -> rocket::Rocket { appsody_rocket::initialize_metrics().mount("/", routes![world]) } fn
() { rocket().launch(); } #[cfg(test)] mod tests;
main
clickhouse.go
package clickhouse import ( "context" "database/sql" _ "embed" // used to print the embedded assets "fmt" "github.com/pkg/errors" _ "github.com/ClickHouse/clickhouse-go" // clickhouse driver "github.com/odpf/meteor/models" commonv1beta1 "github.com/odpf/meteor/models/odpf/assets/common/v1beta1" facetsv1beta1 "github.com/odpf/meteor/models/odpf/assets/facets/v1beta1" assetsv1beta1 "github.com/odpf/meteor/models/odpf/assets/v1beta1" "github.com/odpf/meteor/plugins" "github.com/odpf/meteor/registry" "github.com/odpf/meteor/utils" "github.com/odpf/salt/log" ) //go:embed README.md var summary string // Config holds the connection URL for the extractor type Config struct { ConnectionURL string `mapstructure:"connection_url" validate:"required"` } var sampleConfig = ` connection_url: "tcp://localhost:3306?username=admin&password=pass123&debug=true"` // Extractor manages the output stream // and logger interface for the extractor type Extractor struct { config Config logger log.Logger db *sql.DB } // New returns a pointer to an initialized Extractor Object func New(logger log.Logger) *Extractor { return &Extractor{ logger: logger, } } // Info returns the brief information about the extractor func (e *Extractor) Info() plugins.Info { return plugins.Info{ Description: "Column-oriented DBMS for online analytical processing.", SampleConfig: sampleConfig, Summary: summary, Tags: []string{"oss", "extractor"}, } } // Validate validates the configuration of the extractor func (e *Extractor) Validate(configMap map[string]interface{}) (err error) { return utils.BuildConfig(configMap, &Config{}) } // Init initializes the extractor func (e *Extractor) Init(ctx context.Context, configMap map[string]interface{}) (err error) { if err = utils.BuildConfig(configMap, &e.config); err != nil { return plugins.InvalidConfigError{} } if e.db, err = sql.Open("clickhouse", e.config.ConnectionURL); err != nil { return errors.Wrap(err, "failed to create a client") } return } //Extract checks if the extractor is configured and // if the connection to the DB is successful // and then starts the extraction process func (e *Extractor) Extract(ctx context.Context, emit plugins.Emit) (err error) { err = e.extractTables(emit) if err != nil { return errors.Wrap(err, "failed to extract tables") } return } // extractTables extract tables from a given database func (e *Extractor) extractTables(emit plugins.Emit) (err error) { res, err := e.db.Query("SELECT name, database FROM system.tables WHERE database not like 'system'") if err != nil { return errors.Wrap(err, "failed to execute query") } for res.Next() { var dbName, tableName string err = res.Scan(&tableName, &dbName) if err != nil { return } var columns []*facetsv1beta1.Column columns, err = e.getColumnsInfo(dbName, tableName) if err != nil { return } emit(models.NewRecord(&assetsv1beta1.Table{ Resource: &commonv1beta1.Resource{ Urn: fmt.Sprintf("%s.%s", dbName, tableName), Name: tableName, }, Schema: &facetsv1beta1.Columns{ Columns: columns, }, })) } return } func (e *Extractor) getColumnsInfo(dbName string, tableName string) (result []*facetsv1beta1.Column, err error) { sqlStr := fmt.Sprintf("DESCRIBE TABLE %s.%s", dbName, tableName) rows, err := e.db.Query(sqlStr) if err != nil { err = errors.Wrapf(err, "failed to execute query %s", sqlStr) return } for rows.Next() { var colName, colDesc, dataType string var temp1, temp2, temp3, temp4 string err = rows.Scan(&colName, &dataType, &colDesc, &temp1, &temp2, &temp3, &temp4) if err != nil { return } result = append(result, 
&facetsv1beta1.Column{ Name: colName, DataType: dataType, Description: colDesc, }) } return result, nil } // Register the extractor to catalog func init()
{ if err := registry.Extractors.Register("clickhouse", func() plugins.Extractor { return New(plugins.GetLog()) }); err != nil { panic(err) } }
hungarian.rs
use std::fmt::Debug; use std::collections::VecDeque; use std::{f32, i32}; struct HugarianSolver<T: Clone+Debug> { pub g1: Vec<T>, pub g2: Vec<T>, matches: i32, n: i32, lx: Vec<f32>, ly: Vec<f32>, pub xy: Vec<i32>, pub yx: Vec<i32>, q: VecDeque<i32>, prev: Vec<i32>, s: Vec<bool>, t: Vec<bool>, slack: Vec<f32>, slackx: Vec<i32>, runs: i32 } impl<T: Clone+Debug> HugarianSolver<T> { pub fn new(g1: &Vec<T>, g2: &Vec<T>, weight_fn: &Fn(&T, &T)->f32) -> HugarianSolver<T> { let _n = g1.len(); // Initial labeling let mut _lx: Vec<f32> = Vec::new(); let mut _ly: Vec<f32> = Vec::new(); for i in 0.._n { _lx.push(0.); _ly.push(0.); for j in 0.._n { _lx[i] = f32::max(_lx[i], weight_fn(&g1[i], &g2[j])); } } HugarianSolver { g1: g1.clone(), g2: g2.clone(), matches: 0, n: _n as i32, lx: _lx, ly: _ly, xy: vec![-1; _n], yx: vec![-1; _n], q: VecDeque::new(), s: vec![false; _n], t: vec![false; _n], slack: vec![0.;_n], slackx: vec![0; _n], prev: vec![-1; _n], runs: 0 } } pub fn augment(&mut self, weight_fn: &Fn(&T, &T)->f32) -> bool { if self.matches >= self.n { return true; } // Find root let mut root: i32 = 0; let mut x = 0; while x < self.n { let _x = x as usize; if self.xy[_x] == -1 { root = x; self.q.push_back(root); self.prev[_x] = -2; self.s[_x] = true; break; } x += 1; } // Initialize slack for y in 0..self.n { let _y = y as usize; let _root = root as usize; self.slack[_y] = self.lx[_root] + self.ly[_y] - weight_fn(&self.g1[_root], &self.g2[_y]); self.slackx[_y] = root; } let mut stop = false; let mut y = 0; let threshold = 100000; // let mut runs = 0; loop { if self.runs >= threshold { self.matches = self.n; break; } self.runs += 1; // Build tree w/ BFS while self.q.len() > 0 { x = self.q.pop_front().unwrap(); let _x = x as usize; y = 0; while y < self.n { let _y = y as usize; if (weight_fn(&self.g1[_x], &self.g2[_y]) == self.lx[_x] + self.ly[_y]) && !self.t[_y] { if self.yx[_y] == -1 { // Found exposed vertex stop = true; break; } // Add to T self.t[_y] = true; self.q.push_back(self.yx[_y]); let _v = self.yx[_y]; self.add_to_tree(_v, x, weight_fn); } y += 1; } if stop {break;} } if stop {break;} // Augmenting path not found self.update_labels(); self.q.clear(); y = 0; while y < self.n { let _y = y as usize; if !self.t[_y] && (self.slack[_y].abs() < 1e-4) { if self.yx[_y] == -1 { x = self.slackx[_y]; break; } else { self.t[_y] = true; if !self.s[self.yx[_y] as usize] { self.q.push_back(self.yx[_y]); let _v1 = self.yx[_y]; let _v2 = self.slackx[_y]; self.add_to_tree(_v1, _v2, weight_fn); } } } y += 1; } if y < self.n { stop = true; break; } } // Found augmenting path if stop { self.matches += 1; let mut cx = x; let mut cy = y; while cx != -2 { let ty = self.xy[cx as usize]; self.yx[cy as usize] = cx; self.xy[cx as usize] = cy; cx = self.prev[cx as usize]; cy = ty; } return self.augment(weight_fn); } else { return false; } } fn add_to_tree(&mut self, x: i32, prevx: i32, weight_fn: &Fn(&T, &T)->f32) { let _x = x as usize; self.s[_x] = true; self.prev[_x] = prevx; for y in 0..self.n { let _y = y as usize; let cost = weight_fn(&self.g1[_x], &self.g2[_y]); if self.lx[_x] + self.ly[_y] - cost < self.slack[_y] { self.slack[_y] = self.lx[_x] + self.ly[_y] - cost; self.slackx[_y] = x as i32; } } } fn update_labels(&mut self) { let mut delta = f32::MAX; for y in 0..self.n { let _y = y as usize; if !self.t[_y] { delta = f32::min(delta, self.slack[_y]); } } for x in 0..self.n { let _x = x as usize; if self.s[_x] { self.lx[_x] -= delta; } } for y in 0..self.n { let _y = y as usize; if self.t[_y] { self.ly[_y] += 
delta; } } for y in 0..self.n { let _y = y as usize; if !self.t[_y] { self.slack[_y] -= delta; } } } } pub fn apply_hungarian<T: Clone+Debug>(g1: &mut Vec<T>, g2: &mut Vec<T>, weight_fn: &Fn(&T, &T)->f32)
#[cfg(test)] mod tests { use super::*; use models::point::Point; #[test] fn test_hungarian() { let mut v1 = vec![ Point{x: 1., y: 2.}, Point{x: 1.5, y: 3.5}, Point{x: 10., y: -2.} ]; let mut v2 = vec![ Point{x: 1.5, y: 3.5}, Point{x: 1., y: 2.}, Point{x: 10.1, y: -4.}, ]; apply_hungarian(&mut v1, &mut v2, &|p1, p2| -p1.distance(p2)); assert_eq!(v2[0], Point{x: 1., y: 2.}); assert_eq!(v2[1], Point{x: 1.5, y: 3.5}); assert_eq!(v2[2], Point{x: 10.1, y: -4.}); println!("{:?}", v1); println!("{:?}", v2); } }
{ let mut solver = HugarianSolver::new(g1, g2, weight_fn); let success = solver.augment(weight_fn); if success { for i in 0..g1.len() { g2[i] = solver.g2[solver.xy[i] as usize].clone(); } } }
__init__.py
# import the version variable from ._version import __version__ # NOTE: kornia filters and geometry must go first since they are the core of the library, # and changing the import order might get you into circular-dependency issues. from . import filters
# import the other modules for convenience from . import ( augmentation, color, contrib, enhance, feature, losses, metrics, morphology, tracking, utils, x, ) # NOTE: we are going to expose to top level very few things from kornia.constants import pi from kornia.testing import xla_is_available from kornia.utils import ( eye_like, vec_like, create_meshgrid, image_to_tensor, tensor_to_image, )
from . import geometry
model_desktop_pool_instant_clone_push_image_settings.go
/* Horizon Server API Welcome to the Horizon Server API Reference documentation. This API reference provides comprehensive information about status of all Horizon Server components and resources. <br> Choose Latest spec from dropdown to view API reference on latest version available. API version: 2111 */ // Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. package gohorizon import ( "encoding/json" ) // DesktopPoolInstantClonePushImageSettings Settings for the push image operation. type DesktopPoolInstantClonePushImageSettings struct { // Whether to add Virtual TPM device. AddVirtualTpm *bool `json:"add_virtual_tpm,omitempty"` // Determines when to perform the operation on machines which have an active session. * FORCE_LOGOFF: Users will be forced to log off when the system is ready to execute the operation. Before being forcibly logged off, users may have a grace period in which to save their work which can be configured in Global Settings. * WAIT_FOR_LOGOFF: Wait for connected users to disconnect before the task starts. The operation starts immediately when there are no active sessions. LogoffPolicy *string `json:"logoff_policy,omitempty"` // When to start the operation. If unset or the time is in the past, the operation will begin immediately. Measured as epoch time. StartTime *int64 `json:"start_time,omitempty"` // Indicates that the operation should stop on first error. StopOnFirstError *bool `json:"stop_on_first_error,omitempty"` } // NewDesktopPoolInstantClonePushImageSettings instantiates a new DesktopPoolInstantClonePushImageSettings object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed func NewDesktopPoolInstantClonePushImageSettings() *DesktopPoolInstantClonePushImageSettings
// NewDesktopPoolInstantClonePushImageSettingsWithDefaults instantiates a new DesktopPoolInstantClonePushImageSettings object // This constructor will only assign default values to properties that have it defined, // but it doesn't guarantee that properties required by API are set func NewDesktopPoolInstantClonePushImageSettingsWithDefaults() *DesktopPoolInstantClonePushImageSettings { this := DesktopPoolInstantClonePushImageSettings{} return &this } // GetAddVirtualTpm returns the AddVirtualTpm field value if set, zero value otherwise. func (o *DesktopPoolInstantClonePushImageSettings) GetAddVirtualTpm() bool { if o == nil || o.AddVirtualTpm == nil { var ret bool return ret } return *o.AddVirtualTpm } // GetAddVirtualTpmOk returns a tuple with the AddVirtualTpm field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *DesktopPoolInstantClonePushImageSettings) GetAddVirtualTpmOk() (*bool, bool) { if o == nil || o.AddVirtualTpm == nil { return nil, false } return o.AddVirtualTpm, true } // HasAddVirtualTpm returns a boolean if a field has been set. func (o *DesktopPoolInstantClonePushImageSettings) HasAddVirtualTpm() bool { if o != nil && o.AddVirtualTpm != nil { return true } return false } // SetAddVirtualTpm gets a reference to the given bool and assigns it to the AddVirtualTpm field. func (o *DesktopPoolInstantClonePushImageSettings) SetAddVirtualTpm(v bool) { o.AddVirtualTpm = &v } // GetLogoffPolicy returns the LogoffPolicy field value if set, zero value otherwise. func (o *DesktopPoolInstantClonePushImageSettings) GetLogoffPolicy() string { if o == nil || o.LogoffPolicy == nil { var ret string return ret } return *o.LogoffPolicy } // GetLogoffPolicyOk returns a tuple with the LogoffPolicy field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *DesktopPoolInstantClonePushImageSettings) GetLogoffPolicyOk() (*string, bool) { if o == nil || o.LogoffPolicy == nil { return nil, false } return o.LogoffPolicy, true } // HasLogoffPolicy returns a boolean if a field has been set. func (o *DesktopPoolInstantClonePushImageSettings) HasLogoffPolicy() bool { if o != nil && o.LogoffPolicy != nil { return true } return false } // SetLogoffPolicy gets a reference to the given string and assigns it to the LogoffPolicy field. func (o *DesktopPoolInstantClonePushImageSettings) SetLogoffPolicy(v string) { o.LogoffPolicy = &v } // GetStartTime returns the StartTime field value if set, zero value otherwise. func (o *DesktopPoolInstantClonePushImageSettings) GetStartTime() int64 { if o == nil || o.StartTime == nil { var ret int64 return ret } return *o.StartTime } // GetStartTimeOk returns a tuple with the StartTime field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *DesktopPoolInstantClonePushImageSettings) GetStartTimeOk() (*int64, bool) { if o == nil || o.StartTime == nil { return nil, false } return o.StartTime, true } // HasStartTime returns a boolean if a field has been set. func (o *DesktopPoolInstantClonePushImageSettings) HasStartTime() bool { if o != nil && o.StartTime != nil { return true } return false } // SetStartTime gets a reference to the given int64 and assigns it to the StartTime field. func (o *DesktopPoolInstantClonePushImageSettings) SetStartTime(v int64) { o.StartTime = &v } // GetStopOnFirstError returns the StopOnFirstError field value if set, zero value otherwise. 
func (o *DesktopPoolInstantClonePushImageSettings) GetStopOnFirstError() bool { if o == nil || o.StopOnFirstError == nil { var ret bool return ret } return *o.StopOnFirstError } // GetStopOnFirstErrorOk returns a tuple with the StopOnFirstError field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *DesktopPoolInstantClonePushImageSettings) GetStopOnFirstErrorOk() (*bool, bool) { if o == nil || o.StopOnFirstError == nil { return nil, false } return o.StopOnFirstError, true } // HasStopOnFirstError returns a boolean if a field has been set. func (o *DesktopPoolInstantClonePushImageSettings) HasStopOnFirstError() bool { if o != nil && o.StopOnFirstError != nil { return true } return false } // SetStopOnFirstError gets a reference to the given bool and assigns it to the StopOnFirstError field. func (o *DesktopPoolInstantClonePushImageSettings) SetStopOnFirstError(v bool) { o.StopOnFirstError = &v } func (o DesktopPoolInstantClonePushImageSettings) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} if o.AddVirtualTpm != nil { toSerialize["add_virtual_tpm"] = o.AddVirtualTpm } if o.LogoffPolicy != nil { toSerialize["logoff_policy"] = o.LogoffPolicy } if o.StartTime != nil { toSerialize["start_time"] = o.StartTime } if o.StopOnFirstError != nil { toSerialize["stop_on_first_error"] = o.StopOnFirstError } return json.Marshal(toSerialize) } type NullableDesktopPoolInstantClonePushImageSettings struct { value *DesktopPoolInstantClonePushImageSettings isSet bool } func (v NullableDesktopPoolInstantClonePushImageSettings) Get() *DesktopPoolInstantClonePushImageSettings { return v.value } func (v *NullableDesktopPoolInstantClonePushImageSettings) Set(val *DesktopPoolInstantClonePushImageSettings) { v.value = val v.isSet = true } func (v NullableDesktopPoolInstantClonePushImageSettings) IsSet() bool { return v.isSet } func (v *NullableDesktopPoolInstantClonePushImageSettings) Unset() { v.value = nil v.isSet = false } func NewNullableDesktopPoolInstantClonePushImageSettings(val *DesktopPoolInstantClonePushImageSettings) *NullableDesktopPoolInstantClonePushImageSettings { return &NullableDesktopPoolInstantClonePushImageSettings{value: val, isSet: true} } func (v NullableDesktopPoolInstantClonePushImageSettings) MarshalJSON() ([]byte, error) { return json.Marshal(v.value) } func (v *NullableDesktopPoolInstantClonePushImageSettings) UnmarshalJSON(src []byte) error { v.isSet = true return json.Unmarshal(src, &v.value) }
{ this := DesktopPoolInstantClonePushImageSettings{} return &this }
accelerate.go
package upload import ( "context" "fmt" "mime/multipart" "net/http" "gitlab.com/gitlab-org/gitlab-workhorse/internal/api" "gitlab.com/gitlab-org/gitlab-workhorse/internal/filestore" "gitlab.com/gitlab-org/gitlab-workhorse/internal/secret" jwt "github.com/dgrijalva/jwt-go" ) const RewrittenFieldsHeader = "Gitlab-Workhorse-Multipart-Fields" type savedFileTracker struct { request *http.Request rewrittenFields map[string]string } type MultipartClaims struct { RewrittenFields map[string]string `json:"rewritten_fields"` jwt.StandardClaims } func
(rails filestore.PreAuthorizer, h http.Handler) http.Handler { return rails.PreAuthorizeHandler(func(w http.ResponseWriter, r *http.Request, a *api.Response) { s := &savedFileTracker{request: r} HandleFileUploads(w, r, h, a, s) }, "/authorize") } func (s *savedFileTracker) ProcessFile(_ context.Context, fieldName string, file *filestore.FileHandler, _ *multipart.Writer) error { if s.rewrittenFields == nil { s.rewrittenFields = make(map[string]string) } s.rewrittenFields[fieldName] = file.LocalPath return nil } func (s *savedFileTracker) ProcessField(_ context.Context, _ string, _ *multipart.Writer) error { return nil } func (s *savedFileTracker) Finalize(_ context.Context) error { if s.rewrittenFields == nil { return nil } claims := MultipartClaims{s.rewrittenFields, secret.DefaultClaims} tokenString, err := secret.JWTTokenString(claims) if err != nil { return fmt.Errorf("savedFileTracker.Finalize: %v", err) } s.request.Header.Set(RewrittenFieldsHeader, tokenString) return nil } func (a *savedFileTracker) Name() string { return "accelerate" }
Accelerate
token_cache.py
from django.core.cache import cache def set_cache(user_no, token): cache.set('token:userno:' + user_no, token, timeout=None) cache.set('token:value:' + token, user_no, timeout=None) def get_token_from_cache(user_no): try: token = cache.get('token:userno:' + user_no) except Exception:
return token def get_userno_from_cache(token): try: user_no = cache.get('token:value:' + str(token)) except Exception: user_no = None return user_no def delete_token_cache(user_no, token): cache.delete('token:userno:' + user_no) cache.delete('token:value:' + token)
token = None
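# Illustrative usage sketch (assumes a configured Django cache backend; not
# part of the original file): the two keys mirror each other, so a token can
# be resolved from a user number and vice versa.
#
#   set_cache('u123', 'abc-token')
#   get_token_from_cache('u123')        # -> 'abc-token'
#   get_userno_from_cache('abc-token')  # -> 'u123'
#   delete_token_cache('u123', 'abc-token')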
cart.py
from django.conf import settings from django.db import models class Cart(models.Model):
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
regions-proc-bound-capture.rs
fn borrowed_proc<'a>(x: &'a isize) -> Box<dyn FnMut()->(isize) + 'a> { // This is legal, because the region bound on `proc` // states that it captures `x`. Box::new(move|| { *x }) } fn static_proc(x: &isize) -> Box<dyn FnMut() -> (isize) + 'static> { // This is illegal, because the region bound on `proc` is 'static. Box::new(move || { *x }) //~^ ERROR lifetime may not live long enough } fn
() { }
main
signMessage.ts
import {Message, SignedMessage, transactionSign, transactionSignRaw} from "@zondax/filecoin-signing-tools/js"; import {FilecoinNumber} from '@glif/filecoin-number/dist'; import {Wallet} from "../interfaces"; import {getKeyPair} from "../filecoin/account"; import {showConfirmationDialog} from "../util/confirmation"; import {LotusRpcApi} from "../filecoin/types"; import {MessageRequest, SignMessageResponse, SignRawMessageResponse} from "@chainsafe/filsnap-types"; import {messageCreator} from "../util/messageCreator"; export async function
( wallet: Wallet, api: LotusRpcApi, messageRequest: MessageRequest ): Promise<SignMessageResponse> { try { const keypair = await getKeyPair(wallet); // extract gas params const gl = messageRequest.gaslimit && messageRequest.gaslimit !== 0 ? messageRequest.gaslimit : 0; const gp = messageRequest.gaspremium && messageRequest.gaspremium !== "0" ? messageRequest.gaspremium : "0"; const gfc = messageRequest.gasfeecap && messageRequest.gasfeecap !== "0" ? messageRequest.gasfeecap : "0"; const nonce = messageRequest.nonce ?? Number(await api.mpoolGetNonce(keypair.address)); const params = messageRequest.params || ""; const method = messageRequest.method || 0; // create message object const message: Message = { from: keypair.address, gasfeecap: gfc, gaslimit: gl, gaspremium: gp, method, nonce, params, to: messageRequest.to, value: messageRequest.value, }; // estimate gas usage if gas params not provided if (message.gaslimit === 0 && message.gasfeecap === "0" && message.gaspremium === "0") { message.gaslimit = await api.gasEstimateGasLimit(message, null); const messageEstimate = await api.gasEstimateMessageGas(message, {MaxFee: "0"}, null); message.gaspremium = messageEstimate.GasPremium; message.gasfeecap = messageEstimate.GasFeeCap; } // show confirmation const confirmation = await showConfirmationDialog( wallet, { description: `It will be signed with address: ${message.from}`, prompt: `Do you want to sign this message?`, textAreaContent: messageCreator( [ {message: 'to:', value: message.to}, {message: 'from:', value: message.from}, {message: 'value:', value: message.value !== '0' && `${new FilecoinNumber(message.value, 'attofil').toFil()} FIL`}, {message: 'method:', value: message.method}, {message: 'params:', value: message.params}, {message: 'gas limit:', value: `${message.gaslimit} aFIL`}, {message: 'gas fee cap:', value: `${message.gasfeecap} aFIL`}, {message: 'gas premium:', value: `${message.gaspremium} aFIL`}, ] ) }, ); let sig: SignedMessage = null; if (confirmation) { sig = transactionSign(message, keypair.privateKey); } return {confirmed: confirmation, error: null, signedMessage: sig}; } catch (e) { return {confirmed: false, error: e, signedMessage: null}; } } export async function signMessageRaw(wallet: Wallet, rawMessage: string): Promise<SignRawMessageResponse> { try { const keypair = await getKeyPair(wallet); const confirmation = await showConfirmationDialog( wallet, { description: `It will be signed with address: ${keypair.address}`, prompt: `Do you want to sign this message?`, textAreaContent: rawMessage, } ); let sig: string = null; if (confirmation) { sig = transactionSignRaw(rawMessage, keypair.privateKey).toString("base64"); } return {confirmed: confirmation, error: null, signature: sig}; } catch (e) { return {confirmed: false, error: e, signature: null}; } }
signMessage
basicJS2.js
var firstName = prompt("What is your first name?"); var lastName = prompt("What is your last name?"); var age = prompt("What is your age?");
console.log("Your full name is " + firstName + " " + lastName); console.log("Your age is " + age);
networks.ts
const channels: any = {}; let frequency = 2412; for (let i = 1; i < 15; i++) { channels[i] = frequency.toString(); frequency = frequency + 5; } frequency = 5180; for (let j = 36; j <= 64; j += 2) { channels[j] = frequency.toString(); frequency += 10; } frequency = 5500; for (let k = 100; k <= 144; k += 2) { channels[k] = frequency.toString(); frequency += 10; } frequency = 5745; for (let l = 149; l <= 161; l += 2) { channels[l] = frequency.toString(); frequency += 10; } frequency = 5825; for (let m = 165; m <= 173; m += 4) { channels[m] = frequency.toString(); frequency += 20; } const frequencyFromChannel = (channelId: any) => { return channels[parseInt(channelId)]; } const dBFromQuality = (quality: any) => { return parseFloat(quality) / 2 - 100; } const qualityFromDB = (db: any) => {
return 2 * (parseFloat(db) + 100); } export { frequencyFromChannel, dBFromQuality, qualityFromDB }
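// Illustrative usage sketch (not part of the original module), with values
// produced by the lookup table and formulas above:
//
//   frequencyFromChannel(1)   // -> '2412' (2.4 GHz band)
//   frequencyFromChannel(36)  // -> '5180' (5 GHz band)
//   dBFromQuality('70')       // -> -65  (quality 0..100 mapped to dBm)
//   qualityFromDB(-65)        // -> 70
//
// Caveat: the linear 2.4 GHz loop yields '2477' for channel 14, whereas the
// real-world channel 14 sits at 2484 MHz.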
watcher_test.go
// Copyright (c) Fader, IP. All Rights Reserved. // See LICENSE for license information. package fs import ( "context" "io/ioutil" "path/filepath" "sync/atomic" "testing" "os" "time" "github.com/stretchr/testify/assert" ) func TestWatcher_simple(t *testing.T)
// http://stackoverflow.com/questions/33450980/golang-remove-all-contents-of-a-directory?answertab=votes#tab-top func RemoveContents(t *testing.T, dir string) error { d, err := os.Open(dir) if err != nil { return err } defer d.Close() names, err := d.Readdirnames(-1) if err != nil { return err } for _, name := range names { err = os.RemoveAll(filepath.Join(dir, name)) if err != nil { return err } t.Logf("Remove %s", name) } return nil }
{ dir, err := ioutil.TempDir("", "fader__workspace") assert.NoError(t, err) time.Sleep(150 * time.Millisecond) var counterCreate, counterModify, counterRemove int32 w := NewFSWatcherWithHook( func(op Op, name, oldname string) { if op&CreateFileOrFolder == CreateFileOrFolder { atomic.AddInt32(&counterCreate, 1) } if op&ModifyOrCreateFile == ModifyOrCreateFile { atomic.AddInt32(&counterModify, 1) } if op&RemoveFileOrFolder == RemoveFileOrFolder { atomic.AddInt32(&counterRemove, 1) } t.Log(op, oldname, name) }, ) err = w.Watch( context.TODO(), dir, ) assert.NoError(t, err, "run watcher") // create folder subdir := filepath.Join(dir, "subdir") os.MkdirAll(subdir, 0700) time.Sleep(150 * time.Millisecond) // create file file := filepath.Join(subdir, "1") ofile, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE, 0666) assert.NoError(t, err, "create file") ofile.Write([]byte("text")) ofile.Sync() ofile.Close() time.Sleep(150 * time.Millisecond) // change file ofile, err = os.OpenFile(file, os.O_WRONLY|os.O_CREATE, 0666) assert.NoError(t, err, "create file") ofile.Write([]byte("new text")) ofile.Sync() ofile.Close() time.Sleep(150 * time.Millisecond) // remove file os.RemoveAll(file) time.Sleep(150 * time.Millisecond) // remove folder RemoveContents(t, dir) time.Sleep(500 * time.Millisecond) assert.EqualValues(t, 2, counterCreate) assert.EqualValues(t, 1, counterModify) assert.EqualValues(t, 1, counterRemove) }
ngx-ui-loader-config.token.d.ts
import { InjectionToken } from '@angular/core'; import { NgxUiLoaderConfig } from './ngx-ui-loader-config'; /** * Injection token for ngx-ui-loader configuration */ export declare const NGX_UI_LOADER_CONFIG_TOKEN: InjectionToken<NgxUiLoaderConfig>;
CachedMapResource.ts
/* * cloudbeaver - Cloud Database Manager * Copyright (C) 2020 DBeaver Corp and others * * Licensed under the Apache License, Version 2.0. * you may not use this file except in compliance with the License. */ import { Subject, Observable } from 'rxjs'; import { injectable } from '@cloudbeaver/core-di'; import { CachedResource } from './CachedResource'; export const RESOURCE_KEY_LIST = Symbol('@CachedMapResource/list'); export type ResourceKeyList<TKey> = { list: TKey[]; [RESOURCE_KEY_LIST]: true; } export type ResourceKey<TKey> = TKey | ResourceKeyList<TKey>; @injectable() export abstract class CachedMapResource<TKey, TValue> extends CachedResource< Map<TKey, TValue>, ResourceKey<TKey> > { readonly onItemAdd: Observable<ResourceKey<TKey>>; readonly onItemDelete: Observable<ResourceKey<TKey>>; protected itemAddSubject: Subject<ResourceKey<TKey>>; protected itemDeleteSubject: Subject<ResourceKey<TKey>>; constructor(defaultValue?: Map<TKey, TValue>) { super(defaultValue || new Map()); this.itemAddSubject = new Subject(); this.onItemAdd = this.itemAddSubject.asObservable(); this.itemDeleteSubject = new Subject(); this.onItemDelete = this.itemDeleteSubject.asObservable(); } isOutdated(key: ResourceKey<TKey>): boolean { if (isResourceKeyList(key)) { return key.list.some(key => this.outdated.has(key)); } return this.outdated.has(key); } markOutdated(key: ResourceKey<TKey>): void { if (isResourceKeyList(key)) { for (const itemKey of key.list) { this.outdated.add(itemKey); } } else { this.outdated.add(key); } this.outdatedSubject.next(key); } markUpdated(key: ResourceKey<TKey>): void { if (isResourceKeyList(key)) { for (const itemKey of key.list) { this.outdated.delete(itemKey); } } else { this.outdated.delete(key); } } isLoaded(key: ResourceKey<TKey>): boolean { if (isResourceKeyList(key)) { return key.list.every(key => this.data.has(key)); } return this.data.has(key); } isDataLoading(key: ResourceKey<TKey>): boolean { if (isResourceKeyList(key)) { return key.list.some( key => (isResourceKeyList(this.activePromiseParam) ? 
this.activePromiseParam.list.includes(key) : key === this.activePromiseParam) ); } return this.activePromiseParam === key; } get(key: TKey): TValue | undefined; get(key: ResourceKeyList<TKey>): Array<TValue | undefined>; get(key: ResourceKey<TKey>): Array<TValue | undefined>| TValue | undefined; get(key: ResourceKey<TKey>): Array<TValue | undefined>| TValue | undefined { if (isResourceKeyList(key)) { return key.list.map(key => this.data.get(key)); } return this.data.get(key); } set(key: TKey, value: TValue): void; set(key: ResourceKeyList<TKey>, value: TValue[]): void; set(key: ResourceKey<TKey>, value: TValue | TValue[]): void { if (isResourceKeyList(key)) { for (let i = 0; i < key.list.length; i++) { this.data.set(key.list[i], (value as TValue[])[i]); } } else { this.data.set(key, value as TValue); } this.markUpdated(key); this.itemAddSubject.next(key); } delete(key: TKey): void; delete(key: ResourceKeyList<TKey>): void; delete(key: ResourceKey<TKey>): void; delete(key: ResourceKey<TKey>) { if (isResourceKeyList(key)) { for (let i = 0; i < key.list.length; i++) { this.data.delete(key.list[i]); } } else { this.data.delete(key); } this.markUpdated(key); this.itemDeleteSubject.next(key); } async refresh(key: TKey): Promise<TValue>; async refresh(key: ResourceKeyList<TKey>): Promise<Array<TValue>>; async refresh(key: ResourceKey<TKey>): Promise<Array<TValue>| TValue>; async refresh(key: ResourceKey<TKey>): Promise<Array<TValue>| TValue> { this.markOutdated(key); await this.loadData(key); return this.get(key) as Array<TValue>| TValue; } async load(key: TKey): Promise<TValue>; async load(key: ResourceKeyList<TKey>): Promise<Array<TValue>>; async load(key: ResourceKey<TKey>): Promise<Array<TValue>| TValue>; async load(key: ResourceKey<TKey>): Promise<Array<TValue>| TValue> { await this.loadData(key); return this.get(key) as Array<TValue>| TValue; } } export function isResourceKeyList<T>(data: any): data is ResourceKeyList<T> {
export function resourceKeyList<T>(list: T[]): ResourceKeyList<T> { return { [RESOURCE_KEY_LIST]: true, list }; }
return data && typeof data === 'object' && RESOURCE_KEY_LIST in data; }
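A usage sketch for the key-polymorphic API above: a single TKey and a ResourceKeyList<TKey> flow through the same overloaded methods. `users` stands in for an instance of some concrete subclass; the abstract members CachedResource requires are not visible in this file, so construction is deliberately elided via `declare`.

// Sketch only: `users` is assumed to be an instance of a concrete subclass.
import { CachedMapResource, resourceKeyList } from './CachedMapResource';

interface User { id: string; name: string }

declare const users: CachedMapResource<string, User>;

users.set('u1', { id: 'u1', name: 'Ada' });              // single-key overload
users.set(resourceKeyList(['u2', 'u3']), [
  { id: 'u2', name: 'Grace' },
  { id: 'u3', name: 'Edsger' },
]);                                                       // bulk overload

const one = users.get('u1');                              // User | undefined
const many = users.get(resourceKeyList(['u1', 'u2']));    // (User | undefined)[]
users.delete(resourceKeyList(['u2', 'u3']));              // marks keys updated, emits onItemDelete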
CryptoGetClaim_pb.d.ts
import * as jspb from "google-protobuf" import * as BasicTypes_pb from './BasicTypes_pb'; import * as QueryHeader_pb from './QueryHeader_pb'; import * as ResponseHeader_pb from './ResponseHeader_pb'; import * as CryptoAddClaim_pb from './CryptoAddClaim_pb'; export class CryptoGetClaimQuery extends jspb.Message { getHeader(): QueryHeader_pb.QueryHeader | undefined; setHeader(value?: QueryHeader_pb.QueryHeader): void; hasHeader(): boolean; clearHeader(): void; getAccountid(): BasicTypes_pb.AccountID | undefined; setAccountid(value?: BasicTypes_pb.AccountID): void; hasAccountid(): boolean; clearAccountid(): void; getHash(): Uint8Array | string; getHash_asU8(): Uint8Array; getHash_asB64(): string; setHash(value: Uint8Array | string): void; serializeBinary(): Uint8Array; toObject(includeInstance?: boolean): CryptoGetClaimQuery.AsObject; static toObject(includeInstance: boolean, msg: CryptoGetClaimQuery): CryptoGetClaimQuery.AsObject; static serializeBinaryToWriter(message: CryptoGetClaimQuery, writer: jspb.BinaryWriter): void; static deserializeBinary(bytes: Uint8Array): CryptoGetClaimQuery; static deserializeBinaryFromReader(message: CryptoGetClaimQuery, reader: jspb.BinaryReader): CryptoGetClaimQuery; } export namespace CryptoGetClaimQuery { export type AsObject = { header?: QueryHeader_pb.QueryHeader.AsObject, accountid?: BasicTypes_pb.AccountID.AsObject, hash: Uint8Array | string, } } export class CryptoGetClaimResponse extends jspb.Message { getHeader(): ResponseHeader_pb.ResponseHeader | undefined; setHeader(value?: ResponseHeader_pb.ResponseHeader): void; hasHeader(): boolean; clearHeader(): void; getClaim(): CryptoAddClaim_pb.Claim | undefined; setClaim(value?: CryptoAddClaim_pb.Claim): void; hasClaim(): boolean; clearClaim(): void;
static serializeBinaryToWriter(message: CryptoGetClaimResponse, writer: jspb.BinaryWriter): void; static deserializeBinary(bytes: Uint8Array): CryptoGetClaimResponse; static deserializeBinaryFromReader(message: CryptoGetClaimResponse, reader: jspb.BinaryReader): CryptoGetClaimResponse; } export namespace CryptoGetClaimResponse { export type AsObject = { header?: ResponseHeader_pb.ResponseHeader.AsObject, claim?: CryptoAddClaim_pb.Claim.AsObject, } }
serializeBinary(): Uint8Array; toObject(includeInstance?: boolean): CryptoGetClaimResponse.AsObject; static toObject(includeInstance: boolean, msg: CryptoGetClaimResponse): CryptoGetClaimResponse.AsObject;
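A round-trip sketch using only members declared above. The zero-argument constructor is the standard google-protobuf (jspb) pattern for generated messages and is assumed here rather than shown in the declaration file.

// Build a query, serialize it, and decode it again.
import { CryptoGetClaimQuery } from './CryptoGetClaim_pb';

const query = new CryptoGetClaimQuery();
query.setHash(new Uint8Array([0xde, 0xad, 0xbe, 0xef]));

const bytes: Uint8Array = query.serializeBinary();            // wire format
const decoded = CryptoGetClaimQuery.deserializeBinary(bytes); // parse it back
console.log(decoded.getHash_asB64());                         // base64 view of the hash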
test_task_stream.py
import os from time import sleep import pytest from tlz import frequencies from distributed import get_task_stream from distributed.client import wait from distributed.diagnostics.task_stream import TaskStreamPlugin from distributed.metrics import time from distributed.utils_test import div, gen_cluster, inc, slowinc @gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3) async def test_TaskStreamPlugin(c, s, *workers): es = TaskStreamPlugin(s) s.add_plugin(es) assert not es.buffer futures = c.map(div, [1] * 10, range(10)) total = c.submit(sum, futures[1:]) await wait(total) assert len(es.buffer) == 11 workers = dict() rects = es.rectangles(0, 10, workers) assert workers assert all(n == "div" for n in rects["name"]) assert all(d > 0 for d in rects["duration"]) counts = frequencies(rects["color"]) assert counts["black"] == 1 assert set(counts.values()) == {9, 1} assert len(set(rects["y"])) == 3 rects = es.rectangles(2, 5, workers) assert all(len(L) == 3 for L in rects.values()) starts = sorted(rects["start"]) rects = es.rectangles( 2, 5, workers=workers, start_boundary=(starts[0] + starts[1]) / 2000 ) assert set(rects["start"]).issubset(set(starts[1:])) @gen_cluster(client=True) async def test_maxlen(c, s, a, b): tasks = TaskStreamPlugin(s, maxlen=5) s.add_plugin(tasks) futures = c.map(inc, range(10)) await wait(futures) assert len(tasks.buffer) == 5 @gen_cluster(client=True) async def test_collect(c, s, a, b): tasks = TaskStreamPlugin(s) s.add_plugin(tasks) start = time() futures = c.map(slowinc, range(10), delay=0.1) await wait(futures) L = tasks.collect() assert len(L) == len(futures) L = tasks.collect(start=start) assert len(L) == len(futures) L = tasks.collect(start=start + 0.2) assert 4 <= len(L) <= len(futures) L = tasks.collect(start="20 s") assert len(L) == len(futures) L = tasks.collect(start="500ms") assert 0 < len(L) <= len(futures) L = tasks.collect(count=3) assert len(L) == 3 assert L == list(tasks.buffer)[-3:] assert tasks.collect(stop=start + 100, count=3) == tasks.collect(count=3) assert tasks.collect(start=start, count=3) == list(tasks.buffer)[:3] @gen_cluster(client=True) async def test_no_startstops(c, s, a, b): tasks = TaskStreamPlugin(s) s.add_plugin(tasks) # just to create the key on the scheduler future = c.submit(inc, 1) await wait(future) assert len(tasks.buffer) == 1 tasks.transition(future.key, "processing", "erred") # Transition was not recorded because it didn't contain `startstops` assert len(tasks.buffer) == 1 tasks.transition(future.key, "processing", "erred", startstops=[]) # Transition was not recorded because `startstops` was empty assert len(tasks.buffer) == 1 tasks.transition( future.key, "processing", "erred", startstops=[dict(start=time(), stop=time())] ) assert len(tasks.buffer) == 2 @gen_cluster(client=True) async def
(c, s, a, b): L = await c.get_task_stream() assert L == () futures = c.map(slowinc, range(10), delay=0.1) await wait(futures) tasks = s.plugins[TaskStreamPlugin.name] L = await c.get_task_stream() assert L == tuple(tasks.buffer) def test_client_sync(client): with get_task_stream(client=client) as ts: sleep(0.1) # to smooth over time differences on the scheduler futures = client.map(inc, range(10)) wait(futures) assert len(ts.data) == 10 @gen_cluster(client=True) async def test_get_task_stream_plot(c, s, a, b): bokeh = pytest.importorskip("bokeh") await c.get_task_stream() futures = c.map(slowinc, range(10), delay=0.1) await wait(futures) data, figure = await c.get_task_stream(plot=True) assert isinstance(figure, bokeh.plotting.Figure) def test_get_task_stream_save(client, tmpdir): bokeh = pytest.importorskip("bokeh") tmpdir = str(tmpdir) fn = os.path.join(tmpdir, "foo.html") with get_task_stream(plot="save", filename=fn) as ts: wait(client.map(inc, range(10))) with open(fn) as f: data = f.read() assert "inc" in data assert "bokeh" in data assert isinstance(ts.figure, bokeh.plotting.Figure)
test_client
main.py
#!/usr/bin/python3 # -*- coding: utf-8 -*- from PyQt5.uic import loadUi from PyQt5.QtGui import * from PyQt5.QtWidgets import * from PyQt5.QtCore import * #from matplotlib.backends.backend_qt5agg import (NavigationToolbar2QT as NavigationToolbar) import matplotlib.image as mpimg import sys import radiomics_single as rs qtCreatorFile = "design/diplom.ui" # Enter file here. class MatplotlibWidget(QMainWindow): def
(self): QMainWindow.__init__(self) loadUi(qtCreatorFile, self) self.FlagLoaded = False self.setWindowTitle("Texture Analysis for Diffuse Liver Diseases") self.buttonLoader.clicked.connect(self.choose_file) self.buttonAnalyze.clicked.connect(self.analyze) #self.addToolBar(NavigationToolbar(self.MplWidget.canvas, self)) self.setWindowIcon(QIcon("app.ico")) mainMenu = self.menuBar() fileMenu = mainMenu.addMenu('File') helpMenu = mainMenu.addMenu('Help') buttonLoaderMenu = QAction('Download', self) buttonLoaderMenu.setShortcut('Ctrl+D') buttonLoaderMenu.setStatusTip('Download the region of interest') buttonLoaderMenu.triggered.connect(self.choose_file) fileMenu.addAction(buttonLoaderMenu) buttonAnalyzeMenu = QAction('Analysis', self) buttonAnalyzeMenu.setShortcut('Ctrl+A') buttonAnalyzeMenu.setStatusTip('Analyse the loaded region of interest') buttonAnalyzeMenu.triggered.connect(self.analyze) fileMenu.addAction(buttonAnalyzeMenu) buttonExit = QAction('Quit', self) buttonExit.setShortcut('Ctrl+Q') buttonExit.setStatusTip('Quit the application') buttonExit.triggered.connect(sys.exit) fileMenu.addAction(buttonExit) buttonLaunch = QAction('How to run', self) buttonLaunch.setStatusTip('Get info about how to run the application') self.msgBox1 = QMessageBox(self) self.msgBox1.setIcon(QMessageBox.Information) self.msgBox1.setWindowTitle("How to run") self.msgBox1.setText("To run the classifier:\n1) push the button <Choose an image>\n2) push the button <Analyse>") buttonLaunch.triggered.connect(self.msgBox1.exec_) helpMenu.addAction(buttonLaunch) buttonInfo = QAction('Application', self) buttonInfo.setStatusTip('Get info about the application') self.msgBox2 = QMessageBox(self) self.msgBox2.setIcon(QMessageBox.Information) self.msgBox2.setWindowTitle("Application") self.msgBox2.setText("This application gives the ability to load an ROI and predict the probable presence of diffuse liver diseases.") buttonInfo.triggered.connect(self.msgBox2.exec_) helpMenu.addAction(buttonInfo) buttonInfo = QAction('Developer', self) buttonInfo.setStatusTip('Get info about the developer') self.msgBox3 = QMessageBox(self) self.msgBox3.setIcon(QMessageBox.Information) self.msgBox3.setWindowTitle("Developer") self.msgBox3.setText("This application was developed by Illia Yankovyi, a 4th-year student" "\nNTUU Igor Sikorsky Kyiv Polytechnic Institute:" "\nFaculty of Biomedical Engineering (FBME)\n" "\nAcademic unit: BS-52 group\n" "\nSupervisor: Nastenko I., M.D., Candidate of Engineering Sciences, Senior Research Fellow.") buttonInfo.triggered.connect(self.msgBox3.exec_) helpMenu.addAction(buttonInfo) self.labelTitle.setText('Classifier of Diffuse Liver Diseases') font = QFont() font.setPointSize(20) font.setBold(True) self.labelTitle.setFont(font) self.labelTitle.setAlignment(Qt.AlignCenter) self.buttonAnalyze.setText('Analyze Image') self.buttonLoader.setText('Download Image') self.labelResult.setText('To get a prediction:\n\n1) Download the region of interest;\n2) Run the analysis.') def analyze(self): if (self.FlagLoaded): self.labelResult.setText(rs.signle_prediction(self.path)) else: self.labelResult.setText("Image was not chosen!\n\nPlease choose the image\nbefore running the Analysis") self.msgBox4 = QMessageBox(self) self.msgBox4.setIcon(QMessageBox.Warning) self.msgBox4.setWindowTitle("Error! Image was not chosen.") self.msgBox4.setText( "Image was not chosen! 
Please choose the image before running the Analysis.") self.msgBox4.exec_() def choose_file(self): options = QFileDialog.Options() fileName, _ = QFileDialog.getOpenFileName(self, "Choose an image", "", "Image (*.bmp *.png *.jpeg *.jpg)", options=options) extensions = ['png', 'jpg', 'jpeg', 'bmp'] fileExtension = (fileName.split('.'))[-1].lower() if fileName: if fileExtension in extensions: self.path = fileName self.img = mpimg.imread(self.path) self.MplWidget.canvas.axes.clear() self.MplWidget.canvas.axes.imshow(self.img) self.MplWidget.canvas.axes.set_title('Chosen image') self.MplWidget.canvas.draw() self.FlagLoaded = True else: self.labelResult.setText("Chosen filetype is not supported.\nSupported filetypes:\nBMP, PNG, JPEG, JPG") self.msgBox5 = QMessageBox(self) self.msgBox5.setIcon(QMessageBox.Warning) self.msgBox5.setWindowTitle("Error! Chosen filetype is not supported.") self.msgBox5.setText( "Chosen filetype is not supported.\nSupported filetypes:\nBMP, PNG, JPEG, JPG.") self.msgBox5.exec_() if __name__ == "__main__": app = QApplication([]) window = MatplotlibWidget() window.show() sys.exit(app.exec_())
__init__
models.rs
#![doc = "generated by AutoRust 0.1.0"] #![allow(non_camel_case_types)] #![allow(unused_imports)] use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Address { #[serde(rename = "addressLine1", default, skip_serializing_if = "Option::is_none")] pub address_line1: Option<String>, #[serde(rename = "addressLine2", default, skip_serializing_if = "Option::is_none")] pub address_line2: Option<String>, #[serde(rename = "addressLine3", default, skip_serializing_if = "Option::is_none")] pub address_line3: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub city: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub region: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub country: Option<String>, #[serde(rename = "postalCode", default, skip_serializing_if = "Option::is_none")] pub postal_code: Option<String>, #[serde(rename = "phoneNumber", default, skip_serializing_if = "Option::is_none")] pub phone_number: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Amount { #[serde(default, skip_serializing_if = "Option::is_none")] pub currency: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub value: Option<f64>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BillingAccount { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<BillingAccountProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BillingAccountProperties { #[serde(default, skip_serializing_if = "Option::is_none")] pub company: Option<String>, #[serde(rename = "accountType", default, skip_serializing_if = "Option::is_none")] pub account_type: Option<billing_account_properties::AccountType>, #[serde(default, skip_serializing_if = "Option::is_none")] pub address: Option<Address>, #[serde(rename = "defaultCurrency", default, skip_serializing_if = "Option::is_none")] pub default_currency: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub country: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub agreements: Option<String>, #[serde(rename = "invoiceSections", default, skip_serializing_if = "Vec::is_empty")] pub invoice_sections: Vec<InvoiceSection>, #[serde(rename = "billingProfiles", default, skip_serializing_if = "Vec::is_empty")] pub billing_profiles: Vec<BillingProfile>, #[serde(rename = "enrollmentDetails", default, skip_serializing_if = "Option::is_none")] pub enrollment_details: Option<Enrollment>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub departments: Vec<Department>, #[serde(rename = "enrollmentAccounts", default, skip_serializing_if = "Vec::is_empty")] pub enrollment_accounts: Vec<EnrollmentAccount>, } pub mod billing_account_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum AccountType { CommerceRoot, Enrollment, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BillingProfile { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<BillingProfileProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BillingProfileProperties { #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")] pub display_name: Option<String>, #[serde(rename = "poNumber", 
default, skip_serializing_if = "Option::is_none")] pub po_number: Option<String>, #[serde(rename = "billingAddress", default, skip_serializing_if = "Option::is_none")] pub billing_address: Option<Address>, #[serde(rename = "billingContact", default, skip_serializing_if = "Option::is_none")] pub billing_contact: Option<String>, #[serde(rename = "emailInvoice", default, skip_serializing_if = "Option::is_none")] pub email_invoice: Option<bool>, #[serde(rename = "invoiceDay", default, skip_serializing_if = "Option::is_none")] pub invoice_day: Option<i64>, #[serde(default, skip_serializing_if = "Option::is_none")] pub currency: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ChargeSummaryByBillingAccount { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<ChargeSummaryProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ChargeSummaryByBillingProfile { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<ChargeSummaryProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ChargeSummaryByInvoiceSection { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<ChargeSummaryProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ChargeSummaryProperties { #[serde(rename = "billingPeriodId", default, skip_serializing_if = "Option::is_none")] pub billing_period_id: Option<String>, #[serde(rename = "usageStart", default, skip_serializing_if = "Option::is_none")] pub usage_start: Option<String>, #[serde(rename = "usageEnd", default, skip_serializing_if = "Option::is_none")] pub usage_end: Option<String>, #[serde(rename = "azureCharges", default, skip_serializing_if = "Option::is_none")] pub azure_charges: Option<Amount>, #[serde(rename = "chargesBilledSeparately", default, skip_serializing_if = "Option::is_none")] pub charges_billed_separately: Option<Amount>, #[serde(rename = "marketplaceCharges", default, skip_serializing_if = "Option::is_none")] pub marketplace_charges: Option<Amount>, #[serde(rename = "billingAccountId", default, skip_serializing_if = "Option::is_none")] pub billing_account_id: Option<String>, #[serde(rename = "billingProfileId", default, skip_serializing_if = "Option::is_none")] pub billing_profile_id: Option<String>, #[serde(rename = "invoiceSectionId", default, skip_serializing_if = "Option::is_none")] pub invoice_section_id: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ChargesListByBillingAccount { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<ChargeSummaryByBillingAccount>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ChargesListByBillingProfile { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<ChargeSummaryByBillingProfile>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ChargesListByInvoiceSection { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<ChargeSummaryByInvoiceSection>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CreditBalanceSummary { #[serde(rename = "estimatedBalance", default, skip_serializing_if = "Option::is_none")] pub estimated_balance: Option<Amount>, #[serde(rename = "currentBalance", default, skip_serializing_if = 
"Option::is_none")] pub current_balance: Option<Amount>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CreditSummary { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<CreditSummaryProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CreditSummaryProperties { #[serde(rename = "balanceSummary", default, skip_serializing_if = "Option::is_none")] pub balance_summary: Option<CreditBalanceSummary>, #[serde(rename = "pendingCreditAdjustments", default, skip_serializing_if = "Option::is_none")] pub pending_credit_adjustments: Option<Amount>, #[serde(rename = "expiredCredit", default, skip_serializing_if = "Option::is_none")] pub expired_credit: Option<Amount>, #[serde(rename = "pendingEligibleCharges", default, skip_serializing_if = "Option::is_none")] pub pending_eligible_charges: Option<Amount>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Department { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<DepartmentProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DepartmentProperties { #[serde(rename = "departmentName", default, skip_serializing_if = "Option::is_none")] pub department_name: Option<String>, #[serde(rename = "costCenter", default, skip_serializing_if = "Option::is_none")] pub cost_center: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<String>, #[serde(rename = "enrollmentAccounts", default, skip_serializing_if = "Vec::is_empty")] pub enrollment_accounts: Vec<EnrollmentAccount>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DownloadUrl { #[serde(rename = "downloadUrl", default, skip_serializing_if = "Option::is_none")] pub download_url: Option<String>, #[serde(rename = "expiryTime", default, skip_serializing_if = "Option::is_none")] pub expiry_time: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Enrollment { #[serde(rename = "startDate", default, skip_serializing_if = "Option::is_none")] pub start_date: Option<String>, #[serde(rename = "endDate", default, skip_serializing_if = "Option::is_none")] pub end_date: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub currency: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub channel: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub policies: Option<EnrollmentPolicies>, #[serde(default, skip_serializing_if = "Option::is_none")] pub language: Option<String>, #[serde(rename = "countryCode", default, skip_serializing_if = "Option::is_none")] pub country_code: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<String>, #[serde(rename = "billingCycle", default, skip_serializing_if = "Option::is_none")] pub billing_cycle: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EnrollmentAccount { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<EnrollmentAccountProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EnrollmentAccountProperties { #[serde(rename = "accountName", default, skip_serializing_if = "Option::is_none")] pub account_name: Option<String>, #[serde(rename = "costCenter", default, 
skip_serializing_if = "Option::is_none")] pub cost_center: Option<String>, #[serde(rename = "accountOwner", default, skip_serializing_if = "Option::is_none")] pub account_owner: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<String>, #[serde(rename = "startDate", default, skip_serializing_if = "Option::is_none")] pub start_date: Option<String>, #[serde(rename = "endDate", default, skip_serializing_if = "Option::is_none")] pub end_date: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub department: Option<Department>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EnrollmentPolicies { #[serde(rename = "accountOwnerViewCharges", default, skip_serializing_if = "Option::is_none")] pub account_owner_view_charges: Option<bool>, #[serde(rename = "departmentAdminViewCharges", default, skip_serializing_if = "Option::is_none")] pub department_admin_view_charges: Option<bool>, #[serde(rename = "marketplacesEnabled", default, skip_serializing_if = "Option::is_none")] pub marketplaces_enabled: Option<bool>, #[serde(rename = "reservedInstancesEnabled", default, skip_serializing_if = "Option::is_none")] pub reserved_instances_enabled: Option<bool>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ErrorDetails { #[serde(default, skip_serializing_if = "Option::is_none")] pub code: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub message: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ErrorResponse { #[serde(default, skip_serializing_if = "Option::is_none")] pub error: Option<ErrorDetails>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventProperties { #[serde(rename = "transactionDate", default, skip_serializing_if = "Option::is_none")] pub transaction_date: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(rename = "newCredit", default, skip_serializing_if = "Option::is_none")] pub new_credit: Option<Amount>, #[serde(default, skip_serializing_if = "Option::is_none")] pub adjustments: Option<Amount>, #[serde(rename = "creditExpired", default, skip_serializing_if = "Option::is_none")] pub credit_expired: Option<Amount>, #[serde(default, skip_serializing_if = "Option::is_none")] pub charges: Option<Amount>, #[serde(rename = "closedBalance", default, skip_serializing_if = "Option::is_none")] pub closed_balance: Option<Amount>, #[serde(rename = "eventType", default, skip_serializing_if = "Option::is_none")] pub event_type: Option<event_properties::EventType>, #[serde(rename = "invoiceNumber", default, skip_serializing_if = "Option::is_none")] pub invoice_number: Option<String>, } pub mod event_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum EventType { NewCredit, ExpiredCredit, SettledCharges, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventSummary { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<EventProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct
{ #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<EventSummary>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct InvoiceSection { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<InvoiceSectionProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct InvoiceSectionProperties { #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")] pub display_name: Option<String>, #[serde(rename = "billingProfiles", default, skip_serializing_if = "Vec::is_empty")] pub billing_profiles: Vec<BillingProfile>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LotProperties { #[serde(rename = "originalAmount", default, skip_serializing_if = "Option::is_none")] pub original_amount: Option<Amount>, #[serde(rename = "closedBalance", default, skip_serializing_if = "Option::is_none")] pub closed_balance: Option<Amount>, #[serde(default, skip_serializing_if = "Option::is_none")] pub source: Option<lot_properties::Source>, #[serde(rename = "startDate", default, skip_serializing_if = "Option::is_none")] pub start_date: Option<String>, #[serde(rename = "expirationDate", default, skip_serializing_if = "Option::is_none")] pub expiration_date: Option<String>, #[serde(rename = "poNumber", default, skip_serializing_if = "Option::is_none")] pub po_number: Option<String>, } pub mod lot_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Source { PurchasedCredit, PromotionalCredit, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LotSummary { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<LotProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Lots { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<LotSummary>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Operation { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub display: Option<operation::Display>, } pub mod operation { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Display { #[serde(default, skip_serializing_if = "Option::is_none")] pub provider: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub resource: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub operation: Option<String>, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<Operation>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PricesheetDownloadResponse { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<DownloadUrl>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ProxyResource { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, 
#[serde(rename = "eTag", default, skip_serializing_if = "Option::is_none")] pub e_tag: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Resource { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, }
Events
dispatcher_test.go
package dispatcher import ( "crypto/tls" "fmt" "net" "testing" "time" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "github.com/docker/go-events" "github.com/docker/swarmkit/api" "github.com/docker/swarmkit/ca" "github.com/docker/swarmkit/ca/testutils" raftutils "github.com/docker/swarmkit/manager/state/raft/testutils" "github.com/docker/swarmkit/manager/state/store" "github.com/stretchr/testify/assert" ) type grpcDispatcher struct { Clients []api.DispatcherClient SecurityConfigs []*ca.SecurityConfig Store *store.MemoryStore grpcServer *grpc.Server dispatcherServer *Dispatcher conns []*grpc.ClientConn testCA *testutils.TestCA } func (gd *grpcDispatcher) Close() { // Close the client connection. gd.dispatcherServer.Stop() for _, conn := range gd.conns { conn.Close() } gd.grpcServer.Stop() gd.testCA.Stop() } type testCluster struct { addr string store *store.MemoryStore } func (t *testCluster) GetMemberlist() map[uint64]*api.RaftMember { return map[uint64]*api.RaftMember{ 1: { NodeID: "1", Addr: t.addr, }, } } func (t *testCluster) SubscribePeers() (chan events.Event, func()) { ch := make(chan events.Event, 1) ch <- []*api.Peer{ { Addr: t.addr, NodeID: "1", }, } return ch, func() { close(ch) } } func (t *testCluster) MemoryStore() *store.MemoryStore { return t.store } func startDispatcher(c *Config) (*grpcDispatcher, error) { l, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { return nil, err } tca := testutils.NewTestCA(nil) agentSecurityConfig1, err := tca.NewNodeConfig(ca.WorkerRole) if err != nil { return nil, err } agentSecurityConfig2, err := tca.NewNodeConfig(ca.WorkerRole) if err != nil { return nil, err } managerSecurityConfig, err := tca.NewNodeConfig(ca.ManagerRole) if err != nil { return nil, err } serverOpts := []grpc.ServerOption{grpc.Creds(managerSecurityConfig.ServerTLSCreds)} s := grpc.NewServer(serverOpts...) tc := &testCluster{addr: l.Addr().String(), store: tca.MemoryStore} d := New(tc, c) authorize := func(ctx context.Context, roles []string) error { _, err := ca.AuthorizeForwardedRoleAndOrg(ctx, roles, []string{ca.ManagerRole}, tca.Organization, nil) return err } authenticatedDispatcherAPI := api.NewAuthenticatedWrapperDispatcherServer(d, authorize) api.RegisterDispatcherServer(s, authenticatedDispatcherAPI) go func() { // Serve will always return an error (even when properly stopped). // Explicitly ignore it. _ = s.Serve(l) }() go d.Run(context.Background()) if err := raftutils.PollFuncWithTimeout(nil, func() error { d.mu.Lock() defer d.mu.Unlock() if !d.isRunning() { return fmt.Errorf("dispatcher is not running") } return nil }, 5*time.Second); err != nil { return nil, err } clientOpts := []grpc.DialOption{grpc.WithTimeout(10 * time.Second)} clientOpts1 := append(clientOpts, grpc.WithTransportCredentials(agentSecurityConfig1.ClientTLSCreds)) clientOpts2 := append(clientOpts, grpc.WithTransportCredentials(agentSecurityConfig2.ClientTLSCreds)) clientOpts3 := append(clientOpts, grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{InsecureSkipVerify: true}))) conn1, err := grpc.Dial(l.Addr().String(), clientOpts1...) if err != nil { return nil, err } conn2, err := grpc.Dial(l.Addr().String(), clientOpts2...) if err != nil { return nil, err } conn3, err := grpc.Dial(l.Addr().String(), clientOpts3...) 
if err != nil { return nil, err } clients := []api.DispatcherClient{api.NewDispatcherClient(conn1), api.NewDispatcherClient(conn2), api.NewDispatcherClient(conn3)} securityConfigs := []*ca.SecurityConfig{agentSecurityConfig1, agentSecurityConfig2, managerSecurityConfig} conns := []*grpc.ClientConn{conn1, conn2, conn3} return &grpcDispatcher{ Clients: clients, SecurityConfigs: securityConfigs, Store: tc.MemoryStore(), dispatcherServer: d, conns: conns, grpcServer: s, testCA: tca, }, nil } func TestRegisterTwice(t *testing.T) { cfg := DefaultConfig() cfg.RateLimitPeriod = 0 gd, err := startDispatcher(cfg) assert.NoError(t, err) defer gd.Close() var expectedSessionID string { stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) assert.NoError(t, err) msg, err := stream.Recv() assert.NoError(t, err) assert.NotEmpty(t, msg.SessionID) expectedSessionID = msg.SessionID stream.CloseSend() } { stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) assert.NoError(t, err) msg, err := stream.Recv() assert.NoError(t, err) // session should be different! assert.NotEqual(t, msg.SessionID, expectedSessionID) stream.CloseSend() } } func TestRegisterExceedRateLimit(t *testing.T) { t.Parallel() gd, err := startDispatcher(DefaultConfig()) assert.NoError(t, err) defer gd.Close() for i := 0; i < 3; i++ { stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) assert.NoError(t, err) msg, err := stream.Recv() assert.NoError(t, err) assert.NotEmpty(t, msg.SessionID) stream.CloseSend() } { stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) defer stream.CloseSend() assert.NoError(t, err) _, err = stream.Recv() assert.Error(t, err) assert.Equal(t, codes.Unavailable, grpc.Code(err), err.Error()) } } func TestRegisterNoCert(t *testing.T) { gd, err := startDispatcher(DefaultConfig()) assert.NoError(t, err) defer gd.Close() // This client has no certificates, so this should fail stream, err := gd.Clients[2].Session(context.Background(), &api.SessionRequest{}) assert.NoError(t, err) defer stream.CloseSend() resp, err := stream.Recv() assert.Nil(t, resp) assert.EqualError(t, err, "rpc error: code = 7 desc = Permission denied: unauthorized peer role: rpc error: code = 7 desc = no client certificates in request") } func TestHeartbeat(t *testing.T) { cfg := DefaultConfig() cfg.HeartbeatPeriod = 500 * time.Millisecond cfg.HeartbeatEpsilon = 0 gd, err := startDispatcher(cfg) assert.NoError(t, err) defer gd.Close() var expectedSessionID string { stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) assert.NoError(t, err) defer stream.CloseSend() resp, err := stream.Recv() assert.NoError(t, err) assert.NotEmpty(t, resp.SessionID) expectedSessionID = resp.SessionID } time.Sleep(250 * time.Millisecond) { // heartbeat without correct SessionID should fail resp, err := gd.Clients[0].Heartbeat(context.Background(), &api.HeartbeatRequest{}) assert.Nil(t, resp) assert.Error(t, err) assert.Equal(t, grpc.Code(err), codes.InvalidArgument) } resp, err := gd.Clients[0].Heartbeat(context.Background(), &api.HeartbeatRequest{SessionID: expectedSessionID})
assert.NoError(t, err) assert.NotZero(t, resp.Period) time.Sleep(300 * time.Millisecond) gd.Store.View(func(readTx store.ReadTx) { storeNodes, err := store.FindNodes(readTx, store.All) assert.NoError(t, err) assert.NotEmpty(t, storeNodes) found := false for _, node := range storeNodes { if node.ID == gd.SecurityConfigs[0].ClientTLSCreds.NodeID() { found = true assert.Equal(t, api.NodeStatus_READY, node.Status.State) } } assert.True(t, found) }) } func TestHeartbeatNoCert(t *testing.T) { gd, err := startDispatcher(DefaultConfig()) assert.NoError(t, err) defer gd.Close() // heartbeat from a client without a certificate should fail resp, err := gd.Clients[2].Heartbeat(context.Background(), &api.HeartbeatRequest{}) assert.Nil(t, resp) assert.EqualError(t, err, "rpc error: code = 7 desc = Permission denied: unauthorized peer role: rpc error: code = 7 desc = no client certificates in request") } func TestHeartbeatTimeout(t *testing.T) { t.Parallel() cfg := DefaultConfig() cfg.HeartbeatPeriod = 100 * time.Millisecond cfg.HeartbeatEpsilon = 0 gd, err := startDispatcher(cfg) assert.NoError(t, err) defer gd.Close() var expectedSessionID string { stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) assert.NoError(t, err) resp, err := stream.Recv() assert.NoError(t, err) assert.NotEmpty(t, resp.SessionID) expectedSessionID = resp.SessionID } time.Sleep(500 * time.Millisecond) gd.Store.View(func(readTx store.ReadTx) { storeNodes, err := store.FindNodes(readTx, store.ByIDPrefix(gd.SecurityConfigs[0].ClientTLSCreds.NodeID())) assert.NoError(t, err) assert.NotEmpty(t, storeNodes) assert.Equal(t, api.NodeStatus_DOWN, storeNodes[0].Status.State) }) // check that node is deregistered resp, err := gd.Clients[0].Heartbeat(context.Background(), &api.HeartbeatRequest{SessionID: expectedSessionID}) assert.Nil(t, resp) assert.Error(t, err) assert.Equal(t, grpc.ErrorDesc(err), ErrNodeNotRegistered.Error()) } func TestHeartbeatUnregistered(t *testing.T) { gd, err := startDispatcher(DefaultConfig()) assert.NoError(t, err) defer gd.Close() resp, err := gd.Clients[0].Heartbeat(context.Background(), &api.HeartbeatRequest{}) assert.Nil(t, resp) assert.Error(t, err) assert.Equal(t, ErrSessionInvalid.Error(), grpc.ErrorDesc(err)) } // If the session ID is not sent as part of the Assignments request, an error is returned to the stream func TestAssignmentsErrorsIfNoSessionID(t *testing.T) { t.Parallel() gd, err := startDispatcher(DefaultConfig()) assert.NoError(t, err) defer gd.Close() // without correct SessionID should fail stream, err := gd.Clients[0].Assignments(context.Background(), &api.AssignmentsRequest{}) assert.NoError(t, err) assert.NotNil(t, stream) defer stream.CloseSend() resp, err := stream.Recv() assert.Nil(t, resp) assert.Error(t, err) assert.Equal(t, grpc.Code(err), codes.InvalidArgument) } // Assignments will send down any existing node tasks > ASSIGNED, and any secrets // for said tasks that are <= RUNNING (if the secrets exist) func TestAssignmentsInitialNodeTasks(t *testing.T) { t.Parallel() gd, err := startDispatcher(DefaultConfig()) assert.NoError(t, err) defer gd.Close() expectedSessionID, nodeID := getSessionAndNodeID(t, gd.Clients[0]) // create the relevant secrets and tasks secrets, tasks := makeTasksAndSecrets(t, nodeID) err = gd.Store.Update(func(tx store.Tx) error { for _, secret := range secrets[:] { assert.NoError(t, store.CreateSecret(tx, secret)) } for _, task := range tasks { assert.NoError(t, store.CreateTask(tx, task)) } return nil }) assert.NoError(t, err) stream, err 
:= gd.Clients[0].Assignments(context.Background(), &api.AssignmentsRequest{SessionID: expectedSessionID}) assert.NoError(t, err) defer stream.CloseSend() time.Sleep(100 * time.Millisecond) // check the initial task and secret stream resp, err := stream.Recv() assert.NoError(t, err) // FIXME(aaronl): This is hard to maintain. assert.Equal(t, 17, len(resp.Changes)) taskChanges, secretChanges := collectTasksAndSecrets(resp.Changes) assert.Len(t, taskChanges, 10) // 10 types of task states >= assigned, 2 types < assigned for _, task := range tasks[2:] { assert.NotNil(t, taskChanges[idAndAction{id: task.ID, action: api.AssignmentChange_AssignmentActionUpdate}]) } assert.Len(t, secretChanges, 7) // 6 different secrets for states between assigned and running inclusive plus secret12 for _, secret := range secrets[2:8] { assert.NotNil(t, secretChanges[idAndAction{id: secret.ID, action: api.AssignmentChange_AssignmentActionUpdate}]) } // updating all the tasks will attempt to remove all the secrets for the tasks that are in state > running err = gd.Store.Update(func(tx store.Tx) error { for _, task := range tasks { assert.NoError(t, store.UpdateTask(tx, task)) } return nil }) assert.NoError(t, err) // updates for all the tasks, remove secret sent for the 4 types of states > running resp, err = stream.Recv() assert.NoError(t, err) assert.Equal(t, 5, len(resp.Changes)) taskChanges, secretChanges = collectTasksAndSecrets(resp.Changes) assert.Len(t, taskChanges, 1) assert.NotNil(t, taskChanges[idAndAction{id: tasks[2].ID, action: api.AssignmentChange_AssignmentActionUpdate}]) // this is the task in ASSIGNED assert.Len(t, secretChanges, 4) // these are the secrets for states > running for _, secret := range secrets[9 : len(secrets)-1] { assert.NotNil(t, secretChanges[idAndAction{id: secret.ID, action: api.AssignmentChange_AssignmentActionRemove}]) } // deleting the tasks removes all the secrets for every single task, no matter // what state it's in err = gd.Store.Update(func(tx store.Tx) error { for _, task := range tasks { assert.NoError(t, store.DeleteTask(tx, task.ID)) } return nil }) assert.NoError(t, err) // updates for all the tasks >= ASSIGNMENT, and remove secrets for all of them, // (there will be 2 tasks changes that won't be sent down) resp, err = stream.Recv() assert.NoError(t, err) assert.Equal(t, len(tasks)-2+len(secrets)-2, len(resp.Changes)) taskChanges, secretChanges = collectTasksAndSecrets(resp.Changes) assert.Len(t, taskChanges, len(tasks)-2) for _, task := range tasks[2:] { assert.NotNil(t, taskChanges[idAndAction{id: task.ID, action: api.AssignmentChange_AssignmentActionRemove}]) } assert.Len(t, secretChanges, len(secrets)-2) for _, secret := range secrets[2:] { assert.NotNil(t, secretChanges[idAndAction{id: secret.ID, action: api.AssignmentChange_AssignmentActionRemove}]) } } // As tasks are added, assignments will send down tasks > ASSIGNED, and any secrets // for said tasks that are <= RUNNING (if the secrets exist) func TestAssignmentsAddingTasks(t *testing.T) { t.Parallel() gd, err := startDispatcher(DefaultConfig()) assert.NoError(t, err) defer gd.Close() expectedSessionID, nodeID := getSessionAndNodeID(t, gd.Clients[0]) stream, err := gd.Clients[0].Assignments(context.Background(), &api.AssignmentsRequest{SessionID: expectedSessionID}) assert.NoError(t, err) defer stream.CloseSend() time.Sleep(100 * time.Millisecond) // There are no initial tasks or secrets resp, err := stream.Recv() assert.NoError(t, err) assert.Empty(t, resp.Changes) // create the relevant secrets and tasks 
and update the tasks secrets, tasks := makeTasksAndSecrets(t, nodeID) err = gd.Store.Update(func(tx store.Tx) error { for _, secret := range secrets[:len(secrets)-1] { assert.NoError(t, store.CreateSecret(tx, secret)) } for _, task := range tasks { assert.NoError(t, store.CreateTask(tx, task)) } return nil }) assert.NoError(t, err) // Nothing happens until we update. Updating all the tasks will send updates for all the tasks >= ASSIGNED (10), // and secrets for all the tasks >= ASSIGNED and <= RUNNING (6). err = gd.Store.Update(func(tx store.Tx) error { for _, task := range tasks { assert.NoError(t, store.UpdateTask(tx, task)) } return nil }) assert.NoError(t, err) resp, err = stream.Recv() assert.NoError(t, err) // FIXME(aaronl): This is hard to maintain. assert.Equal(t, 10+6, len(resp.Changes)) taskChanges, secretChanges := collectTasksAndSecrets(resp.Changes) assert.Len(t, taskChanges, 10) for _, task := range tasks[2:] { assert.NotNil(t, taskChanges[idAndAction{id: task.ID, action: api.AssignmentChange_AssignmentActionUpdate}]) } assert.Len(t, secretChanges, 6) // all the secrets for tasks >= ASSIGNED and <= RUNNING for _, secret := range secrets[2:8] { assert.NotNil(t, secretChanges[idAndAction{id: secret.ID, action: api.AssignmentChange_AssignmentActionUpdate}]) } // deleting the tasks removes all the secrets for every single task, no matter // what state it's in err = gd.Store.Update(func(tx store.Tx) error { for _, task := range tasks { assert.NoError(t, store.DeleteTask(tx, task.ID)) } return nil }) assert.NoError(t, err) // updates for all the tasks >= ASSIGNMENT, and remove secrets for all of them, even ones that don't exist // (there will be 2 tasks changes that won't be sent down) resp, err = stream.Recv() assert.NoError(t, err) assert.Equal(t, len(tasks)-2+len(secrets)-2, len(resp.Changes)) taskChanges, secretChanges = collectTasksAndSecrets(resp.Changes) assert.Len(t, taskChanges, len(tasks)-2) for _, task := range tasks[2:] { assert.NotNil(t, taskChanges[idAndAction{id: task.ID, action: api.AssignmentChange_AssignmentActionRemove}]) } assert.Len(t, secretChanges, len(secrets)-2) for _, secret := range secrets[2:] { assert.NotNil(t, secretChanges[idAndAction{id: secret.ID, action: api.AssignmentChange_AssignmentActionRemove}]) } } // If a secret is updated or deleted, even if it's for an existing task, no changes will be sent down func TestAssignmentsSecretUpdateAndDeletion(t *testing.T) { t.Parallel() gd, err := startDispatcher(DefaultConfig()) assert.NoError(t, err) defer gd.Close() expectedSessionID, nodeID := getSessionAndNodeID(t, gd.Clients[0]) // create the relevant secrets and tasks secrets, tasks := makeTasksAndSecrets(t, nodeID) err = gd.Store.Update(func(tx store.Tx) error { for _, secret := range secrets[:len(secrets)-1] { assert.NoError(t, store.CreateSecret(tx, secret)) } for _, task := range tasks { assert.NoError(t, store.CreateTask(tx, task)) } return nil }) assert.NoError(t, err) stream, err := gd.Clients[0].Assignments(context.Background(), &api.AssignmentsRequest{SessionID: expectedSessionID}) assert.NoError(t, err) defer stream.CloseSend() time.Sleep(100 * time.Millisecond) // check the initial task and secret stream resp, err := stream.Recv() assert.NoError(t, err) // FIXME(aaronl): This is hard to maintain. 
assert.Equal(t, 16, len(resp.Changes)) taskChanges, secretChanges := collectTasksAndSecrets(resp.Changes) assert.Len(t, taskChanges, 10) // 10 types of task states >= assigned, 2 types < assigned for _, task := range tasks[2:] { assert.NotNil(t, taskChanges[idAndAction{id: task.ID, action: api.AssignmentChange_AssignmentActionUpdate}]) } assert.Len(t, secretChanges, 6) // 6 types of task states between assigned and running inclusive for _, secret := range secrets[2:8] { assert.NotNil(t, secretChanges[idAndAction{id: secret.ID, action: api.AssignmentChange_AssignmentActionUpdate}]) } // updating secrets, used by tasks or not, do not cause any changes err = gd.Store.Update(func(tx store.Tx) error { for _, secret := range secrets[:len(secrets)-2] { secret.Spec.Data = []byte("new secret data") assert.NoError(t, store.UpdateSecret(tx, secret)) } return nil }) assert.NoError(t, err) recvChan := make(chan struct{}) go func() { _, _ = stream.Recv() recvChan <- struct{}{} }() select { case <-recvChan: assert.Fail(t, "secret update should not trigger dispatcher update") case <-time.After(250 * time.Millisecond): } // deleting secrets, used by tasks or not, do not cause any changes err = gd.Store.Update(func(tx store.Tx) error { for _, secret := range secrets[:len(secrets)-2] { assert.NoError(t, store.DeleteSecret(tx, secret.ID)) } return nil }) assert.NoError(t, err) select { case <-recvChan: assert.Fail(t, "secret delete should not trigger dispatcher update") case <-time.After(250 * time.Millisecond): } } func TestTasksStatusChange(t *testing.T) { t.Parallel() gd, err := startDispatcher(DefaultConfig()) assert.NoError(t, err) defer gd.Close() var expectedSessionID string var nodeID string { stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) assert.NoError(t, err) defer stream.CloseSend() resp, err := stream.Recv() assert.NoError(t, err) assert.NotEmpty(t, resp.SessionID) expectedSessionID = resp.SessionID nodeID = resp.Node.ID } testTask1 := &api.Task{ NodeID: nodeID, ID: "testTask1", Status: api.TaskStatus{State: api.TaskStateAssigned}, DesiredState: api.TaskStateReady, } testTask2 := &api.Task{ NodeID: nodeID, ID: "testTask2", Status: api.TaskStatus{State: api.TaskStateAssigned}, DesiredState: api.TaskStateReady, } stream, err := gd.Clients[0].Assignments(context.Background(), &api.AssignmentsRequest{SessionID: expectedSessionID}) assert.NoError(t, err) time.Sleep(100 * time.Millisecond) resp, err := stream.Recv() assert.NoError(t, err) // initially no tasks assert.Equal(t, 0, len(resp.Changes)) // Creating the tasks will not create an event for assignments err = gd.Store.Update(func(tx store.Tx) error { assert.NoError(t, store.CreateTask(tx, testTask1)) assert.NoError(t, store.CreateTask(tx, testTask2)) return nil }) assert.NoError(t, err) err = gd.Store.Update(func(tx store.Tx) error { assert.NoError(t, store.UpdateTask(tx, testTask1)) assert.NoError(t, store.UpdateTask(tx, testTask2)) return nil }) assert.NoError(t, err) resp, err = stream.Recv() assert.NoError(t, err) assert.Equal(t, len(resp.Changes), 2) tasks, secrets := collectTasksAndSecrets(resp.Changes) assert.Len(t, tasks, 2) assert.Len(t, secrets, 0) assert.NotNil(t, tasks[idAndAction{id: "testTask1", action: api.AssignmentChange_AssignmentActionUpdate}]) assert.NotNil(t, tasks[idAndAction{id: "testTask2", action: api.AssignmentChange_AssignmentActionUpdate}]) err = gd.Store.Update(func(tx store.Tx) error { assert.NoError(t, store.UpdateTask(tx, &api.Task{ ID: testTask1.ID, NodeID: nodeID, // only Status 
is changed for task1 Status: api.TaskStatus{State: api.TaskStateFailed, Err: "1234"}, DesiredState: api.TaskStateReady, })) return nil }) assert.NoError(t, err) // dispatcher shouldn't send snapshot for this update recvChan := make(chan struct{}) go func() { _, _ = stream.Recv() recvChan <- struct{}{} }() select { case <-recvChan: assert.Fail(t, "task.Status update should not trigger dispatcher update") case <-time.After(250 * time.Millisecond): } } func TestTasksBatch(t *testing.T) { gd, err := startDispatcher(DefaultConfig()) assert.NoError(t, err) defer gd.Close() var expectedSessionID string var nodeID string { stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) assert.NoError(t, err) defer stream.CloseSend() resp, err := stream.Recv() assert.NoError(t, err) assert.NotEmpty(t, resp.SessionID) expectedSessionID = resp.SessionID nodeID = resp.Node.ID } testTask1 := &api.Task{ NodeID: nodeID, ID: "testTask1", Status: api.TaskStatus{State: api.TaskStateAssigned}, } testTask2 := &api.Task{ NodeID: nodeID, ID: "testTask2", Status: api.TaskStatus{State: api.TaskStateAssigned}, } stream, err := gd.Clients[0].Assignments(context.Background(), &api.AssignmentsRequest{SessionID: expectedSessionID}) assert.NoError(t, err) resp, err := stream.Recv() assert.NoError(t, err) // initially no tasks assert.Equal(t, 0, len(resp.Changes)) // Create, Update and Delete tasks. err = gd.Store.Update(func(tx store.Tx) error { assert.NoError(t, store.CreateTask(tx, testTask1)) assert.NoError(t, store.CreateTask(tx, testTask2)) return nil }) assert.NoError(t, err) err = gd.Store.Update(func(tx store.Tx) error { assert.NoError(t, store.UpdateTask(tx, testTask1)) assert.NoError(t, store.UpdateTask(tx, testTask2)) return nil }) assert.NoError(t, err) err = gd.Store.Update(func(tx store.Tx) error { assert.NoError(t, store.DeleteTask(tx, testTask1.ID)) assert.NoError(t, store.DeleteTask(tx, testTask2.ID)) return nil }) assert.NoError(t, err) resp, err = stream.Recv() assert.NoError(t, err) // all tasks have been deleted tasks, secrets := collectTasksAndSecrets(resp.Changes) assert.Len(t, tasks, 2) assert.Len(t, secrets, 0) assert.Equal(t, api.AssignmentChange_AssignmentActionRemove, resp.Changes[0].Action) assert.Equal(t, api.AssignmentChange_AssignmentActionRemove, resp.Changes[1].Action) } func TestTasksNoCert(t *testing.T) { gd, err := startDispatcher(DefaultConfig()) assert.NoError(t, err) defer gd.Close() stream, err := gd.Clients[2].Assignments(context.Background(), &api.AssignmentsRequest{}) assert.NoError(t, err) assert.NotNil(t, stream) resp, err := stream.Recv() assert.Nil(t, resp) assert.EqualError(t, err, "rpc error: code = 7 desc = Permission denied: unauthorized peer role: rpc error: code = 7 desc = no client certificates in request") } func TestTaskUpdate(t *testing.T) { gd, err := startDispatcher(DefaultConfig()) assert.NoError(t, err) defer gd.Close() var ( expectedSessionID string nodeID string ) { stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) assert.NoError(t, err) defer stream.CloseSend() resp, err := stream.Recv() assert.NoError(t, err) assert.NotEmpty(t, resp.SessionID) expectedSessionID = resp.SessionID nodeID = resp.Node.ID } // testTask1 and testTask2 are advanced from NEW to ASSIGNED. testTask1 := &api.Task{ ID: "testTask1", NodeID: nodeID, } testTask2 := &api.Task{ ID: "testTask2", NodeID: nodeID, } // testTask3 is used to confirm that status updates for a task not // assigned to the node sending the update are rejected. 
testTask3 := &api.Task{ ID: "testTask3", NodeID: "differentnode", } // testTask4 is used to confirm that a task's state is not allowed to // move backwards. testTask4 := &api.Task{ ID: "testTask4", NodeID: nodeID, Status: api.TaskStatus{ State: api.TaskStateShutdown, }, } err = gd.Store.Update(func(tx store.Tx) error { assert.NoError(t, store.CreateTask(tx, testTask1)) assert.NoError(t, store.CreateTask(tx, testTask2)) assert.NoError(t, store.CreateTask(tx, testTask3)) assert.NoError(t, store.CreateTask(tx, testTask4)) return nil }) assert.NoError(t, err) testTask1.Status = api.TaskStatus{State: api.TaskStateAssigned} testTask2.Status = api.TaskStatus{State: api.TaskStateAssigned} testTask3.Status = api.TaskStatus{State: api.TaskStateAssigned} testTask4.Status = api.TaskStatus{State: api.TaskStateRunning} updReq := &api.UpdateTaskStatusRequest{ Updates: []*api.UpdateTaskStatusRequest_TaskStatusUpdate{ { TaskID: testTask1.ID, Status: &testTask1.Status, }, { TaskID: testTask2.ID, Status: &testTask2.Status, }, { TaskID: testTask4.ID, Status: &testTask4.Status, }, }, } { // without correct SessionID should fail resp, err := gd.Clients[0].UpdateTaskStatus(context.Background(), updReq) assert.Nil(t, resp) assert.Error(t, err) assert.Equal(t, grpc.Code(err), codes.InvalidArgument) } updReq.SessionID = expectedSessionID _, err = gd.Clients[0].UpdateTaskStatus(context.Background(), updReq) assert.NoError(t, err) { // updating a task not assigned to us should fail updReq.Updates = []*api.UpdateTaskStatusRequest_TaskStatusUpdate{ { TaskID: testTask3.ID, Status: &testTask3.Status, }, } resp, err := gd.Clients[0].UpdateTaskStatus(context.Background(), updReq) assert.Nil(t, resp) assert.Error(t, err) assert.Equal(t, grpc.Code(err), codes.PermissionDenied) } gd.dispatcherServer.processUpdates() gd.Store.View(func(readTx store.ReadTx) { storeTask1 := store.GetTask(readTx, testTask1.ID) assert.NotNil(t, storeTask1) storeTask2 := store.GetTask(readTx, testTask2.ID) assert.NotNil(t, storeTask2) assert.Equal(t, storeTask1.Status.State, api.TaskStateAssigned) assert.Equal(t, storeTask2.Status.State, api.TaskStateAssigned) storeTask3 := store.GetTask(readTx, testTask3.ID) assert.NotNil(t, storeTask3) assert.Equal(t, storeTask3.Status.State, api.TaskStateNew) // The update to task4's state should be ignored because it // would have moved backwards. 
storeTask4 := store.GetTask(readTx, testTask4.ID) assert.NotNil(t, storeTask4) assert.Equal(t, storeTask4.Status.State, api.TaskStateShutdown) }) } func TestTaskUpdateNoCert(t *testing.T) { gd, err := startDispatcher(DefaultConfig()) assert.NoError(t, err) defer gd.Close() testTask1 := &api.Task{ ID: "testTask1", } err = gd.Store.Update(func(tx store.Tx) error { assert.NoError(t, store.CreateTask(tx, testTask1)) return nil }) assert.NoError(t, err) testTask1.Status = api.TaskStatus{State: api.TaskStateAssigned} updReq := &api.UpdateTaskStatusRequest{ Updates: []*api.UpdateTaskStatusRequest_TaskStatusUpdate{ { TaskID: testTask1.ID, Status: &testTask1.Status, }, }, } // without correct SessionID should fail resp, err := gd.Clients[2].UpdateTaskStatus(context.Background(), updReq) assert.Nil(t, resp) assert.Error(t, err) assert.EqualError(t, err, "rpc error: code = 7 desc = Permission denied: unauthorized peer role: rpc error: code = 7 desc = no client certificates in request") } func TestSession(t *testing.T) { cfg := DefaultConfig() gd, err := startDispatcher(cfg) assert.NoError(t, err) defer gd.Close() stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) assert.NoError(t, err) stream.CloseSend() resp, err := stream.Recv() assert.NoError(t, err) assert.NotEmpty(t, resp.SessionID) assert.Equal(t, 1, len(resp.Managers)) } func TestSessionNoCert(t *testing.T) { cfg := DefaultConfig() gd, err := startDispatcher(cfg) assert.NoError(t, err) defer gd.Close() stream, err := gd.Clients[2].Session(context.Background(), &api.SessionRequest{}) assert.NoError(t, err) msg, err := stream.Recv() assert.Nil(t, msg) assert.EqualError(t, err, "rpc error: code = 7 desc = Permission denied: unauthorized peer role: rpc error: code = 7 desc = no client certificates in request") } func getSessionAndNodeID(t *testing.T, c api.DispatcherClient) (string, string) { stream, err := c.Session(context.Background(), &api.SessionRequest{}) assert.NoError(t, err) defer stream.CloseSend() resp, err := stream.Recv() assert.NoError(t, err) assert.NotEmpty(t, resp.SessionID) return resp.SessionID, resp.Node.ID } type idAndAction struct { id string action api.AssignmentChange_AssignmentAction } func collectTasksAndSecrets(changes []*api.AssignmentChange) (map[idAndAction]*api.Task, map[idAndAction]*api.Secret) { tasks := make(map[idAndAction]*api.Task) secrets := make(map[idAndAction]*api.Secret) for _, change := range changes { task := change.Assignment.GetTask() if task != nil { tasks[idAndAction{id: task.ID, action: change.Action}] = task } secret := change.Assignment.GetSecret() if secret != nil { secrets[idAndAction{id: secret.ID, action: change.Action}] = secret } } return tasks, secrets } func makeTasksAndSecrets(t *testing.T, nodeID string) ([]*api.Secret, []*api.Task) { var secrets []*api.Secret var tasks []*api.Task for i := 0; i <= len(taskStatesInOrder); i++ { secrets = append(secrets, &api.Secret{ ID: fmt.Sprintf("IDsecret%d", i), Digest: fmt.Sprintf("abc%d", i), Spec: api.SecretSpec{ Annotations: api.Annotations{ Name: fmt.Sprintf("secret%d", i), }, Data: []byte(fmt.Sprintf("secret%d", i)), }, }) } for i, taskState := range taskStatesInOrder { tasks = append(tasks, &api.Task{ NodeID: nodeID, ID: fmt.Sprintf("testTask%d", i), Status: api.TaskStatus{State: taskState}, DesiredState: api.TaskStateReady, Spec: taskSpecFromSecrets(secrets[i], secrets[len(secrets)-1]), }) } return secrets, tasks } func taskSpecFromSecrets(secrets ...*api.Secret) api.TaskSpec { var secretRefs 
[]*api.SecretReference for _, s := range secrets { secretRefs = append(secretRefs, &api.SecretReference{ SecretName: s.Spec.Annotations.Name, SecretID: s.ID, Target: &api.SecretReference_File{ File: &api.SecretReference_FileTarget{ Name: "target.txt", UID: "0", GID: "0", Mode: 0666, }, }, }) } return api.TaskSpec{ Runtime: &api.TaskSpec_Container{ Container: &api.ContainerSpec{ Secrets: secretRefs, }, }, } } var taskStatesInOrder = []api.TaskState{ api.TaskStateNew, api.TaskStatePending, api.TaskStateAssigned, api.TaskStateAccepted, api.TaskStatePreparing, api.TaskStateReady, api.TaskStateStarting, api.TaskStateRunning, api.TaskStateCompleted, api.TaskStateShutdown, api.TaskStateFailed, api.TaskStateRejected, } // Ensure we test the old Tasks() API for backwards compat func TestOldTasks(t *testing.T) { t.Parallel() gd, err := startDispatcher(DefaultConfig()) assert.NoError(t, err) defer gd.Close() var expectedSessionID string var nodeID string { stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) assert.NoError(t, err) defer stream.CloseSend() resp, err := stream.Recv() assert.NoError(t, err) assert.NotEmpty(t, resp.SessionID) expectedSessionID = resp.SessionID nodeID = resp.Node.ID } testTask1 := &api.Task{ NodeID: nodeID, ID: "testTask1", Status: api.TaskStatus{State: api.TaskStateAssigned}, DesiredState: api.TaskStateReady, } testTask2 := &api.Task{ NodeID: nodeID, ID: "testTask2", Status: api.TaskStatus{State: api.TaskStateAssigned}, DesiredState: api.TaskStateReady, } { // without correct SessionID should fail stream, err := gd.Clients[0].Tasks(context.Background(), &api.TasksRequest{}) assert.NoError(t, err) assert.NotNil(t, stream) resp, err := stream.Recv() assert.Nil(t, resp) assert.Error(t, err) assert.Equal(t, grpc.Code(err), codes.InvalidArgument) } stream, err := gd.Clients[0].Tasks(context.Background(), &api.TasksRequest{SessionID: expectedSessionID}) assert.NoError(t, err) time.Sleep(100 * time.Millisecond) resp, err := stream.Recv() assert.NoError(t, err) // initially no tasks assert.Equal(t, 0, len(resp.Tasks)) err = gd.Store.Update(func(tx store.Tx) error { assert.NoError(t, store.CreateTask(tx, testTask1)) assert.NoError(t, store.CreateTask(tx, testTask2)) return nil }) assert.NoError(t, err) resp, err = stream.Recv() assert.NoError(t, err) assert.Equal(t, len(resp.Tasks), 2) assert.True(t, resp.Tasks[0].ID == "testTask1" && resp.Tasks[1].ID == "testTask2" || resp.Tasks[0].ID == "testTask2" && resp.Tasks[1].ID == "testTask1") err = gd.Store.Update(func(tx store.Tx) error { assert.NoError(t, store.UpdateTask(tx, &api.Task{ ID: testTask1.ID, NodeID: nodeID, Status: api.TaskStatus{State: api.TaskStateAssigned}, DesiredState: api.TaskStateRunning, })) return nil }) assert.NoError(t, err) resp, err = stream.Recv() assert.NoError(t, err) assert.Equal(t, len(resp.Tasks), 2) for _, task := range resp.Tasks { if task.ID == "testTask1" { assert.Equal(t, task.DesiredState, api.TaskStateRunning) } } err = gd.Store.Update(func(tx store.Tx) error { assert.NoError(t, store.DeleteTask(tx, testTask1.ID)) assert.NoError(t, store.DeleteTask(tx, testTask2.ID)) return nil }) assert.NoError(t, err) resp, err = stream.Recv() assert.NoError(t, err) assert.Equal(t, len(resp.Tasks), 0) } func TestOldTasksStatusChange(t *testing.T) { t.Parallel() gd, err := startDispatcher(DefaultConfig()) assert.NoError(t, err) defer gd.Close() var expectedSessionID string var nodeID string { stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) 
assert.NoError(t, err) defer stream.CloseSend() resp, err := stream.Recv() assert.NoError(t, err) assert.NotEmpty(t, resp.SessionID) expectedSessionID = resp.SessionID nodeID = resp.Node.ID } testTask1 := &api.Task{ NodeID: nodeID, ID: "testTask1", Status: api.TaskStatus{State: api.TaskStateAssigned}, DesiredState: api.TaskStateReady, } testTask2 := &api.Task{ NodeID: nodeID, ID: "testTask2", Status: api.TaskStatus{State: api.TaskStateAssigned}, DesiredState: api.TaskStateReady, } { // without correct SessionID should fail stream, err := gd.Clients[0].Tasks(context.Background(), &api.TasksRequest{}) assert.NoError(t, err) assert.NotNil(t, stream) resp, err := stream.Recv() assert.Nil(t, resp) assert.Error(t, err) assert.Equal(t, grpc.Code(err), codes.InvalidArgument) } stream, err := gd.Clients[0].Tasks(context.Background(), &api.TasksRequest{SessionID: expectedSessionID}) assert.NoError(t, err) time.Sleep(100 * time.Millisecond) resp, err := stream.Recv() assert.NoError(t, err) // initially no tasks assert.Equal(t, 0, len(resp.Tasks)) err = gd.Store.Update(func(tx store.Tx) error { assert.NoError(t, store.CreateTask(tx, testTask1)) assert.NoError(t, store.CreateTask(tx, testTask2)) return nil }) assert.NoError(t, err) resp, err = stream.Recv() assert.NoError(t, err) assert.Equal(t, len(resp.Tasks), 2) assert.True(t, resp.Tasks[0].ID == "testTask1" && resp.Tasks[1].ID == "testTask2" || resp.Tasks[0].ID == "testTask2" && resp.Tasks[1].ID == "testTask1") err = gd.Store.Update(func(tx store.Tx) error { assert.NoError(t, store.UpdateTask(tx, &api.Task{ ID: testTask1.ID, NodeID: nodeID, // only Status is changed for task1 Status: api.TaskStatus{State: api.TaskStateFailed, Err: "1234"}, DesiredState: api.TaskStateReady, })) return nil }) assert.NoError(t, err) // dispatcher shouldn't send snapshot for this update recvChan := make(chan struct{}) go func() { _, _ = stream.Recv() recvChan <- struct{}{} }() select { case <-recvChan: assert.Fail(t, "task.Status update should not trigger dispatcher update") case <-time.After(250 * time.Millisecond): } } func TestOldTasksBatch(t *testing.T) { gd, err := startDispatcher(DefaultConfig()) assert.NoError(t, err) defer gd.Close() var expectedSessionID string var nodeID string { stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) assert.NoError(t, err) defer stream.CloseSend() resp, err := stream.Recv() assert.NoError(t, err) assert.NotEmpty(t, resp.SessionID) expectedSessionID = resp.SessionID nodeID = resp.Node.ID } testTask1 := &api.Task{ NodeID: nodeID, ID: "testTask1", Status: api.TaskStatus{State: api.TaskStateAssigned}, } testTask2 := &api.Task{ NodeID: nodeID, ID: "testTask2", Status: api.TaskStatus{State: api.TaskStateAssigned}, } stream, err := gd.Clients[0].Tasks(context.Background(), &api.TasksRequest{SessionID: expectedSessionID}) assert.NoError(t, err) resp, err := stream.Recv() assert.NoError(t, err) // initially no tasks assert.Equal(t, 0, len(resp.Tasks)) err = gd.Store.Update(func(tx store.Tx) error { assert.NoError(t, store.CreateTask(tx, testTask1)) assert.NoError(t, store.CreateTask(tx, testTask2)) return nil }) assert.NoError(t, err) err = gd.Store.Update(func(tx store.Tx) error { assert.NoError(t, store.DeleteTask(tx, testTask1.ID)) assert.NoError(t, store.DeleteTask(tx, testTask2.ID)) return nil }) assert.NoError(t, err) resp, err = stream.Recv() assert.NoError(t, err) // all tasks have been deleted assert.Equal(t, len(resp.Tasks), 0) } func TestOldTasksNoCert(t *testing.T) { gd, err := 
startDispatcher(DefaultConfig()) assert.NoError(t, err) defer gd.Close() stream, err := gd.Clients[2].Tasks(context.Background(), &api.TasksRequest{}) assert.NoError(t, err) assert.NotNil(t, stream) resp, err := stream.Recv() assert.Nil(t, resp) assert.EqualError(t, err, "rpc error: code = 7 desc = Permission denied: unauthorized peer role: rpc error: code = 7 desc = no client certificates in request") }
watch.rs
use watchexec::{
    cli::Args,
    error::Result,
    pathop::PathOp,
    run::{ExecHandler, Handler},
};

pub struct
{
    args: Args,
    quiet: bool,
    inner: ExecHandler,
}

impl Handler for CwHandler {
    fn new(mut args: Args) -> Result<Self> {
        let quiet = args.cmd.last() == Some(&"quiet".into());
        if quiet {
            args.cmd.pop();
        }

        let mut final_cmd = args.cmd.join(" && ");
        if !quiet {
            #[cfg(unix)]
            final_cmd.push_str("; echo [Finished running. Exit status: $?]");
            #[cfg(windows)]
            final_cmd.push_str(" & echo [Finished running. Exit status: %ERRORLEVEL%]");
            #[cfg(not(any(unix, windows)))]
            final_cmd.push_str(" ; echo [Finished running]");
            // ^ could be wrong depending on the platform, to be fixed on demand
        }

        let mut inner_args = args.clone();
        inner_args.cmd = vec![final_cmd];

        Ok(Self {
            args: args.clone(),
            quiet,
            inner: ExecHandler::new(inner_args)?,
        })
    }

    fn on_manual(&mut self) -> Result<bool> {
        if self.args.once {
            return Ok(true);
        }

        self.start();
        self.inner.on_manual()
    }

    fn on_update(&mut self, ops: &[PathOp]) -> Result<bool> {
        self.start();
        self.inner.on_update(ops)
    }
}

impl CwHandler {
    fn start(&self) {
        if !self.quiet {
            println!("[Running '{}']", self.args.cmd.join(" && "));
        }
    }
}
CwHandler
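The watch.rs row above is small enough to read in one piece: the first cell after the file name holds the code before the masked span, the second cell holds the code after it, and the short final cell (CwHandler) is the masked span itself. Below is a minimal Python sketch of how such a row reassembles into the original source; the key names prefix/suffix/middle are an assumption inferred from this dump's layout, not a documented schema.

def reassemble(row: dict) -> str:
    """Rebuild a source file from one fill-in-the-middle row.

    Assumes the row stores three spans under 'prefix', 'suffix' and
    'middle' (key names inferred from this dump, so treat them as an
    assumption): the original file is the prefix, then the masked
    middle, then the suffix.
    """
    return row["prefix"] + row["middle"] + row["suffix"]

# The watch.rs row, heavily abbreviated: the masked span is just the
# struct name.
row = {
    "prefix": "pub struct ",
    "middle": "CwHandler",
    "suffix": " { args: Args, quiet: bool, inner: ExecHandler }",
}
assert reassemble(row) == "pub struct CwHandler { args: Args, quiet: bool, inner: ExecHandler }"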
tests.py
import pytest
import io
from unittest.mock import patch
import os
import tempfile

from microrepl import connect_miniterm


@pytest.yield_fixture
def fake_stderr():
    fake_stderr = io.StringIO()
    with patch('sys.stderr', fake_stderr):
        yield fake_stderr


@pytest.yield_fixture
def fake_sys_exit():
    with patch('sys.exit', autospec=True) as fake_exit:
        yield fake_exit


def test_connect_miniterm_suggests_solution_to_perms_problem_on_linux(fake_stderr, fake_sys_exit):
    nonaccessible_port = tempfile.NamedTemporaryFile()
    os.chmod(nonaccessible_port.name, 0o000)
    connect_miniterm(nonaccessible_port.name)
    error_message = fake_stderr.getvalue()
    assert "Found micro:bit, but could not connect." in error_message
    assert "[Errno 13] could not open port" in error_message
assert 'sudo usermod -a -G dialout <your-username>' in error_message
    assert "Permission denied: {port!r}".format(port=nonaccessible_port.name) in error_message
    assert 'On linux, try adding yourself to the "dialout" group' in error_message
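A modernization note on the fixtures above: pytest.yield_fixture has been deprecated since pytest 3.0, where the plain fixture decorator gained yield support, and it was removed in later pytest releases. Here is a hedged sketch of the equivalent fixtures under current pytest; a suggested update, not part of the stored file.

import io
from unittest.mock import patch

import pytest


@pytest.fixture  # the plain decorator has supported yield since pytest 3.0
def fake_stderr():
    fake = io.StringIO()
    with patch('sys.stderr', fake):
        yield fake  # handed to the test; the patch unwinds on teardown


@pytest.fixture
def fake_sys_exit():
    with patch('sys.exit', autospec=True) as fake_exit:
        yield fake_exit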
error.rs
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct AcceptDomainTransferFromAnotherAwsAccountError { pub kind: AcceptDomainTransferFromAnotherAwsAccountErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum AcceptDomainTransferFromAnotherAwsAccountErrorKind { DomainLimitExceeded(crate::error::DomainLimitExceeded), InvalidInput(crate::error::InvalidInput), OperationLimitExceeded(crate::error::OperationLimitExceeded), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for AcceptDomainTransferFromAnotherAwsAccountError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { AcceptDomainTransferFromAnotherAwsAccountErrorKind::DomainLimitExceeded(_inner) => { _inner.fmt(f) } AcceptDomainTransferFromAnotherAwsAccountErrorKind::InvalidInput(_inner) => { _inner.fmt(f) } AcceptDomainTransferFromAnotherAwsAccountErrorKind::OperationLimitExceeded(_inner) => { _inner.fmt(f) } AcceptDomainTransferFromAnotherAwsAccountErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for AcceptDomainTransferFromAnotherAwsAccountError { fn code(&self) -> Option<&str> { AcceptDomainTransferFromAnotherAwsAccountError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl AcceptDomainTransferFromAnotherAwsAccountError { pub fn new( kind: AcceptDomainTransferFromAnotherAwsAccountErrorKind, meta: smithy_types::Error, ) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: AcceptDomainTransferFromAnotherAwsAccountErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: AcceptDomainTransferFromAnotherAwsAccountErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_domain_limit_exceeded(&self) -> bool { matches!( &self.kind, AcceptDomainTransferFromAnotherAwsAccountErrorKind::DomainLimitExceeded(_) ) } pub fn is_invalid_input(&self) -> bool { matches!( &self.kind, AcceptDomainTransferFromAnotherAwsAccountErrorKind::InvalidInput(_) ) } pub fn is_operation_limit_exceeded(&self) -> bool { matches!( &self.kind, AcceptDomainTransferFromAnotherAwsAccountErrorKind::OperationLimitExceeded(_) ) } } impl std::error::Error for AcceptDomainTransferFromAnotherAwsAccountError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { AcceptDomainTransferFromAnotherAwsAccountErrorKind::DomainLimitExceeded(_inner) => { Some(_inner) } AcceptDomainTransferFromAnotherAwsAccountErrorKind::InvalidInput(_inner) => { Some(_inner) } AcceptDomainTransferFromAnotherAwsAccountErrorKind::OperationLimitExceeded(_inner) => { Some(_inner) } AcceptDomainTransferFromAnotherAwsAccountErrorKind::Unhandled(_inner) => { Some(_inner.as_ref()) } } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct CancelDomainTransferToAnotherAwsAccountError { pub kind: CancelDomainTransferToAnotherAwsAccountErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum CancelDomainTransferToAnotherAwsAccountErrorKind { InvalidInput(crate::error::InvalidInput), OperationLimitExceeded(crate::error::OperationLimitExceeded), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for CancelDomainTransferToAnotherAwsAccountError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { CancelDomainTransferToAnotherAwsAccountErrorKind::InvalidInput(_inner) => _inner.fmt(f), CancelDomainTransferToAnotherAwsAccountErrorKind::OperationLimitExceeded(_inner) => { _inner.fmt(f) } CancelDomainTransferToAnotherAwsAccountErrorKind::Unhandled(_inner) => _inner.fmt(f), }
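// NOTE (editorial reconstruction, not the stored `middle` cell): the dataset elides
// the span between this row's two large cells. Every other error type in this file
// closes its Display impl at this point and opens the retry-classification impl that
// the `fn code` / `fn retryable_error_kind` methods below belong to, so the elided
// tokens are almost certainly:
} } impl smithy_types::retry::ProvideErrorKind for CancelDomainTransferToAnotherAwsAccountError {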
fn code(&self) -> Option<&str> { CancelDomainTransferToAnotherAwsAccountError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl CancelDomainTransferToAnotherAwsAccountError { pub fn new( kind: CancelDomainTransferToAnotherAwsAccountErrorKind, meta: smithy_types::Error, ) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: CancelDomainTransferToAnotherAwsAccountErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: CancelDomainTransferToAnotherAwsAccountErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_invalid_input(&self) -> bool { matches!( &self.kind, CancelDomainTransferToAnotherAwsAccountErrorKind::InvalidInput(_) ) } pub fn is_operation_limit_exceeded(&self) -> bool { matches!( &self.kind, CancelDomainTransferToAnotherAwsAccountErrorKind::OperationLimitExceeded(_) ) } } impl std::error::Error for CancelDomainTransferToAnotherAwsAccountError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { CancelDomainTransferToAnotherAwsAccountErrorKind::InvalidInput(_inner) => Some(_inner), CancelDomainTransferToAnotherAwsAccountErrorKind::OperationLimitExceeded(_inner) => { Some(_inner) } CancelDomainTransferToAnotherAwsAccountErrorKind::Unhandled(_inner) => { Some(_inner.as_ref()) } } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct CheckDomainAvailabilityError { pub kind: CheckDomainAvailabilityErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum CheckDomainAvailabilityErrorKind { InvalidInput(crate::error::InvalidInput), UnsupportedTld(crate::error::UnsupportedTld), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for CheckDomainAvailabilityError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { CheckDomainAvailabilityErrorKind::InvalidInput(_inner) => _inner.fmt(f), CheckDomainAvailabilityErrorKind::UnsupportedTld(_inner) => _inner.fmt(f), CheckDomainAvailabilityErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for CheckDomainAvailabilityError { fn code(&self) -> Option<&str> { CheckDomainAvailabilityError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl CheckDomainAvailabilityError { pub fn new(kind: CheckDomainAvailabilityErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: CheckDomainAvailabilityErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: CheckDomainAvailabilityErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. 
This would enable us to use display // as implemented by std::Error to generate a message in that case. pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_invalid_input(&self) -> bool { matches!( &self.kind, CheckDomainAvailabilityErrorKind::InvalidInput(_) ) } pub fn is_unsupported_tld(&self) -> bool { matches!( &self.kind, CheckDomainAvailabilityErrorKind::UnsupportedTld(_) ) } } impl std::error::Error for CheckDomainAvailabilityError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { CheckDomainAvailabilityErrorKind::InvalidInput(_inner) => Some(_inner), CheckDomainAvailabilityErrorKind::UnsupportedTld(_inner) => Some(_inner), CheckDomainAvailabilityErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct CheckDomainTransferabilityError { pub kind: CheckDomainTransferabilityErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum CheckDomainTransferabilityErrorKind { InvalidInput(crate::error::InvalidInput), UnsupportedTld(crate::error::UnsupportedTld), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for CheckDomainTransferabilityError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { CheckDomainTransferabilityErrorKind::InvalidInput(_inner) => _inner.fmt(f), CheckDomainTransferabilityErrorKind::UnsupportedTld(_inner) => _inner.fmt(f), CheckDomainTransferabilityErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for CheckDomainTransferabilityError { fn code(&self) -> Option<&str> { CheckDomainTransferabilityError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl CheckDomainTransferabilityError { pub fn new(kind: CheckDomainTransferabilityErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: CheckDomainTransferabilityErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: CheckDomainTransferabilityErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_invalid_input(&self) -> bool { matches!( &self.kind, CheckDomainTransferabilityErrorKind::InvalidInput(_) ) } pub fn is_unsupported_tld(&self) -> bool { matches!( &self.kind, CheckDomainTransferabilityErrorKind::UnsupportedTld(_) ) } } impl std::error::Error for CheckDomainTransferabilityError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { CheckDomainTransferabilityErrorKind::InvalidInput(_inner) => Some(_inner), CheckDomainTransferabilityErrorKind::UnsupportedTld(_inner) => Some(_inner), CheckDomainTransferabilityErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct DeleteTagsForDomainError { pub kind: DeleteTagsForDomainErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum DeleteTagsForDomainErrorKind { InvalidInput(crate::error::InvalidInput), OperationLimitExceeded(crate::error::OperationLimitExceeded), UnsupportedTld(crate::error::UnsupportedTld), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for DeleteTagsForDomainError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { DeleteTagsForDomainErrorKind::InvalidInput(_inner) => _inner.fmt(f), DeleteTagsForDomainErrorKind::OperationLimitExceeded(_inner) => _inner.fmt(f), DeleteTagsForDomainErrorKind::UnsupportedTld(_inner) => _inner.fmt(f), DeleteTagsForDomainErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for DeleteTagsForDomainError { fn code(&self) -> Option<&str> { DeleteTagsForDomainError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl DeleteTagsForDomainError { pub fn new(kind: DeleteTagsForDomainErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: DeleteTagsForDomainErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: DeleteTagsForDomainErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_invalid_input(&self) -> bool { matches!(&self.kind, DeleteTagsForDomainErrorKind::InvalidInput(_)) } pub fn is_operation_limit_exceeded(&self) -> bool { matches!( &self.kind, DeleteTagsForDomainErrorKind::OperationLimitExceeded(_) ) } pub fn is_unsupported_tld(&self) -> bool { matches!(&self.kind, DeleteTagsForDomainErrorKind::UnsupportedTld(_)) } } impl std::error::Error for DeleteTagsForDomainError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { DeleteTagsForDomainErrorKind::InvalidInput(_inner) => Some(_inner), DeleteTagsForDomainErrorKind::OperationLimitExceeded(_inner) => Some(_inner), DeleteTagsForDomainErrorKind::UnsupportedTld(_inner) => Some(_inner), DeleteTagsForDomainErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct DisableDomainAutoRenewError { pub kind: DisableDomainAutoRenewErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum DisableDomainAutoRenewErrorKind { InvalidInput(crate::error::InvalidInput), UnsupportedTld(crate::error::UnsupportedTld), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for DisableDomainAutoRenewError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { DisableDomainAutoRenewErrorKind::InvalidInput(_inner) => _inner.fmt(f), DisableDomainAutoRenewErrorKind::UnsupportedTld(_inner) => _inner.fmt(f), DisableDomainAutoRenewErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for DisableDomainAutoRenewError { fn code(&self) -> Option<&str> { DisableDomainAutoRenewError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl DisableDomainAutoRenewError { pub fn new(kind: DisableDomainAutoRenewErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: DisableDomainAutoRenewErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: DisableDomainAutoRenewErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_invalid_input(&self) -> bool { matches!(&self.kind, DisableDomainAutoRenewErrorKind::InvalidInput(_)) } pub fn is_unsupported_tld(&self) -> bool { matches!( &self.kind, DisableDomainAutoRenewErrorKind::UnsupportedTld(_) ) } } impl std::error::Error for DisableDomainAutoRenewError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { DisableDomainAutoRenewErrorKind::InvalidInput(_inner) => Some(_inner), DisableDomainAutoRenewErrorKind::UnsupportedTld(_inner) => Some(_inner), DisableDomainAutoRenewErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct DisableDomainTransferLockError { pub kind: DisableDomainTransferLockErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum DisableDomainTransferLockErrorKind { DuplicateRequest(crate::error::DuplicateRequest), InvalidInput(crate::error::InvalidInput), OperationLimitExceeded(crate::error::OperationLimitExceeded), TldRulesViolation(crate::error::TldRulesViolation), UnsupportedTld(crate::error::UnsupportedTld), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for DisableDomainTransferLockError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { DisableDomainTransferLockErrorKind::DuplicateRequest(_inner) => _inner.fmt(f), DisableDomainTransferLockErrorKind::InvalidInput(_inner) => _inner.fmt(f), DisableDomainTransferLockErrorKind::OperationLimitExceeded(_inner) => _inner.fmt(f), DisableDomainTransferLockErrorKind::TldRulesViolation(_inner) => _inner.fmt(f), DisableDomainTransferLockErrorKind::UnsupportedTld(_inner) => _inner.fmt(f), DisableDomainTransferLockErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for DisableDomainTransferLockError { fn code(&self) -> Option<&str> { DisableDomainTransferLockError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl DisableDomainTransferLockError { pub fn new(kind: DisableDomainTransferLockErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: DisableDomainTransferLockErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: DisableDomainTransferLockErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_duplicate_request(&self) -> bool { matches!( &self.kind, DisableDomainTransferLockErrorKind::DuplicateRequest(_) ) } pub fn is_invalid_input(&self) -> bool { matches!( &self.kind, DisableDomainTransferLockErrorKind::InvalidInput(_) ) } pub fn is_operation_limit_exceeded(&self) -> bool { matches!( &self.kind, DisableDomainTransferLockErrorKind::OperationLimitExceeded(_) ) } pub fn is_tld_rules_violation(&self) -> bool { matches!( &self.kind, DisableDomainTransferLockErrorKind::TldRulesViolation(_) ) } pub fn is_unsupported_tld(&self) -> bool { matches!( &self.kind, DisableDomainTransferLockErrorKind::UnsupportedTld(_) ) } } impl std::error::Error for DisableDomainTransferLockError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { DisableDomainTransferLockErrorKind::DuplicateRequest(_inner) => Some(_inner), DisableDomainTransferLockErrorKind::InvalidInput(_inner) => Some(_inner), DisableDomainTransferLockErrorKind::OperationLimitExceeded(_inner) => Some(_inner), DisableDomainTransferLockErrorKind::TldRulesViolation(_inner) => Some(_inner), DisableDomainTransferLockErrorKind::UnsupportedTld(_inner) => Some(_inner), DisableDomainTransferLockErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct EnableDomainAutoRenewError { pub kind: EnableDomainAutoRenewErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum EnableDomainAutoRenewErrorKind { InvalidInput(crate::error::InvalidInput), TldRulesViolation(crate::error::TldRulesViolation), UnsupportedTld(crate::error::UnsupportedTld), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for EnableDomainAutoRenewError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { EnableDomainAutoRenewErrorKind::InvalidInput(_inner) => _inner.fmt(f), EnableDomainAutoRenewErrorKind::TldRulesViolation(_inner) => _inner.fmt(f), EnableDomainAutoRenewErrorKind::UnsupportedTld(_inner) => _inner.fmt(f), EnableDomainAutoRenewErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for EnableDomainAutoRenewError { fn code(&self) -> Option<&str> { EnableDomainAutoRenewError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl EnableDomainAutoRenewError { pub fn new(kind: EnableDomainAutoRenewErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: EnableDomainAutoRenewErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: EnableDomainAutoRenewErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_invalid_input(&self) -> bool { matches!(&self.kind, EnableDomainAutoRenewErrorKind::InvalidInput(_)) } pub fn is_tld_rules_violation(&self) -> bool { matches!( &self.kind, EnableDomainAutoRenewErrorKind::TldRulesViolation(_) ) } pub fn is_unsupported_tld(&self) -> bool { matches!( &self.kind, EnableDomainAutoRenewErrorKind::UnsupportedTld(_) ) } } impl std::error::Error for EnableDomainAutoRenewError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { EnableDomainAutoRenewErrorKind::InvalidInput(_inner) => Some(_inner), EnableDomainAutoRenewErrorKind::TldRulesViolation(_inner) => Some(_inner), EnableDomainAutoRenewErrorKind::UnsupportedTld(_inner) => Some(_inner), EnableDomainAutoRenewErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct EnableDomainTransferLockError { pub kind: EnableDomainTransferLockErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum EnableDomainTransferLockErrorKind { DuplicateRequest(crate::error::DuplicateRequest), InvalidInput(crate::error::InvalidInput), OperationLimitExceeded(crate::error::OperationLimitExceeded), TldRulesViolation(crate::error::TldRulesViolation), UnsupportedTld(crate::error::UnsupportedTld), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for EnableDomainTransferLockError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { EnableDomainTransferLockErrorKind::DuplicateRequest(_inner) => _inner.fmt(f), EnableDomainTransferLockErrorKind::InvalidInput(_inner) => _inner.fmt(f), EnableDomainTransferLockErrorKind::OperationLimitExceeded(_inner) => _inner.fmt(f), EnableDomainTransferLockErrorKind::TldRulesViolation(_inner) => _inner.fmt(f), EnableDomainTransferLockErrorKind::UnsupportedTld(_inner) => _inner.fmt(f), EnableDomainTransferLockErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for EnableDomainTransferLockError { fn code(&self) -> Option<&str> { EnableDomainTransferLockError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl EnableDomainTransferLockError { pub fn new(kind: EnableDomainTransferLockErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: EnableDomainTransferLockErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: EnableDomainTransferLockErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_duplicate_request(&self) -> bool { matches!( &self.kind, EnableDomainTransferLockErrorKind::DuplicateRequest(_) ) } pub fn is_invalid_input(&self) -> bool { matches!( &self.kind, EnableDomainTransferLockErrorKind::InvalidInput(_) ) } pub fn is_operation_limit_exceeded(&self) -> bool { matches!( &self.kind, EnableDomainTransferLockErrorKind::OperationLimitExceeded(_) ) } pub fn is_tld_rules_violation(&self) -> bool { matches!( &self.kind, EnableDomainTransferLockErrorKind::TldRulesViolation(_) ) } pub fn is_unsupported_tld(&self) -> bool { matches!( &self.kind, EnableDomainTransferLockErrorKind::UnsupportedTld(_) ) } } impl std::error::Error for EnableDomainTransferLockError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { EnableDomainTransferLockErrorKind::DuplicateRequest(_inner) => Some(_inner), EnableDomainTransferLockErrorKind::InvalidInput(_inner) => Some(_inner), EnableDomainTransferLockErrorKind::OperationLimitExceeded(_inner) => Some(_inner), EnableDomainTransferLockErrorKind::TldRulesViolation(_inner) => Some(_inner), EnableDomainTransferLockErrorKind::UnsupportedTld(_inner) => Some(_inner), EnableDomainTransferLockErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct GetContactReachabilityStatusError { pub kind: GetContactReachabilityStatusErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum GetContactReachabilityStatusErrorKind { InvalidInput(crate::error::InvalidInput), OperationLimitExceeded(crate::error::OperationLimitExceeded), UnsupportedTld(crate::error::UnsupportedTld), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for GetContactReachabilityStatusError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { GetContactReachabilityStatusErrorKind::InvalidInput(_inner) => _inner.fmt(f), GetContactReachabilityStatusErrorKind::OperationLimitExceeded(_inner) => _inner.fmt(f), GetContactReachabilityStatusErrorKind::UnsupportedTld(_inner) => _inner.fmt(f), GetContactReachabilityStatusErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for GetContactReachabilityStatusError { fn code(&self) -> Option<&str> { GetContactReachabilityStatusError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl GetContactReachabilityStatusError { pub fn new(kind: GetContactReachabilityStatusErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: GetContactReachabilityStatusErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: GetContactReachabilityStatusErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_invalid_input(&self) -> bool { matches!( &self.kind, GetContactReachabilityStatusErrorKind::InvalidInput(_) ) } pub fn is_operation_limit_exceeded(&self) -> bool { matches!( &self.kind, GetContactReachabilityStatusErrorKind::OperationLimitExceeded(_) ) } pub fn is_unsupported_tld(&self) -> bool { matches!( &self.kind, GetContactReachabilityStatusErrorKind::UnsupportedTld(_) ) } } impl std::error::Error for GetContactReachabilityStatusError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { GetContactReachabilityStatusErrorKind::InvalidInput(_inner) => Some(_inner), GetContactReachabilityStatusErrorKind::OperationLimitExceeded(_inner) => Some(_inner), GetContactReachabilityStatusErrorKind::UnsupportedTld(_inner) => Some(_inner), GetContactReachabilityStatusErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct GetDomainDetailError { pub kind: GetDomainDetailErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum GetDomainDetailErrorKind { InvalidInput(crate::error::InvalidInput), UnsupportedTld(crate::error::UnsupportedTld), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for GetDomainDetailError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { GetDomainDetailErrorKind::InvalidInput(_inner) => _inner.fmt(f), GetDomainDetailErrorKind::UnsupportedTld(_inner) => _inner.fmt(f), GetDomainDetailErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for GetDomainDetailError { fn code(&self) -> Option<&str> { GetDomainDetailError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl GetDomainDetailError { pub fn new(kind: GetDomainDetailErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: GetDomainDetailErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: GetDomainDetailErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_invalid_input(&self) -> bool { matches!(&self.kind, GetDomainDetailErrorKind::InvalidInput(_)) } pub fn is_unsupported_tld(&self) -> bool { matches!(&self.kind, GetDomainDetailErrorKind::UnsupportedTld(_)) } } impl std::error::Error for GetDomainDetailError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { GetDomainDetailErrorKind::InvalidInput(_inner) => Some(_inner), GetDomainDetailErrorKind::UnsupportedTld(_inner) => Some(_inner), GetDomainDetailErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct GetDomainSuggestionsError { pub kind: GetDomainSuggestionsErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum GetDomainSuggestionsErrorKind { InvalidInput(crate::error::InvalidInput), UnsupportedTld(crate::error::UnsupportedTld), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for GetDomainSuggestionsError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { GetDomainSuggestionsErrorKind::InvalidInput(_inner) => _inner.fmt(f), GetDomainSuggestionsErrorKind::UnsupportedTld(_inner) => _inner.fmt(f), GetDomainSuggestionsErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for GetDomainSuggestionsError { fn code(&self) -> Option<&str> { GetDomainSuggestionsError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl GetDomainSuggestionsError { pub fn new(kind: GetDomainSuggestionsErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: GetDomainSuggestionsErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: GetDomainSuggestionsErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_invalid_input(&self) -> bool { matches!(&self.kind, GetDomainSuggestionsErrorKind::InvalidInput(_)) } pub fn is_unsupported_tld(&self) -> bool { matches!(&self.kind, GetDomainSuggestionsErrorKind::UnsupportedTld(_)) } } impl std::error::Error for GetDomainSuggestionsError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { GetDomainSuggestionsErrorKind::InvalidInput(_inner) => Some(_inner), GetDomainSuggestionsErrorKind::UnsupportedTld(_inner) => Some(_inner), GetDomainSuggestionsErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct GetOperationDetailError { pub kind: GetOperationDetailErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum GetOperationDetailErrorKind { InvalidInput(crate::error::InvalidInput), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for GetOperationDetailError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { GetOperationDetailErrorKind::InvalidInput(_inner) => _inner.fmt(f), GetOperationDetailErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for GetOperationDetailError { fn code(&self) -> Option<&str> { GetOperationDetailError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl GetOperationDetailError { pub fn new(kind: GetOperationDetailErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: GetOperationDetailErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: GetOperationDetailErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_invalid_input(&self) -> bool { matches!(&self.kind, GetOperationDetailErrorKind::InvalidInput(_)) } } impl std::error::Error for GetOperationDetailError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { GetOperationDetailErrorKind::InvalidInput(_inner) => Some(_inner), GetOperationDetailErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct ListDomainsError { pub kind: ListDomainsErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum ListDomainsErrorKind { InvalidInput(crate::error::InvalidInput), /// An unexpected error, eg. 
invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for ListDomainsError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { ListDomainsErrorKind::InvalidInput(_inner) => _inner.fmt(f), ListDomainsErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for ListDomainsError { fn code(&self) -> Option<&str> { ListDomainsError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl ListDomainsError { pub fn new(kind: ListDomainsErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: ListDomainsErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: ListDomainsErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_invalid_input(&self) -> bool { matches!(&self.kind, ListDomainsErrorKind::InvalidInput(_)) } } impl std::error::Error for ListDomainsError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { ListDomainsErrorKind::InvalidInput(_inner) => Some(_inner), ListDomainsErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct ListOperationsError { pub kind: ListOperationsErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum ListOperationsErrorKind { InvalidInput(crate::error::InvalidInput), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for ListOperationsError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { ListOperationsErrorKind::InvalidInput(_inner) => _inner.fmt(f), ListOperationsErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for ListOperationsError { fn code(&self) -> Option<&str> { ListOperationsError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl ListOperationsError { pub fn new(kind: ListOperationsErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: ListOperationsErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: ListOperationsErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_invalid_input(&self) -> bool { matches!(&self.kind, ListOperationsErrorKind::InvalidInput(_)) } } impl std::error::Error for ListOperationsError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { ListOperationsErrorKind::InvalidInput(_inner) => Some(_inner), ListOperationsErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct ListTagsForDomainError { pub kind: ListTagsForDomainErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum ListTagsForDomainErrorKind { InvalidInput(crate::error::InvalidInput), OperationLimitExceeded(crate::error::OperationLimitExceeded), UnsupportedTld(crate::error::UnsupportedTld), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for ListTagsForDomainError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { ListTagsForDomainErrorKind::InvalidInput(_inner) => _inner.fmt(f), ListTagsForDomainErrorKind::OperationLimitExceeded(_inner) => _inner.fmt(f), ListTagsForDomainErrorKind::UnsupportedTld(_inner) => _inner.fmt(f), ListTagsForDomainErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for ListTagsForDomainError { fn code(&self) -> Option<&str> { ListTagsForDomainError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl ListTagsForDomainError { pub fn new(kind: ListTagsForDomainErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: ListTagsForDomainErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: ListTagsForDomainErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_invalid_input(&self) -> bool { matches!(&self.kind, ListTagsForDomainErrorKind::InvalidInput(_)) } pub fn is_operation_limit_exceeded(&self) -> bool { matches!( &self.kind, ListTagsForDomainErrorKind::OperationLimitExceeded(_) ) } pub fn is_unsupported_tld(&self) -> bool { matches!(&self.kind, ListTagsForDomainErrorKind::UnsupportedTld(_)) } } impl std::error::Error for ListTagsForDomainError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { ListTagsForDomainErrorKind::InvalidInput(_inner) => Some(_inner), ListTagsForDomainErrorKind::OperationLimitExceeded(_inner) => Some(_inner), ListTagsForDomainErrorKind::UnsupportedTld(_inner) => Some(_inner), ListTagsForDomainErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct RegisterDomainError { pub kind: RegisterDomainErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum RegisterDomainErrorKind { DomainLimitExceeded(crate::error::DomainLimitExceeded), DuplicateRequest(crate::error::DuplicateRequest), InvalidInput(crate::error::InvalidInput), OperationLimitExceeded(crate::error::OperationLimitExceeded), TldRulesViolation(crate::error::TldRulesViolation), UnsupportedTld(crate::error::UnsupportedTld), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for RegisterDomainError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { RegisterDomainErrorKind::DomainLimitExceeded(_inner) => _inner.fmt(f), RegisterDomainErrorKind::DuplicateRequest(_inner) => _inner.fmt(f), RegisterDomainErrorKind::InvalidInput(_inner) => _inner.fmt(f), RegisterDomainErrorKind::OperationLimitExceeded(_inner) => _inner.fmt(f), RegisterDomainErrorKind::TldRulesViolation(_inner) => _inner.fmt(f), RegisterDomainErrorKind::UnsupportedTld(_inner) => _inner.fmt(f), RegisterDomainErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for RegisterDomainError { fn code(&self) -> Option<&str> { RegisterDomainError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl RegisterDomainError { pub fn new(kind: RegisterDomainErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: RegisterDomainErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: RegisterDomainErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_domain_limit_exceeded(&self) -> bool { matches!(&self.kind, RegisterDomainErrorKind::DomainLimitExceeded(_)) } pub fn is_duplicate_request(&self) -> bool { matches!(&self.kind, RegisterDomainErrorKind::DuplicateRequest(_)) } pub fn is_invalid_input(&self) -> bool { matches!(&self.kind, RegisterDomainErrorKind::InvalidInput(_)) } pub fn is_operation_limit_exceeded(&self) -> bool { matches!( &self.kind, RegisterDomainErrorKind::OperationLimitExceeded(_) ) } pub fn is_tld_rules_violation(&self) -> bool { matches!(&self.kind, RegisterDomainErrorKind::TldRulesViolation(_)) } pub fn is_unsupported_tld(&self) -> bool { matches!(&self.kind, RegisterDomainErrorKind::UnsupportedTld(_)) } } impl std::error::Error for RegisterDomainError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { RegisterDomainErrorKind::DomainLimitExceeded(_inner) => Some(_inner), RegisterDomainErrorKind::DuplicateRequest(_inner) => Some(_inner), RegisterDomainErrorKind::InvalidInput(_inner) => Some(_inner), RegisterDomainErrorKind::OperationLimitExceeded(_inner) => Some(_inner), RegisterDomainErrorKind::TldRulesViolation(_inner) => Some(_inner), RegisterDomainErrorKind::UnsupportedTld(_inner) => Some(_inner), RegisterDomainErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct RejectDomainTransferFromAnotherAwsAccountError { pub kind: RejectDomainTransferFromAnotherAwsAccountErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum RejectDomainTransferFromAnotherAwsAccountErrorKind { InvalidInput(crate::error::InvalidInput), OperationLimitExceeded(crate::error::OperationLimitExceeded), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for RejectDomainTransferFromAnotherAwsAccountError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { RejectDomainTransferFromAnotherAwsAccountErrorKind::InvalidInput(_inner) => { _inner.fmt(f) } RejectDomainTransferFromAnotherAwsAccountErrorKind::OperationLimitExceeded(_inner) => { _inner.fmt(f) } RejectDomainTransferFromAnotherAwsAccountErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for RejectDomainTransferFromAnotherAwsAccountError { fn code(&self) -> Option<&str> { RejectDomainTransferFromAnotherAwsAccountError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl RejectDomainTransferFromAnotherAwsAccountError { pub fn new( kind: RejectDomainTransferFromAnotherAwsAccountErrorKind, meta: smithy_types::Error, ) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: RejectDomainTransferFromAnotherAwsAccountErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: RejectDomainTransferFromAnotherAwsAccountErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. 
This would enable us to use display // as implemented by std::Error to generate a message in that case. pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_invalid_input(&self) -> bool { matches!( &self.kind, RejectDomainTransferFromAnotherAwsAccountErrorKind::InvalidInput(_) ) } pub fn is_operation_limit_exceeded(&self) -> bool { matches!( &self.kind, RejectDomainTransferFromAnotherAwsAccountErrorKind::OperationLimitExceeded(_) ) } } impl std::error::Error for RejectDomainTransferFromAnotherAwsAccountError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { RejectDomainTransferFromAnotherAwsAccountErrorKind::InvalidInput(_inner) => { Some(_inner) } RejectDomainTransferFromAnotherAwsAccountErrorKind::OperationLimitExceeded(_inner) => { Some(_inner) } RejectDomainTransferFromAnotherAwsAccountErrorKind::Unhandled(_inner) => { Some(_inner.as_ref()) } } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct RenewDomainError { pub kind: RenewDomainErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum RenewDomainErrorKind { DuplicateRequest(crate::error::DuplicateRequest), InvalidInput(crate::error::InvalidInput), OperationLimitExceeded(crate::error::OperationLimitExceeded), TldRulesViolation(crate::error::TldRulesViolation), UnsupportedTld(crate::error::UnsupportedTld), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for RenewDomainError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { RenewDomainErrorKind::DuplicateRequest(_inner) => _inner.fmt(f), RenewDomainErrorKind::InvalidInput(_inner) => _inner.fmt(f), RenewDomainErrorKind::OperationLimitExceeded(_inner) => _inner.fmt(f), RenewDomainErrorKind::TldRulesViolation(_inner) => _inner.fmt(f), RenewDomainErrorKind::UnsupportedTld(_inner) => _inner.fmt(f), RenewDomainErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for RenewDomainError { fn code(&self) -> Option<&str> { RenewDomainError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl RenewDomainError { pub fn new(kind: RenewDomainErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: RenewDomainErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: RenewDomainErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_duplicate_request(&self) -> bool { matches!(&self.kind, RenewDomainErrorKind::DuplicateRequest(_)) } pub fn is_invalid_input(&self) -> bool { matches!(&self.kind, RenewDomainErrorKind::InvalidInput(_)) } pub fn is_operation_limit_exceeded(&self) -> bool { matches!(&self.kind, RenewDomainErrorKind::OperationLimitExceeded(_)) } pub fn is_tld_rules_violation(&self) -> bool { matches!(&self.kind, RenewDomainErrorKind::TldRulesViolation(_)) } pub fn is_unsupported_tld(&self) -> bool { matches!(&self.kind, RenewDomainErrorKind::UnsupportedTld(_)) } } impl std::error::Error for RenewDomainError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { RenewDomainErrorKind::DuplicateRequest(_inner) => Some(_inner), RenewDomainErrorKind::InvalidInput(_inner) => Some(_inner), RenewDomainErrorKind::OperationLimitExceeded(_inner) => Some(_inner), RenewDomainErrorKind::TldRulesViolation(_inner) => Some(_inner), RenewDomainErrorKind::UnsupportedTld(_inner) => Some(_inner), RenewDomainErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct ResendContactReachabilityEmailError { pub kind: ResendContactReachabilityEmailErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum ResendContactReachabilityEmailErrorKind { InvalidInput(crate::error::InvalidInput), OperationLimitExceeded(crate::error::OperationLimitExceeded), UnsupportedTld(crate::error::UnsupportedTld), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for ResendContactReachabilityEmailError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { ResendContactReachabilityEmailErrorKind::InvalidInput(_inner) => _inner.fmt(f), ResendContactReachabilityEmailErrorKind::OperationLimitExceeded(_inner) => { _inner.fmt(f) } ResendContactReachabilityEmailErrorKind::UnsupportedTld(_inner) => _inner.fmt(f), ResendContactReachabilityEmailErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for ResendContactReachabilityEmailError { fn code(&self) -> Option<&str> { ResendContactReachabilityEmailError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl ResendContactReachabilityEmailError { pub fn new(kind: ResendContactReachabilityEmailErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: ResendContactReachabilityEmailErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: ResendContactReachabilityEmailErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_invalid_input(&self) -> bool { matches!( &self.kind, ResendContactReachabilityEmailErrorKind::InvalidInput(_) ) } pub fn is_operation_limit_exceeded(&self) -> bool { matches!( &self.kind, ResendContactReachabilityEmailErrorKind::OperationLimitExceeded(_) ) } pub fn is_unsupported_tld(&self) -> bool { matches!( &self.kind, ResendContactReachabilityEmailErrorKind::UnsupportedTld(_) ) } } impl std::error::Error for ResendContactReachabilityEmailError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { ResendContactReachabilityEmailErrorKind::InvalidInput(_inner) => Some(_inner), ResendContactReachabilityEmailErrorKind::OperationLimitExceeded(_inner) => Some(_inner), ResendContactReachabilityEmailErrorKind::UnsupportedTld(_inner) => Some(_inner), ResendContactReachabilityEmailErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct RetrieveDomainAuthCodeError { pub kind: RetrieveDomainAuthCodeErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum RetrieveDomainAuthCodeErrorKind { InvalidInput(crate::error::InvalidInput), UnsupportedTld(crate::error::UnsupportedTld), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for RetrieveDomainAuthCodeError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { RetrieveDomainAuthCodeErrorKind::InvalidInput(_inner) => _inner.fmt(f), RetrieveDomainAuthCodeErrorKind::UnsupportedTld(_inner) => _inner.fmt(f), RetrieveDomainAuthCodeErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for RetrieveDomainAuthCodeError { fn code(&self) -> Option<&str> { RetrieveDomainAuthCodeError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl RetrieveDomainAuthCodeError { pub fn new(kind: RetrieveDomainAuthCodeErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: RetrieveDomainAuthCodeErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: RetrieveDomainAuthCodeErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_invalid_input(&self) -> bool { matches!(&self.kind, RetrieveDomainAuthCodeErrorKind::InvalidInput(_)) } pub fn is_unsupported_tld(&self) -> bool { matches!( &self.kind, RetrieveDomainAuthCodeErrorKind::UnsupportedTld(_) ) } } impl std::error::Error for RetrieveDomainAuthCodeError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { RetrieveDomainAuthCodeErrorKind::InvalidInput(_inner) => Some(_inner), RetrieveDomainAuthCodeErrorKind::UnsupportedTld(_inner) => Some(_inner), RetrieveDomainAuthCodeErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct TransferDomainError { pub kind: TransferDomainErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum TransferDomainErrorKind { DomainLimitExceeded(crate::error::DomainLimitExceeded), DuplicateRequest(crate::error::DuplicateRequest), InvalidInput(crate::error::InvalidInput), OperationLimitExceeded(crate::error::OperationLimitExceeded), TldRulesViolation(crate::error::TldRulesViolation), UnsupportedTld(crate::error::UnsupportedTld), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for TransferDomainError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { TransferDomainErrorKind::DomainLimitExceeded(_inner) => _inner.fmt(f), TransferDomainErrorKind::DuplicateRequest(_inner) => _inner.fmt(f), TransferDomainErrorKind::InvalidInput(_inner) => _inner.fmt(f), TransferDomainErrorKind::OperationLimitExceeded(_inner) => _inner.fmt(f), TransferDomainErrorKind::TldRulesViolation(_inner) => _inner.fmt(f), TransferDomainErrorKind::UnsupportedTld(_inner) => _inner.fmt(f), TransferDomainErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for TransferDomainError { fn code(&self) -> Option<&str> { TransferDomainError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl TransferDomainError { pub fn new(kind: TransferDomainErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: TransferDomainErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: TransferDomainErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_domain_limit_exceeded(&self) -> bool { matches!(&self.kind, TransferDomainErrorKind::DomainLimitExceeded(_)) } pub fn is_duplicate_request(&self) -> bool { matches!(&self.kind, TransferDomainErrorKind::DuplicateRequest(_)) } pub fn is_invalid_input(&self) -> bool { matches!(&self.kind, TransferDomainErrorKind::InvalidInput(_)) } pub fn is_operation_limit_exceeded(&self) -> bool { matches!( &self.kind, TransferDomainErrorKind::OperationLimitExceeded(_) ) } pub fn is_tld_rules_violation(&self) -> bool { matches!(&self.kind, TransferDomainErrorKind::TldRulesViolation(_)) } pub fn is_unsupported_tld(&self) -> bool { matches!(&self.kind, TransferDomainErrorKind::UnsupportedTld(_)) } } impl std::error::Error for TransferDomainError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { TransferDomainErrorKind::DomainLimitExceeded(_inner) => Some(_inner), TransferDomainErrorKind::DuplicateRequest(_inner) => Some(_inner), TransferDomainErrorKind::InvalidInput(_inner) => Some(_inner), TransferDomainErrorKind::OperationLimitExceeded(_inner) => Some(_inner), TransferDomainErrorKind::TldRulesViolation(_inner) => Some(_inner), TransferDomainErrorKind::UnsupportedTld(_inner) => Some(_inner), TransferDomainErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct TransferDomainToAnotherAwsAccountError { pub kind: TransferDomainToAnotherAwsAccountErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum TransferDomainToAnotherAwsAccountErrorKind { DuplicateRequest(crate::error::DuplicateRequest), InvalidInput(crate::error::InvalidInput), OperationLimitExceeded(crate::error::OperationLimitExceeded), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for TransferDomainToAnotherAwsAccountError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { TransferDomainToAnotherAwsAccountErrorKind::DuplicateRequest(_inner) => _inner.fmt(f), TransferDomainToAnotherAwsAccountErrorKind::InvalidInput(_inner) => _inner.fmt(f), TransferDomainToAnotherAwsAccountErrorKind::OperationLimitExceeded(_inner) => { _inner.fmt(f) } TransferDomainToAnotherAwsAccountErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for TransferDomainToAnotherAwsAccountError { fn code(&self) -> Option<&str> { TransferDomainToAnotherAwsAccountError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl TransferDomainToAnotherAwsAccountError { pub fn new( kind: TransferDomainToAnotherAwsAccountErrorKind, meta: smithy_types::Error, ) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: TransferDomainToAnotherAwsAccountErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: TransferDomainToAnotherAwsAccountErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. 
This would enable us to use display // as implemented by std::Error to generate a message in that case. pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_duplicate_request(&self) -> bool { matches!( &self.kind, TransferDomainToAnotherAwsAccountErrorKind::DuplicateRequest(_) ) } pub fn is_invalid_input(&self) -> bool { matches!( &self.kind, TransferDomainToAnotherAwsAccountErrorKind::InvalidInput(_) ) } pub fn is_operation_limit_exceeded(&self) -> bool { matches!( &self.kind, TransferDomainToAnotherAwsAccountErrorKind::OperationLimitExceeded(_) ) } } impl std::error::Error for TransferDomainToAnotherAwsAccountError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { TransferDomainToAnotherAwsAccountErrorKind::DuplicateRequest(_inner) => Some(_inner), TransferDomainToAnotherAwsAccountErrorKind::InvalidInput(_inner) => Some(_inner), TransferDomainToAnotherAwsAccountErrorKind::OperationLimitExceeded(_inner) => { Some(_inner) } TransferDomainToAnotherAwsAccountErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct UpdateDomainContactError { pub kind: UpdateDomainContactErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum UpdateDomainContactErrorKind { DuplicateRequest(crate::error::DuplicateRequest), InvalidInput(crate::error::InvalidInput), OperationLimitExceeded(crate::error::OperationLimitExceeded), TldRulesViolation(crate::error::TldRulesViolation), UnsupportedTld(crate::error::UnsupportedTld), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for UpdateDomainContactError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { UpdateDomainContactErrorKind::DuplicateRequest(_inner) => _inner.fmt(f), UpdateDomainContactErrorKind::InvalidInput(_inner) => _inner.fmt(f), UpdateDomainContactErrorKind::OperationLimitExceeded(_inner) => _inner.fmt(f), UpdateDomainContactErrorKind::TldRulesViolation(_inner) => _inner.fmt(f), UpdateDomainContactErrorKind::UnsupportedTld(_inner) => _inner.fmt(f), UpdateDomainContactErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for UpdateDomainContactError { fn code(&self) -> Option<&str> { UpdateDomainContactError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl UpdateDomainContactError { pub fn new(kind: UpdateDomainContactErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: UpdateDomainContactErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: UpdateDomainContactErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_duplicate_request(&self) -> bool { matches!( &self.kind, UpdateDomainContactErrorKind::DuplicateRequest(_) ) } pub fn is_invalid_input(&self) -> bool { matches!(&self.kind, UpdateDomainContactErrorKind::InvalidInput(_)) } pub fn is_operation_limit_exceeded(&self) -> bool { matches!( &self.kind, UpdateDomainContactErrorKind::OperationLimitExceeded(_) ) } pub fn is_tld_rules_violation(&self) -> bool { matches!( &self.kind, UpdateDomainContactErrorKind::TldRulesViolation(_) ) } pub fn is_unsupported_tld(&self) -> bool { matches!(&self.kind, UpdateDomainContactErrorKind::UnsupportedTld(_)) } } impl std::error::Error for UpdateDomainContactError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { UpdateDomainContactErrorKind::DuplicateRequest(_inner) => Some(_inner), UpdateDomainContactErrorKind::InvalidInput(_inner) => Some(_inner), UpdateDomainContactErrorKind::OperationLimitExceeded(_inner) => Some(_inner), UpdateDomainContactErrorKind::TldRulesViolation(_inner) => Some(_inner), UpdateDomainContactErrorKind::UnsupportedTld(_inner) => Some(_inner), UpdateDomainContactErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct UpdateDomainContactPrivacyError { pub kind: UpdateDomainContactPrivacyErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum UpdateDomainContactPrivacyErrorKind { DuplicateRequest(crate::error::DuplicateRequest), InvalidInput(crate::error::InvalidInput), OperationLimitExceeded(crate::error::OperationLimitExceeded), TldRulesViolation(crate::error::TldRulesViolation), UnsupportedTld(crate::error::UnsupportedTld), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for UpdateDomainContactPrivacyError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { UpdateDomainContactPrivacyErrorKind::DuplicateRequest(_inner) => _inner.fmt(f), UpdateDomainContactPrivacyErrorKind::InvalidInput(_inner) => _inner.fmt(f), UpdateDomainContactPrivacyErrorKind::OperationLimitExceeded(_inner) => _inner.fmt(f), UpdateDomainContactPrivacyErrorKind::TldRulesViolation(_inner) => _inner.fmt(f), UpdateDomainContactPrivacyErrorKind::UnsupportedTld(_inner) => _inner.fmt(f), UpdateDomainContactPrivacyErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for UpdateDomainContactPrivacyError { fn code(&self) -> Option<&str> { UpdateDomainContactPrivacyError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl UpdateDomainContactPrivacyError { pub fn new(kind: UpdateDomainContactPrivacyErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: UpdateDomainContactPrivacyErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: UpdateDomainContactPrivacyErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. 
This would enable us to use display // as implemented by std::Error to generate a message in that case. pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_duplicate_request(&self) -> bool { matches!( &self.kind, UpdateDomainContactPrivacyErrorKind::DuplicateRequest(_) ) } pub fn is_invalid_input(&self) -> bool { matches!( &self.kind, UpdateDomainContactPrivacyErrorKind::InvalidInput(_) ) } pub fn is_operation_limit_exceeded(&self) -> bool { matches!( &self.kind, UpdateDomainContactPrivacyErrorKind::OperationLimitExceeded(_) ) } pub fn is_tld_rules_violation(&self) -> bool { matches!( &self.kind, UpdateDomainContactPrivacyErrorKind::TldRulesViolation(_) ) } pub fn is_unsupported_tld(&self) -> bool { matches!( &self.kind, UpdateDomainContactPrivacyErrorKind::UnsupportedTld(_) ) } } impl std::error::Error for UpdateDomainContactPrivacyError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { UpdateDomainContactPrivacyErrorKind::DuplicateRequest(_inner) => Some(_inner), UpdateDomainContactPrivacyErrorKind::InvalidInput(_inner) => Some(_inner), UpdateDomainContactPrivacyErrorKind::OperationLimitExceeded(_inner) => Some(_inner), UpdateDomainContactPrivacyErrorKind::TldRulesViolation(_inner) => Some(_inner), UpdateDomainContactPrivacyErrorKind::UnsupportedTld(_inner) => Some(_inner), UpdateDomainContactPrivacyErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct UpdateDomainNameserversError { pub kind: UpdateDomainNameserversErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum UpdateDomainNameserversErrorKind { DuplicateRequest(crate::error::DuplicateRequest), InvalidInput(crate::error::InvalidInput), OperationLimitExceeded(crate::error::OperationLimitExceeded), TldRulesViolation(crate::error::TldRulesViolation), UnsupportedTld(crate::error::UnsupportedTld), /// An unexpected error, eg. 
invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for UpdateDomainNameserversError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { UpdateDomainNameserversErrorKind::DuplicateRequest(_inner) => _inner.fmt(f), UpdateDomainNameserversErrorKind::InvalidInput(_inner) => _inner.fmt(f), UpdateDomainNameserversErrorKind::OperationLimitExceeded(_inner) => _inner.fmt(f), UpdateDomainNameserversErrorKind::TldRulesViolation(_inner) => _inner.fmt(f), UpdateDomainNameserversErrorKind::UnsupportedTld(_inner) => _inner.fmt(f), UpdateDomainNameserversErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for UpdateDomainNameserversError { fn code(&self) -> Option<&str> { UpdateDomainNameserversError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl UpdateDomainNameserversError { pub fn new(kind: UpdateDomainNameserversErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: UpdateDomainNameserversErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: UpdateDomainNameserversErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_duplicate_request(&self) -> bool { matches!( &self.kind, UpdateDomainNameserversErrorKind::DuplicateRequest(_) ) } pub fn is_invalid_input(&self) -> bool { matches!( &self.kind, UpdateDomainNameserversErrorKind::InvalidInput(_) ) } pub fn is_operation_limit_exceeded(&self) -> bool { matches!( &self.kind, UpdateDomainNameserversErrorKind::OperationLimitExceeded(_) ) } pub fn is_tld_rules_violation(&self) -> bool { matches!( &self.kind, UpdateDomainNameserversErrorKind::TldRulesViolation(_) ) } pub fn is_unsupported_tld(&self) -> bool { matches!( &self.kind, UpdateDomainNameserversErrorKind::UnsupportedTld(_) ) } } impl std::error::Error for UpdateDomainNameserversError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { UpdateDomainNameserversErrorKind::DuplicateRequest(_inner) => Some(_inner), UpdateDomainNameserversErrorKind::InvalidInput(_inner) => Some(_inner), UpdateDomainNameserversErrorKind::OperationLimitExceeded(_inner) => Some(_inner), UpdateDomainNameserversErrorKind::TldRulesViolation(_inner) => Some(_inner), UpdateDomainNameserversErrorKind::UnsupportedTld(_inner) => Some(_inner), UpdateDomainNameserversErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct UpdateTagsForDomainError { pub kind: UpdateTagsForDomainErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum UpdateTagsForDomainErrorKind { InvalidInput(crate::error::InvalidInput), OperationLimitExceeded(crate::error::OperationLimitExceeded), UnsupportedTld(crate::error::UnsupportedTld), /// An unexpected error, eg. 
invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for UpdateTagsForDomainError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { UpdateTagsForDomainErrorKind::InvalidInput(_inner) => _inner.fmt(f), UpdateTagsForDomainErrorKind::OperationLimitExceeded(_inner) => _inner.fmt(f), UpdateTagsForDomainErrorKind::UnsupportedTld(_inner) => _inner.fmt(f), UpdateTagsForDomainErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for UpdateTagsForDomainError { fn code(&self) -> Option<&str> { UpdateTagsForDomainError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl UpdateTagsForDomainError { pub fn new(kind: UpdateTagsForDomainErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: UpdateTagsForDomainErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: UpdateTagsForDomainErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_invalid_input(&self) -> bool { matches!(&self.kind, UpdateTagsForDomainErrorKind::InvalidInput(_)) } pub fn is_operation_limit_exceeded(&self) -> bool { matches!( &self.kind, UpdateTagsForDomainErrorKind::OperationLimitExceeded(_) ) } pub fn is_unsupported_tld(&self) -> bool { matches!(&self.kind, UpdateTagsForDomainErrorKind::UnsupportedTld(_)) } } impl std::error::Error for UpdateTagsForDomainError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { UpdateTagsForDomainErrorKind::InvalidInput(_inner) => Some(_inner), UpdateTagsForDomainErrorKind::OperationLimitExceeded(_inner) => Some(_inner), UpdateTagsForDomainErrorKind::UnsupportedTld(_inner) => Some(_inner), UpdateTagsForDomainErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct ViewBillingError { pub kind: ViewBillingErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum ViewBillingErrorKind { InvalidInput(crate::error::InvalidInput), /// An unexpected error, eg. 
invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for ViewBillingError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { ViewBillingErrorKind::InvalidInput(_inner) => _inner.fmt(f), ViewBillingErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for ViewBillingError { fn code(&self) -> Option<&str> { ViewBillingError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl ViewBillingError { pub fn new(kind: ViewBillingErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: ViewBillingErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: ViewBillingErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_invalid_input(&self) -> bool { matches!(&self.kind, ViewBillingErrorKind::InvalidInput(_)) } } impl std::error::Error for ViewBillingError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { ViewBillingErrorKind::InvalidInput(_inner) => Some(_inner), ViewBillingErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } /// <p>The requested item is not acceptable. For example, for APIs that accept a domain name, the request might specify a domain name /// that doesn't belong to the account that submitted the request. For <code>AcceptDomainTransferFromAnotherAwsAccount</code>, /// the password might be invalid.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct InvalidInput { /// <p>The requested item is not acceptable. For example, for an OperationId it might refer to the ID of an operation /// that is already completed. For a domain name, it might not be a valid domain name or belong to the requester account.</p> pub message: std::option::Option<std::string::String>, } impl std::fmt::Debug for InvalidInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("InvalidInput"); formatter.field("message", &self.message); formatter.finish() } } impl InvalidInput { pub fn message(&self) -> Option<&str> { self.message.as_deref() } } impl std::fmt::Display for InvalidInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "InvalidInput")?; if let Some(inner_1) = &self.message { write!(f, ": {}", inner_1)?; } Ok(()) } } impl std::error::Error for InvalidInput {} /// See [`InvalidInput`](crate::error::InvalidInput) pub mod invalid_input { /// A builder for [`InvalidInput`](crate::error::InvalidInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) message: std::option::Option<std::string::String>, } impl Builder { /// <p>The requested item is not acceptable. 
For example, for an OperationId it might refer to the ID of an operation /// that is already completed. For a domain name, it might not be a valid domain name or belong to the requester account.</p> pub fn message(mut self, input: impl Into<std::string::String>) -> Self { self.message = Some(input.into()); self } pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self { self.message = input; self } /// Consumes the builder and constructs a [`InvalidInput`](crate::error::InvalidInput) pub fn build(self) -> crate::error::InvalidInput { crate::error::InvalidInput { message: self.message, } } } } impl InvalidInput { /// Creates a new builder-style object to manufacture [`InvalidInput`](crate::error::InvalidInput) pub fn builder() -> crate::error::invalid_input::Builder { crate::error::invalid_input::Builder::default() } } /// <p>Amazon Route 53 does not support this top-level domain (TLD).</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UnsupportedTld { /// <p>Amazon Route 53 does not support this top-level domain (TLD).</p> pub message: std::option::Option<std::string::String>, } impl std::fmt::Debug for UnsupportedTld { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UnsupportedTld"); formatter.field("message", &self.message); formatter.finish() } } impl UnsupportedTld { pub fn message(&self) -> Option<&str> { self.message.as_deref() } } impl std::fmt::Display for UnsupportedTld { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "UnsupportedTld [UnsupportedTLD]")?; if let Some(inner_2) = &self.message { write!(f, ": {}", inner_2)?; } Ok(()) } } impl std::error::Error for UnsupportedTld {} /// See [`UnsupportedTld`](crate::error::UnsupportedTld) pub mod unsupported_tld { /// A builder for [`UnsupportedTld`](crate::error::UnsupportedTld) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) message: std::option::Option<std::string::String>, } impl Builder { /// <p>Amazon Route 53 does not support this top-level domain (TLD).</p> pub fn message(mut self, input: impl Into<std::string::String>) -> Self { self.message = Some(input.into()); self } pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self { self.message = input; self } /// Consumes the builder and constructs a [`UnsupportedTld`](crate::error::UnsupportedTld) pub fn build(self) -> crate::error::UnsupportedTld { crate::error::UnsupportedTld { message: self.message, } } } } impl UnsupportedTld { /// Creates a new builder-style object to manufacture [`UnsupportedTld`](crate::error::UnsupportedTld) pub fn builder() -> crate::error::unsupported_tld::Builder { crate::error::unsupported_tld::Builder::default() } } /// <p>The number of operations or jobs running exceeded the allowed threshold for the account.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct OperationLimitExceeded { /// <p>The number of operations or jobs running exceeded the allowed threshold for the account.</p> pub message: std::option::Option<std::string::String>, } impl std::fmt::Debug for OperationLimitExceeded { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("OperationLimitExceeded"); formatter.field("message", &self.message); formatter.finish() } } impl OperationLimitExceeded { pub fn message(&self) -> Option<&str> { 
self.message.as_deref() } } impl std::fmt::Display for OperationLimitExceeded { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "OperationLimitExceeded")?; if let Some(inner_3) = &self.message { write!(f, ": {}", inner_3)?; } Ok(()) } } impl std::error::Error for OperationLimitExceeded {} /// See [`OperationLimitExceeded`](crate::error::OperationLimitExceeded) pub mod operation_limit_exceeded { /// A builder for [`OperationLimitExceeded`](crate::error::OperationLimitExceeded) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) message: std::option::Option<std::string::String>, } impl Builder { /// <p>The number of operations or jobs running exceeded the allowed threshold for the account.</p> pub fn message(mut self, input: impl Into<std::string::String>) -> Self { self.message = Some(input.into()); self } pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self { self.message = input; self } /// Consumes the builder and constructs a [`OperationLimitExceeded`](crate::error::OperationLimitExceeded) pub fn build(self) -> crate::error::OperationLimitExceeded { crate::error::OperationLimitExceeded { message: self.message, } } } } impl OperationLimitExceeded { /// Creates a new builder-style object to manufacture [`OperationLimitExceeded`](crate::error::OperationLimitExceeded) pub fn builder() -> crate::error::operation_limit_exceeded::Builder { crate::error::operation_limit_exceeded::Builder::default() } } /// <p>The top-level domain does not support this operation.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct TldRulesViolation { /// <p>The top-level domain does not support this operation.</p> pub message: std::option::Option<std::string::String>, } impl std::fmt::Debug for TldRulesViolation { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("TldRulesViolation"); formatter.field("message", &self.message); formatter.finish() } } impl TldRulesViolation { pub fn message(&self) -> Option<&str> { self.message.as_deref() } } impl std::fmt::Display for TldRulesViolation { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "TldRulesViolation [TLDRulesViolation]")?; if let Some(inner_4) = &self.message { write!(f, ": {}", inner_4)?; } Ok(()) } } impl std::error::Error for TldRulesViolation {} /// See [`TldRulesViolation`](crate::error::TldRulesViolation) pub mod tld_rules_violation { /// A builder for [`TldRulesViolation`](crate::error::TldRulesViolation) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) message: std::option::Option<std::string::String>, } impl Builder { /// <p>The top-level domain does not support this operation.</p> pub fn message(mut self, input: impl Into<std::string::String>) -> Self { self.message = Some(input.into()); self } pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self { self.message = input; self } /// Consumes the builder and constructs a [`TldRulesViolation`](crate::error::TldRulesViolation) pub fn build(self) -> crate::error::TldRulesViolation { crate::error::TldRulesViolation { message: self.message, } } } } impl TldRulesViolation { /// Creates a new builder-style object to manufacture [`TldRulesViolation`](crate::error::TldRulesViolation) pub fn builder() -> 
crate::error::tld_rules_violation::Builder { crate::error::tld_rules_violation::Builder::default() } } /// <p>The request is already in progress for the domain.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DuplicateRequest { /// <p>The request is already in progress for the domain.</p> pub message: std::option::Option<std::string::String>, } impl std::fmt::Debug for DuplicateRequest { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DuplicateRequest"); formatter.field("message", &self.message); formatter.finish() } } impl DuplicateRequest { pub fn message(&self) -> Option<&str> { self.message.as_deref() } } impl std::fmt::Display for DuplicateRequest { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "DuplicateRequest")?; if let Some(inner_5) = &self.message { write!(f, ": {}", inner_5)?; } Ok(()) } } impl std::error::Error for DuplicateRequest {} /// See [`DuplicateRequest`](crate::error::DuplicateRequest) pub mod duplicate_request { /// A builder for [`DuplicateRequest`](crate::error::DuplicateRequest) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) message: std::option::Option<std::string::String>, } impl Builder { /// <p>The request is already in progress for the domain.</p> pub fn message(mut self, input: impl Into<std::string::String>) -> Self { self.message = Some(input.into()); self } pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self { self.message = input; self } /// Consumes the builder and constructs a [`DuplicateRequest`](crate::error::DuplicateRequest) pub fn build(self) -> crate::error::DuplicateRequest { crate::error::DuplicateRequest { message: self.message, } } } } impl DuplicateRequest { /// Creates a new builder-style object to manufacture [`DuplicateRequest`](crate::error::DuplicateRequest) pub fn builder() -> crate::error::duplicate_request::Builder { crate::error::duplicate_request::Builder::default() } } /// <p>The number of domains has exceeded the allowed threshold for the account.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DomainLimitExceeded { /// <p>The number of domains has exceeded the allowed threshold for the account.</p> pub message: std::option::Option<std::string::String>, } impl std::fmt::Debug for DomainLimitExceeded { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DomainLimitExceeded"); formatter.field("message", &self.message); formatter.finish() } } impl DomainLimitExceeded { pub fn message(&self) -> Option<&str> { self.message.as_deref() } } impl std::fmt::Display for DomainLimitExceeded { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "DomainLimitExceeded")?; if let Some(inner_6) = &self.message { write!(f, ": {}", inner_6)?; } Ok(()) } } impl std::error::Error for DomainLimitExceeded {} /// See [`DomainLimitExceeded`](crate::error::DomainLimitExceeded) pub mod domain_limit_exceeded { /// A builder for [`DomainLimitExceeded`](crate::error::DomainLimitExceeded) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) message: std::option::Option<std::string::String>, } impl Builder { /// <p>The number of domains has exceeded the allowed threshold for the account.</p> pub fn message(mut self, 
input: impl Into<std::string::String>) -> Self { self.message = Some(input.into()); self } pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self { self.message = input; self } /// Consumes the builder and constructs a [`DomainLimitExceeded`](crate::error::DomainLimitExceeded) pub fn build(self) -> crate::error::DomainLimitExceeded { crate::error::DomainLimitExceeded { message: self.message, } } } } impl DomainLimitExceeded { /// Creates a new builder-style object to manufacture [`DomainLimitExceeded`](crate::error::DomainLimitExceeded) pub fn builder() -> crate::error::domain_limit_exceeded::Builder { crate::error::domain_limit_exceeded::Builder::default() } }
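// Added usage sketch (not part of the generated file): how calling code
// might inspect one of the operation errors defined above. The function
// name is hypothetical; the body relies only on accessors generated in
// this file (the `is_*` predicates and the shared `smithy_types::Error`
// metadata exposed via `code`, `request_id`, and `message`).
fn describe_register_domain_error(err: &RegisterDomainError) -> String {
    if err.is_domain_limit_exceeded() {
        return "the account has reached its domain limit".to_string();
    }
    if err.is_duplicate_request() {
        return "an equivalent request is already in progress".to_string();
    }
    // Fall back to the generic metadata recorded for every error,
    // including the Unhandled variant.
    format!(
        "code={:?}, request_id={:?}, message={:?}",
        err.code(),
        err.request_id(),
        err.message()
    )
}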
26.channel-directions.go
package main

import "fmt"
// ping accepts a channel for sending values only: the `chan<- string`
// parameter type makes it a compile-time error to receive from `pings`
// inside this function.
func ping(pings chan<- string, msg string) {
	pings <- msg
}

// pong accepts one channel for receives only (`<-chan string`) and a
// second for sends only (`chan<- string`).
func pong(pings <-chan string, pongs chan<- string) {
	msg := <-pings
	pongs <- msg
}

func main() {
	pings := make(chan string, 1)
	pongs := make(chan string, 1)
	ping(pings, "passed message")
	pong(pings, pongs)
	fmt.Println(<-pongs) // prints "passed message"
}
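// Added sketch (not part of the original example): the direction
// annotations above are enforced by the compiler, so misuse is rejected
// before the program ever runs. The hypothetical function below would
// fail to compile, which is why it is left commented out.
//
//	func badPing(pings chan<- string) string {
//		// compile error: invalid operation: cannot receive from
//		// send-only channel pings
//		return <-pings
//	}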
session.go
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package gofer

import (
	"fmt"
	"sync"

	"gvisor.googlesource.com/gvisor/pkg/p9"
	"gvisor.googlesource.com/gvisor/pkg/refs"
	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
	"gvisor.googlesource.com/gvisor/pkg/sentry/device"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil"
	"gvisor.googlesource.com/gvisor/pkg/sentry/socket/unix/transport"
	"gvisor.googlesource.com/gvisor/pkg/unet"
)

// +stateify savable
type endpointMaps struct {
	// mu protects the direntMap, the keyMap, and the pathMap below.
	mu sync.RWMutex `state:"nosave"`

	// direntMap links sockets to their dirents.
	// It is filled concurrently with the keyMap and is stored upon save.
	// Before saving, this map is used to populate the pathMap.
	direntMap map[transport.BoundEndpoint]*fs.Dirent

	// keyMap links MultiDeviceKeys (containing inode IDs) to their sockets.
	// It is not stored during save because the inode ID may change upon restore.
	keyMap map[device.MultiDeviceKey]transport.BoundEndpoint `state:"nosave"`

	// pathMap links the sockets to their paths.
	// It is filled before saving from the direntMap and is stored upon save.
	// Upon restore, this map is used to re-populate the keyMap.
	pathMap map[transport.BoundEndpoint]string
}

// add adds the endpoint to the maps.
// A reference is taken on the dirent argument.
//
// Precondition: maps must have been locked with 'lock'.
func (e *endpointMaps) add(key device.MultiDeviceKey, d *fs.Dirent, ep transport.BoundEndpoint) {
	e.keyMap[key] = ep
	d.IncRef()
	e.direntMap[ep] = d
}

// remove deletes the key from the maps.
//
// Precondition: maps must have been locked with 'lock'.
func (e *endpointMaps) remove(key device.MultiDeviceKey) {
	endpoint := e.get(key)
	delete(e.keyMap, key)

	d := e.direntMap[endpoint]
	d.DecRef()
	delete(e.direntMap, endpoint)
}

// lock blocks other addition and removal operations from happening while
// the backing file is being created or deleted. Returns a function that
// unlocks the endpoint map.
func (e *endpointMaps) lock() func() {
	e.mu.Lock()
	return func() { e.mu.Unlock() }
}

// get returns the endpoint mapped to the given key.
//
// Precondition: maps must have been locked for reading.
func (e *endpointMaps) get(key device.MultiDeviceKey) transport.BoundEndpoint {
	return e.keyMap[key]
}

// session holds state for each 9p session established during sys_mount.
//
// +stateify savable
type session struct {
	refs.AtomicRefCount

	// msize is the value of the msize mount option, see fs/gofer/fs.go.
	msize uint32 `state:"wait"`

	// version is the value of the version mount option, see fs/gofer/fs.go.
	version string `state:"wait"`

	// cachePolicy is the cache policy. It may be either cacheAll or cacheNone.
	cachePolicy cachePolicy `state:"wait"`

	// aname is the value of the aname mount option, see fs/gofer/fs.go.
	aname string `state:"wait"`

	// The client associated with this session. This will be initialized lazily.
	client *p9.Client `state:"nosave"`

	// The p9.File pointing to attachName via the client. This will be
	// initialized lazily.
	attach contextFile `state:"nosave"`

	// Flags provided to the mount.
	superBlockFlags fs.MountSourceFlags `state:"wait"`

	// connID is a unique identifier for the session connection.
	connID string `state:"wait"`

	// inodeMappings contains mappings of fs.Inodes associated with this session
	// to paths relative to the attach point, where inodeMappings is keyed by
	// Inode.StableAttr.InodeID.
	inodeMappings map[uint64]string `state:"wait"`

	// mounter is the EUID/EGID that mounted this file system.
	mounter fs.FileOwner `state:"wait"`

	// endpoints is used to map inodes that represent socket files to their
	// corresponding endpoint. Socket files are created as regular files in the
	// gofer, and their presence in this map indicates that they should indeed
	// be socket files. This allows unix domain sockets to be used with paths
	// that belong to a gofer.
	//
	// TODO: there are a few possible races with someone stat'ing the
	// file and another deleting it concurrently, where the file will not be
	// reported as a socket file.
	endpoints *endpointMaps `state:"wait"`
}

// Destroy tears down the session.
func (s *session) Destroy() {
	s.client.Close()
}

// Revalidate implements MountSource.Revalidate.
func (s *session) Revalidate(ctx context.Context, name string, parent, child *fs.Inode) bool {
	return s.cachePolicy.revalidate(ctx, name, parent, child)
}

// Keep implements MountSource.Keep.
func (s *session) Keep(d *fs.Dirent) bool {
	return s.cachePolicy.keep(d)
}

// ResetInodeMappings implements fs.MountSourceOperations.ResetInodeMappings.
func (s *session) ResetInodeMappings() {
	s.inodeMappings = make(map[uint64]string)
}

// SaveInodeMapping implements fs.MountSourceOperations.SaveInodeMapping.
func (s *session) SaveInodeMapping(inode *fs.Inode, path string) {
	// This is very unintuitive. We *CANNOT* trust the inode's StableAttrs,
	// because overlay copyUp may have changed them out from under us.
	// So much for "immutable".
	sattr := inode.InodeOperations.(*inodeOperations).fileState.sattr
	s.inodeMappings[sattr.InodeID] = path
}

// newInodeOperations creates a new 9p fs.InodeOperations backed by a p9.File
// and attributes (p9.QID, p9.AttrMask, p9.Attr).
//
// Endpoints lock must not be held if socket == false.
func newInodeOperations(ctx context.Context, s *session, file contextFile, qid p9.QID, valid p9.AttrMask, attr p9.Attr, socket bool) (fs.StableAttr, *inodeOperations) {
	deviceKey := device.MultiDeviceKey{
		Device:          attr.RDev,
		SecondaryDevice: s.connID,
		Inode:           qid.Path,
	}

	sattr := fs.StableAttr{
		Type:      ntype(attr),
		DeviceID:  goferDevice.DeviceID(),
		InodeID:   goferDevice.Map(deviceKey),
		BlockSize: bsize(attr),
	}

	if s.endpoints != nil {
		if socket {
			// The caller just created this file as a socket; mark it as such.
			sattr.Type = fs.Socket
		} else {
else { // If unix sockets are allowed on this filesystem, check if this file is // supposed to be a socket file. unlock := s.endpoints.lock() if s.endpoints.get(deviceKey) != nil { sattr.Type = fs.Socket } unlock() } } fileState := &inodeFileState{ s: s, file: file, sattr: sattr, key: deviceKey, } uattr := unstable(ctx, valid, attr, s.mounter, s.client) return sattr, &inodeOperations{ fileState: fileState, cachingInodeOps: fsutil.NewCachingInodeOperations(ctx, fileState, uattr, s.superBlockFlags.ForcePageCache), } } // Root returns the root of a 9p mount. This mount is bound to a 9p server // based on conn. The remaining configuration parameters are: // // * dev: connection id // * filesystem: the filesystem backing the mount // * superBlockFlags: the mount flags describing general mount options // * opts: parsed 9p mount options func Root(ctx context.Context, dev string, filesystem fs.Filesystem, superBlockFlags fs.MountSourceFlags, o opts) (*fs.Inode, error) { // The mounting EUID/EGID will be cached by this file system. This will // be used to assign ownership to files that the Gofer owns. mounter := fs.FileOwnerFromContext(ctx) conn, err := unet.NewSocket(o.fd) if err != nil { return nil, err } // Construct the session. s := &session{ connID: dev, msize: o.msize, version: o.version, cachePolicy: o.policy, aname: o.aname, superBlockFlags: superBlockFlags, mounter: mounter, } if o.privateunixsocket { s.endpoints = newEndpointMaps() } // Construct the MountSource with the session and superBlockFlags. m := fs.NewMountSource(s, filesystem, superBlockFlags) // Send the Tversion request. s.client, err = p9.NewClient(conn, s.msize, s.version) if err != nil { // Drop our reference on the session, it needs to be torn down. s.DecRef() return nil, err } // Notify that we're about to call the Gofer and block. ctx.UninterruptibleSleepStart(false) // Send the Tattach request. s.attach.file, err = s.client.Attach(s.aname) ctx.UninterruptibleSleepFinish(false) if err != nil { // Same as above. s.DecRef() return nil, err } qid, valid, attr, err := s.attach.getAttr(ctx, p9.AttrMaskAll()) if err != nil { s.attach.close(ctx) // Same as above, but after we execute the Close request. s.DecRef() return nil, err } sattr, iops := newInodeOperations(ctx, s, s.attach, qid, valid, attr, false) return fs.NewInode(iops, m, sattr), nil } // newEndpointMaps creates a new endpointMaps. func newEndpointMaps() *endpointMaps { return &endpointMaps{ direntMap: make(map[transport.BoundEndpoint]*fs.Dirent), keyMap: make(map[device.MultiDeviceKey]transport.BoundEndpoint), pathMap: make(map[transport.BoundEndpoint]string), } } // fillKeyMap re-populates the keyMap upon restore from the saved // pathMap. func (s *session) fillKeyMap(ctx context.Context) error { unlock := s.endpoints.lock() defer unlock() for ep, dirPath := range s.endpoints.pathMap { _, file, err := s.attach.walk(ctx, splitAbsolutePath(dirPath)) if err != nil { return fmt.Errorf("error filling endpointmaps, failed to walk to %q: %v", dirPath, err) } qid, _, attr, err := file.getAttr(ctx, p9.AttrMaskAll()) if err != nil { return fmt.Errorf("failed to get file attributes of %s: %v", dirPath, err) } key := device.MultiDeviceKey{ Device: attr.RDev, SecondaryDevice: s.connID, Inode: qid.Path, } s.endpoints.keyMap[key] = ep } return nil } // fillPathMap populates paths for endpoints from dirents in direntMap // before save. 
func (s *session) fillPathMap() error { unlock := s.endpoints.lock() defer unlock() for ep, dir := range s.endpoints.direntMap { mountRoot := dir.MountRoot() defer mountRoot.DecRef() dirPath, _ := dir.FullName(mountRoot) if dirPath == "" { return fmt.Errorf("error getting path from dirent") } s.endpoints.pathMap[ep] = dirPath } return nil } // restoreEndpointMaps rebuilds the keyMap (and resets the pathMap) after a restore. func (s *session) restoreEndpointMaps(ctx context.Context) error { // When restoring, we only need to re-create the keyMap because the dirent and // path maps were stored through the save. s.endpoints.keyMap = make(map[device.MultiDeviceKey]transport.BoundEndpoint) if err := s.fillKeyMap(ctx); err != nil { return fmt.Errorf("failed to insert sockets into endpoint map: %v", err) } // Re-create the pathMap because it can no longer be trusted, as socket paths // can change while the process continues to run. The empty pathMap will be // re-filled upon the next save. s.endpoints.pathMap = make(map[transport.BoundEndpoint]string) return nil }
{ sattr.Type = fs.Socket }
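// Illustrative sketch, not part of the original file: the save/restore
// ordering that the endpoint maps above rely on. saveRestoreSketch is a
// hypothetical driver added for illustration; fillPathMap and
// restoreEndpointMaps are the real methods defined above.
func saveRestoreSketch(ctx context.Context, s *session) error {
	// Before saving: derive pathMap from direntMap, because inode IDs (and
	// therefore the keyMap) cannot be trusted across a save/restore cycle.
	if err := s.fillPathMap(); err != nil {
		return err
	}
	// ... the checkpoint would happen here; keyMap is dropped (state:"nosave") ...
	// After restoring: rebuild the keyMap by re-walking each saved path.
	return s.restoreEndpointMaps(ctx)
}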
fragment_config_overrides.py
# This config is incomplete, but will specify all the required keys # when combined with fragment_config_base.py. c.jira.project_key = "TEST" c.jira.max_retries = 7 c.jira.sync_milestones = False c.github.repository = "testing/test-repo"
basic.tsx
import React, { useState } from 'react'; import { Slider } from 'starfall'; const BasicUsage: React.FC = () => { const [state, setState] = useState(0); const [state2, setState2] = useState(10); const [state3, setState3] = useState(101); const [state4, setState4] = useState(101); return ( <> <div style={{ padding: 40 }}> <Slider value={state} onChange={setState} /> </div> <div style={{ padding: 40 }}> <Slider value={state2} onChange={setState2} min={10} max={20} step={2} label={[ { value: 14, }, ]} /> </div> <div style={{ padding: 40 }}> <Slider value={state3}
max={201} step={2} label={[ { value: 114, }, ]} /> </div>{' '} <div style={{ padding: 40 }}> <Slider value={state4} onChange={setState4} min={101} max={201} step={null} label={[ { value: 101, }, { value: 114, }, { value: 133, }, { value: 201, }, ]} /> </div> </> ); }; export default BasicUsage;
onChange={setState3} min={101}
dandelion_monitor.rs
// Copyright 2020 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use chrono::prelude::Utc; use rand::{thread_rng, Rng}; use std::sync::Arc; use std::thread; use std::time::{Duration, Instant}; use crate::common::adapters::DandelionAdapter; use crate::core::core::hash::Hashed; use crate::core::core::transaction; use crate::core::core::verifier_cache::VerifierCache; use crate::pool::{DandelionConfig, Pool, PoolEntry, PoolError, TransactionPool, TxSource}; use crate::util::{RwLock, StopState}; /// A process to monitor transactions in the stempool. /// With Dandelion, a transaction can be broadcast in the stem or fluff phase. /// When sent in the stem phase, the transaction is relayed to only one node: the /// dandelion relay. In order to maintain reliability, a timer is started for /// each transaction sent in the stem phase. This function will monitor the /// stempool and test whether the timer has expired for each transaction. In that case /// the transaction will be sent in the fluff phase (to multiple peers) instead of /// only to the relay peer. pub fn monitor_transactions( dandelion_config: DandelionConfig, tx_pool: Arc<RwLock<TransactionPool>>, adapter: Arc<dyn DandelionAdapter>, verifier_cache: Arc<RwLock<dyn VerifierCache>>, stop_state: Arc<StopState>, ) -> std::io::Result<thread::JoinHandle<()>> { debug!("Started Dandelion transaction monitor."); thread::Builder::new() .name("dandelion".to_string()) .spawn(move || { let run_interval = Duration::from_secs(10); let mut last_run = Instant::now() .checked_sub(Duration::from_secs(20)) .unwrap_or_else(|| Instant::now()); loop { // Halt Dandelion monitor if we have been notified that we are stopping. if stop_state.is_stopped() { break; } if last_run.elapsed() > run_interval { if !adapter.is_stem() { let _ = process_fluff_phase( &dandelion_config, &tx_pool, &adapter, &verifier_cache, ) .map_err(|e| { error!("dand_mon: Problem processing fluff phase. {}", e); }); } // Now find all expired entries based on embargo timer. let _ = process_expired_entries(&dandelion_config, &tx_pool).map_err(|e| { error!("dand_mon: Problem processing expired entries. {}", e); }); // Handle the tx above *before* we transition to the next epoch. // This gives us an opportunity to do the final "fluff" before we start // stemming on the subsequent epoch. if adapter.is_expired() { adapter.next_epoch(); } last_run = Instant::now(); } // Monitor loops every 10s, but check stop flag every second. thread::sleep(Duration::from_secs(1)); } })
// Query the pool for transactions older than the cutoff. // Used for both periodic fluffing and handling the expired embargo timer. fn select_txs_cutoff(pool: &Pool, cutoff_secs: u16) -> Vec<PoolEntry> { let cutoff = Utc::now().timestamp() - cutoff_secs as i64; pool.entries .iter() .filter(|x| x.tx_at.timestamp() < cutoff) .cloned() .collect() } fn process_fluff_phase( dandelion_config: &DandelionConfig, tx_pool: &Arc<RwLock<TransactionPool>>, adapter: &Arc<dyn DandelionAdapter>, verifier_cache: &Arc<RwLock<dyn VerifierCache>>, ) -> Result<(), PoolError> { // Take a write lock on the txpool for the duration of this processing. let mut tx_pool = tx_pool.write(); let all_entries = tx_pool.stempool.entries.clone(); if all_entries.is_empty() { return Ok(()); } let cutoff_secs = dandelion_config.aggregation_secs; let cutoff_entries = select_txs_cutoff(&tx_pool.stempool, cutoff_secs); // If the epoch is expired, fluff *all* outstanding entries in the stempool. // If *any* entry is older than aggregation_secs (30s), then fluff *all* entries. // Otherwise we are done for now and we can give txs more time to aggregate. if !adapter.is_expired() && cutoff_entries.is_empty() { return Ok(()); } let header = tx_pool.chain_head()?; let fluffable_txs = { let txpool_tx = tx_pool.txpool.all_transactions_aggregate()?; let txs: Vec<_> = all_entries.into_iter().map(|x| x.tx).collect(); tx_pool.stempool.validate_raw_txs( &txs, txpool_tx, &header, transaction::Weighting::NoLimit, )? }; debug!( "dand_mon: Found {} txs in local stempool to fluff", fluffable_txs.len() ); let agg_tx = transaction::aggregate(fluffable_txs)?; agg_tx.validate( transaction::Weighting::AsTransaction, verifier_cache.clone(), )?; tx_pool.add_to_pool(TxSource::Fluff, agg_tx, false, &header)?; Ok(()) } fn process_expired_entries( dandelion_config: &DandelionConfig, tx_pool: &Arc<RwLock<TransactionPool>>, ) -> Result<(), PoolError> { // Take a write lock on the txpool for the duration of this processing. let mut tx_pool = tx_pool.write(); let embargo_secs = dandelion_config.embargo_secs + thread_rng().gen_range(0, 31); let expired_entries = select_txs_cutoff(&tx_pool.stempool, embargo_secs); if expired_entries.is_empty() { return Ok(()); } debug!("dand_mon: Found {} expired txs.", expired_entries.len()); let header = tx_pool.chain_head()?; for entry in expired_entries { let txhash = entry.tx.hash(); match tx_pool.add_to_pool(TxSource::EmbargoExpired, entry.tx, false, &header) { Ok(_) => info!( "dand_mon: embargo expired for {}, fluffed successfully.", txhash ), Err(e) => warn!("dand_mon: failed to fluff expired tx {}, {:?}", txhash, e), }; } Ok(()) }
}
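// Illustrative sketch, separate from the monitor above: the timestamp
// arithmetic behind select_txs_cutoff, with plain integers standing in for
// the PoolEntry timestamps (the pool types themselves are elided here).
fn past_cutoff(tx_at_secs: i64, now_secs: i64, cutoff_secs: u16) -> bool {
    tx_at_secs < now_secs - cutoff_secs as i64
}

#[cfg(test)]
mod cutoff_sketch {
    use super::past_cutoff;

    #[test]
    fn embargo_expiry() {
        let now = 1_000_000;
        // A stem tx seen 200s ago with a 180s embargo should be fluffed.
        assert!(past_cutoff(now - 200, now, 180));
        // A stem tx seen 10s ago is still inside its embargo window.
        assert!(!past_cutoff(now - 10, now, 180));
    }
}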
ich_vtr_el2.rs
/* * MIT License * * Copyright (c) 2020 Reto Achermann * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * SPDX-License-Identifier: MIT */ /*********************************************************************************************** * *** * * !!!! WARNING: THIS FILE IS AUTO GENERATED. ANY CHANGES MAY BE OVERWRITTEN !!!! * * Generated on: 2020-10-05T16:49:32.049044 * Version: Armv8.7-A-2020-09 * Source: https://developer.arm.com/-/media/developer/products/architecture/armv8-a-architecture/2020-09/SysReg_xml_v87A-2020-09.tar.gz * * !!!! WARNING: THIS FILE IS AUTO GENERATED. ANY CHANGES MAY BE OVERWRITTEN !!!! * ********************************************************************************************** * * */ /* * ================================================================================================ * Register Information * ================================================================================================ * * Register: Interrupt Controller VGIC Type Register (ich_vtr_el2) * Group: Virtualization registers * Type: 64-bit Register * Description: Reports supported GIC virtualization features. * File: AArch64-ich_vtr_el2.xml */ /* * ================================================================================================ * Register Read/Write Functions * ================================================================================================ */ /// reading the Interrupt Controller VGIC Type Register (ich_vtr_el2) register pub fn reg_rawrd() -> u64 { let mut regval: u64; unsafe { // MRS <Xt>, ICH_VTR_EL2 llvm_asm!("mrs $0, ich_vtr_el2" : "=r"(regval)); } return regval; } // register is not writable. 
not emitting write accessor /* * ================================================================================================ * Register Fields Read/Write Functions * ================================================================================================ */ /// reads field val from register pub fn pribits_read() -> u64 { // bits 29..31 let val = reg_rawrd(); (val >> 29) & 0x7 } // register is not writable, omitting writing to field /// reads field val from register pub fn prebits_read() -> u64 { // bits 26..28 let val = reg_rawrd(); (val >> 26) & 0x7 } // register is not writable, omitting writing to field /// reads field val from register pub fn idbits_read() -> u64 { // bits 23..25 let val = reg_rawrd(); (val >> 23) & 0x7 } // register is not writable, omitting writing to field /// reads field val from register pub fn seis_read() -> u64 { // bits 22..22 let val = reg_rawrd(); (val >> 22) & 0x1 } // register is not writable, omitting writing to field /// reads field val from register pub fn a3v_read() -> u64 { // bits 21..21 let val = reg_rawrd(); (val >> 21) & 0x1 } // register is not writable, omitting writing to field /// reads field val from register pub fn nv4_read() -> u64 { // bits 20..20 let val = reg_rawrd(); (val >> 20) & 0x1 } // register is not writable, omitting writing to field /// reads field val from register pub fn tds_read() -> u64
// register is not writable, omitting writing to field /// reads field val from register pub fn dvim_read() -> u64 { // bits 18..18 let val = reg_rawrd(); (val >> 18) & 0x1 } // register is not writable, omitting writing to field /// reads field val from register pub fn listregs_read() -> u64 { // bits 0..4 let val = reg_rawrd(); (val >> 0) & 0x1f } // register is not writable, omitting writing to field /* * ================================================================================================ * Data Structure Definitions * ================================================================================================ */ /// struct holding a copy of the Interrupt Controller VGIC Type Register value in memory pub struct RegVal { val: u64, } /// struct implementation for accessing the fields of register ich_vtr_el2 impl RegVal { // creates a new default value pub fn default() -> RegVal { RegVal { val: 0 } } /// returns a RegVal populated with the current register value pub fn current(&mut self) -> RegVal { let curval = reg_rawrd() & 0xfffc001f; RegVal { val: curval } } /// extracts field val from current value pub fn read(&mut self) { self.val = reg_rawrd() & 0xfffc001f } // no write() method as it is read only // sets the value of the struct pub fn set(&mut self, newval: u64) { self.val = newval & 0xfffc001f; } // gets the value of the struct pub fn get(&self) -> u64 { self.val } /// extracts field val from current value pub fn pribits_extract(&mut self) -> u64 { // bits 29..31 (self.val >> 29) & 0x7 } // no insert() method for field pribits /// extracts field val from current value pub fn prebits_extract(&mut self) -> u64 { // bits 26..28 (self.val >> 26) & 0x7 } // no insert() method for field prebits /// extracts field val from current value pub fn idbits_extract(&mut self) -> u64 { // bits 23..25 (self.val >> 23) & 0x7 } // no insert() method for field idbits /// extracts field val from current value pub fn seis_extract(&mut self) -> u64 { // bits 22..22 (self.val >> 22) & 0x1 } // no insert() method for field seis /// extracts field val from current value pub fn a3v_extract(&mut self) -> u64 { // bits 21..21 (self.val >> 21) & 0x1 } // no insert() method for field a3v /// extracts field val from current value pub fn nv4_extract(&mut self) -> u64 { // bits 20..20 (self.val >> 20) & 0x1 } // no insert() method for field nv4 /// extracts field val from current value pub fn tds_extract(&mut self) -> u64 { // bits 19..19 (self.val >> 19) & 0x1 } // no insert() method for field tds /// extracts field val from current value pub fn dvim_extract(&mut self) -> u64 { // bits 18..18 (self.val >> 18) & 0x1 } // no insert() method for field dvim /// extracts field val from current value pub fn listregs_extract(&mut self) -> u64 { // bits 0..4 (self.val >> 0) & 0x1f } // no insert() method for field listregs }
{ // bits 19..19 let val = reg_rawrd(); (val >> 19) & 0x1 }
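// Illustrative sketch, not generated code: the shift-and-mask pattern every
// accessor above follows, applied to a made-up raw register value.
#[cfg(test)]
mod field_extract_sketch {
    fn extract(val: u64, shift: u32, mask: u64) -> u64 {
        (val >> shift) & mask
    }

    #[test]
    fn sample_fields() {
        let raw: u64 = 0xa000_0010; // hypothetical ich_vtr_el2 snapshot
        assert_eq!(extract(raw, 29, 0x7), 0x5); // PRIbits, bits 31..29
        assert_eq!(extract(raw, 0, 0x1f), 0x10); // ListRegs, bits 4..0
    }
}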
reference.go
// Copyright 2018-2019 Workiva Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package eva import "github.com/Workiva/eva-client-go/edn" // Reference describes a serializable reference. type Reference interface { edn.Serializable // Type returns the type of the reference. Type() ChannelType // AddProperty adds the property by name, or removes it if the value is nil. AddProperty(name string, value edn.Serializable) error // GetProperty returns the property by name. GetProperty(name string) edn.Serializable } const ( ErrInvalidSerializer = edn.ErrorMessage("Invalid serializer") LabelReferenceProperty = "label" AsOfReferenceProperty = "as-of" ) type refImpl struct { refType ChannelType properties map[string]edn.Serializable } // newReference creates a new reference. func newReference(refType ChannelType, properties map[string]edn.Serializable) (ref Reference, err error)
func (ref *refImpl) String() string { var str string PanicOnError(func() error { var err error str, err = ref.Serialize(edn.EvaEdnMimeType) return err }) return str } // Serialize the reference. func (ref *refImpl) Serialize(with edn.Serializer) (value string, err error) { if with != nil { var elem edn.CollectionElement if elem, err = edn.NewMap(); err == nil { for name, value := range ref.properties { if value != nil { var symbol edn.SymbolElement if symbol, err = edn.NewKeywordElement(name); err == nil { switch v := value.(type) { case edn.Element: err = elem.Append(symbol, v) case rawStringImpl: err = elem.Append(symbol, edn.NewStringElement(v.String())) case rawIntImpl: err = elem.Append(symbol, edn.NewIntegerElement(v.Int())) default: err = edn.MakeError(edn.ErrInvalidInput, value) } } } if err != nil { break } } } if err == nil { if err = elem.SetTag(string(ref.Type())); err == nil { value, err = elem.Serialize(with) } } } else { err = edn.MakeError(ErrInvalidSerializer, nil) } return value, err } // Type of this reference func (ref *refImpl) Type() ChannelType { return ref.refType } // AddProperty will add the property by name, or if the value is nil, will remove it. func (ref *refImpl) AddProperty(name string, value edn.Serializable) error { if value != nil { ref.properties[name] = value } else { delete(ref.properties, name) } return nil } // GetProperty returns the property by name func (ref *refImpl) GetProperty(name string) edn.Serializable { return ref.properties[name] } func NewConnectionReference(label string) (ref Reference, err error) { return newReference(ConnectionReferenceType, map[string]edn.Serializable{ LabelReferenceProperty: RawString(label), }) } func NewSnapshotAsOfReference(label string, asOf interface{}) (ref Reference, err error) { var asOfElem edn.Serializable if asOfElem, err = decodeSerializable(asOf); err == nil { ref, err = newReference(SnapshotReferenceType, map[string]edn.Serializable{ LabelReferenceProperty: RawString(label), AsOfReferenceProperty: asOfElem, }) } return ref, err } func NewSnapshotReference(label string) (req Reference, err error) { return NewSnapshotAsOfReference(label, nil) }
{ if properties == nil { properties = make(map[string]edn.Serializable) } ref = &refImpl{ refType: refType, properties: properties, } return ref, err }
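// Hedged usage sketch, not part of the original file: building and
// serializing a connection reference with the constructors above. Only
// names already defined in this package are used; "accounts" is a
// placeholder label, and error handling is kept minimal.
func exampleConnectionReference() (string, error) {
	ref, err := NewConnectionReference("accounts")
	if err != nil {
		return "", err
	}
	// Serialize renders the properties as an EDN map tagged with the
	// reference's channel type.
	return ref.Serialize(edn.EvaEdnMimeType)
}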
list_test.go
// Copyright 2017 The Kubernetes Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pod_test import ( "errors" "reflect" "testing" "github.com/kubernetes/dashboard/src/app/backend/api" metricapi "github.com/kubernetes/dashboard/src/app/backend/integration/metric/api" "github.com/kubernetes/dashboard/src/app/backend/resource/common" "github.com/kubernetes/dashboard/src/app/backend/resource/dataselect" "github.com/kubernetes/dashboard/src/app/backend/resource/pod" v1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestGetPodListFromChannels(t *testing.T) { cases := []struct { k8sPod v1.PodList k8sPodError error pods *v1.PodList expected *pod.PodList expectedError error }{ { v1.PodList{}, nil, &v1.PodList{}, &pod.PodList{ ListMeta: api.ListMeta{}, Pods: []pod.Pod{}, CumulativeMetrics: make([]metricapi.Metric, 0), Errors: []error{}, }, nil, }, { v1.PodList{}, errors.New("MyCustomError"), &v1.PodList{}, nil, errors.New("MyCustomError"), }, { v1.PodList{}, &k8serrors.StatusError{}, &v1.PodList{}, nil, &k8serrors.StatusError{}, }, { v1.PodList{}, &k8serrors.StatusError{ErrStatus: metav1.Status{}}, &v1.PodList{}, nil, &k8serrors.StatusError{ErrStatus: metav1.Status{}}, }, { v1.PodList{}, &k8serrors.StatusError{ErrStatus: metav1.Status{Reason: "foo-bar"}}, &v1.PodList{}, nil, &k8serrors.StatusError{ErrStatus: metav1.Status{Reason: "foo-bar"}}, }, { v1.PodList{ Items: []v1.Pod{{ ObjectMeta: metav1.ObjectMeta{ Name: "pod-name", Namespace: "pod-namespace", Labels: map[string]string{"key": "value"}, CreationTimestamp: metav1.Unix(111, 222), }, }}, }, nil, &v1.PodList{}, &pod.PodList{ ListMeta: api.ListMeta{TotalItems: 1}, CumulativeMetrics: make([]metricapi.Metric, 0), Status: common.ResourceStatus{Pending: 1}, Pods: []pod.Pod{{ ObjectMeta: api.ObjectMeta{ Name: "pod-name", Namespace: "pod-namespace", Labels: map[string]string{"key": "value"}, CreationTimestamp: metav1.Unix(111, 222), }, TypeMeta: api.TypeMeta{Kind: api.ResourceKindPod}, PodStatus: pod.PodStatus{Status: string(v1.PodPending)}, Warnings: []common.Event{}, }}, Errors: []error{}, }, nil, }, } for _, c := range cases { channels := &common.ResourceChannels{ PodList: common.PodListChannel{ List: make(chan *v1.PodList, 1), Error: make(chan error, 1), }, EventList: common.EventListChannel{ List: make(chan *v1.EventList, 1), Error: make(chan error, 1), }, } channels.PodList.Error <- c.k8sPodError channels.PodList.List <- &c.k8sPod channels.EventList.List <- &v1.EventList{} channels.EventList.Error <- nil actual, err := pod.GetPodListFromChannels(channels, dataselect.NoDataSelect, nil) if !reflect.DeepEqual(actual, c.expected)
if !reflect.DeepEqual(err, c.expectedError) { t.Errorf("GetPodListFromChannels() ==\n %#v\nExpected: %#v", err, c.expectedError) } } }
{ t.Errorf("GetPodListFromChannels() ==\n %#v\nExpected: %#v", actual, c.expected) }
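// Side-note sketch, not part of the test above: the reason the test can
// pre-load its channels without goroutines is that every channel is buffered
// with capacity 1, so a single send never blocks.
func exampleBufferedChannelPreload() error {
	errCh := make(chan error, 1)
	errCh <- nil // does not block: the buffer absorbs one value
	return <-errCh
}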
output_inference_images.py
#!/usr/bin/env python3 """ Project title: CollembolAI Authors: Stephan Weißbach, Stanislav Sys, Clément Schneider Original repository: https://github.com/stasys-hub/Collembola_AI.git Module title: output_inference_images.py Purpose: draws bounding boxes from annotations on pictures. If provided with groundtruth, it will also specify the correctness of predictions Dependencies: See ReadMe Last Update: 18.02.2022 """ from PIL import Image, ImageFont, ImageDraw import os from utils.cocoutils import coco2df def draw_coco_bbox( coco, out_dir, coco_dir, eval_mode=False, prefix="annotated", line_width=10, fontsize=80, fontYshift=-70, ): """ Detectron2's module for writing annotated pictures was not very explicit to me, and the default output is not so pretty. This function will draw the annotations on the pictures of a coco dataset. The dataset can be provided as a coco instance, or as a dataframe resulting from coco2df. Modified pictures are written to the out_dir, with a name prefix. To adjust the display, simply change line_width (= box line) and fontsize (= label font). Label text can be shifted vertically with fontYshift. """ # define some colors for bounding boxes with open( os.path.join(os.path.dirname(os.path.realpath(__file__)), "colors.txt"), "r" ) as colorfile: colors = [color.replace("\n", "") for color in colorfile] Image.MAX_IMAGE_PIXELS = None fnt = ImageFont.truetype( os.path.join(os.path.dirname(os.path.realpath(__file__)), "FreeMono.ttf"), fontsize, )
except Exception: coco_df = coco # create labels for the bounding boxes if eval_mode: coco_df["label"] = [ f"{' '.join(row['category_name'].split('__')[0].split('_'))} {round(row['score'], 2)} {'true detection' if not row['is_false_positive'] else 'false detection'}" for _, row in coco_df.iterrows() ] else: coco_df["label"] = [ f"{' '.join(row['category_name'].split('__')[0].split('_'))} {round(row['score'], 2)}" for _, row in coco_df.iterrows() ] resh = lambda x: ((x[0], x[1]), (x[0] + x[2], x[1] + x[3])) coco_df["coordinates"] = coco_df["bbox"].apply(resh) # create a dictionary so that every class maps to one color (cycling through the color list) colormap = {} for idx, classlabel in enumerate(coco_df["category_name"].unique()): colormap[classlabel] = colors[idx % len(colors)] # add a color column for idx, row in coco_df.iterrows(): coco_df.loc[idx, "color"] = colormap[row["category_name"]] for img_name in coco_df.file_name.unique(): source_img = Image.open(f"{coco_dir}/{img_name}") draw = ImageDraw.Draw(source_img) for row in coco_df[coco_df["file_name"] == img_name][ ["label", "coordinates", "color"] ].values: draw.rectangle(row[1], outline=row[2], width=line_width) draw.text( (row[1][0][0], row[1][0][1] + fontYshift), row[0], font=fnt, fill=row[2] ) print(f"Writing {out_dir}/{prefix}_{img_name}") source_img.save(f"{out_dir}/{prefix}_{img_name}", "JPEG")
# convert result dataframe to coco try: coco_df = coco2df(coco)
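# Hedged usage sketch, not part of the module: load_coco_result is a
# hypothetical helper standing in for however the caller obtains a coco
# instance (or a dataframe already produced by coco2df); the paths are
# placeholders.
def _example_usage():
    my_coco = load_coco_result("inference/results_coco.json")  # hypothetical helper
    draw_coco_bbox(
        my_coco,
        out_dir="inference/annotated",  # annotated copies are written here
        coco_dir="inference/images",    # directory holding the source images
        eval_mode=False,
    )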
session_service_node_info.py
from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.plugins.action import ActionBase try: from ansible_collections.ansible.utils.plugins.module_utils.common.argspec_validate import ( AnsibleArgSpecValidator, ) except ImportError: ANSIBLE_UTILS_IS_INSTALLED = False else: ANSIBLE_UTILS_IS_INSTALLED = True from ansible.errors import AnsibleActionFail from ansible_collections.cisco.ise.plugins.module_utils.ise import ( ISESDK, ise_argument_spec, ) # Get common arguments specification argument_spec = ise_argument_spec() # Add arguments specific to this module argument_spec.update(dict( name=dict(type="str"), id=dict(type="str"), page=dict(type="int"), size=dict(type="int"), )) required_if = [] required_one_of = [] mutually_exclusive = [] required_together = [] class ActionModule(ActionBase): def __init__(self, *args, **kwargs): if not ANSIBLE_UTILS_IS_INSTALLED: raise AnsibleActionFail("ansible.utils is not installed. Execute 'ansible-galaxy collection install ansible.utils'") super(ActionModule, self).__init__(*args, **kwargs) self._supports_async = True self._result = None # Checks the supplied parameters against the argument spec for this module def _check_argspec(self):
def get_object(self, params): new_object = dict( name=params.get("name"), id=params.get("id"), page=params.get("page"), size=params.get("size"), ) return new_object def run(self, tmp=None, task_vars=None): self._task.diff = False self._result = super(ActionModule, self).run(tmp, task_vars) self._result["changed"] = False self._check_argspec() ise = ISESDK(params=self._task.args) id = self._task.args.get("id") name = self._task.args.get("name") if id: response = ise.exec( family="psn_node_details_with_radius_service", function='get_session_service_node_by_id', params=self.get_object(self._task.args) ).response['SessionServiceNode'] self._result.update(dict(ise_response=response)) self._result.update(ise.exit_json()) return self._result if name: response = ise.exec( family="psn_node_details_with_radius_service", function='get_session_service_node_by_name', params=self.get_object(self._task.args) ).response['SessionServiceNode'] self._result.update(dict(ise_response=response)) self._result.update(ise.exit_json()) return self._result if not name and not id: response = [] generator = ise.exec( family="psn_node_details_with_radius_service", function='get_session_service_node_generator', params=self.get_object(self._task.args), ) for item in generator: tmp_response = item.response['SearchResult']['resources'] if isinstance(tmp_response, list): response += tmp_response else: response.append(tmp_response) self._result.update(dict(ise_response=response)) self._result.update(ise.exit_json()) return self._result
aav = AnsibleArgSpecValidator( data=self._task.args, schema=dict(argument_spec=argument_spec), schema_format="argspec", schema_conditionals=dict( required_if=required_if, required_one_of=required_one_of, mutually_exclusive=mutually_exclusive, required_together=required_together, ), name=self._task.action, ) valid, errors, self._task.args = aav.validate() if not valid: raise AnsibleActionFail(errors)
describe.py
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A command that describes a resource collection for a given API.""" from googlecloudsdk.calliope import base from googlecloudsdk.command_lib.meta.apis import flags from googlecloudsdk.command_lib.util.apis import registry class Describe(base.DescribeCommand): """Describe the details of a collection for an API.""" @staticmethod def Args(parser): flags.API_VERSION_FLAG.AddToParser(parser) parser.add_argument( 'collection', completer=flags.CollectionCompleter, help='The name of the collection to get the details of.') def Run(self, args):
return registry.GetAPICollection(args.collection, api_version=args.api_version)
__init__.py
from .exporter import csvExporter from .exporter import exporter
schema.go
/* Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package wrangler import ( "bytes" "fmt" "html/template" "sort" "sync" "time" "golang.org/x/net/context" log "github.com/golang/glog" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/sync2" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/mysqlctl/tmutils" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) const ( // DefaultWaitSlaveTimeout is the default value for waitSlaveTimeout, which is used when calling method CopySchemaShardFromShard. DefaultWaitSlaveTimeout = 10 * time.Second ) // GetSchema uses an RPC to get the schema from a remote tablet func (wr *Wrangler) GetSchema(ctx context.Context, tabletAlias *topodatapb.TabletAlias, tables, excludeTables []string, includeViews bool) (*tabletmanagerdatapb.SchemaDefinition, error) { ti, err := wr.ts.GetTablet(ctx, tabletAlias) if err != nil { return nil, err } return wr.tmc.GetSchema(ctx, ti.Tablet, tables, excludeTables, includeViews) } // ReloadSchema forces the remote tablet to reload its schema. func (wr *Wrangler) ReloadSchema(ctx context.Context, tabletAlias *topodatapb.TabletAlias) error { ti, err := wr.ts.GetTablet(ctx, tabletAlias) if err != nil { return err } return wr.tmc.ReloadSchema(ctx, ti.Tablet, "") } // ReloadSchemaShard reloads the schema for all slave tablets in a shard, // after they reach a given replication position (empty pos means immediate). // In general, we don't always expect all slaves to be ready to reload, // and the periodic schema reload makes them self-healing anyway. // So we do this on a best-effort basis, and log warnings for any tablets // that fail to reload within the context deadline. func (wr *Wrangler) ReloadSchemaShard(ctx context.Context, keyspace, shard, replicationPos string, concurrency *sync2.Semaphore, includeMaster bool) { tablets, err := wr.ts.GetTabletMapForShard(ctx, keyspace, shard) switch err { case topo.ErrPartialResult: // We got a partial result. Do what we can, but warn // that some may be missed. wr.logger.Warningf("ReloadSchemaShard(%v/%v) got a partial tablet list. Some tablets may not have schema reloaded (use vtctl ReloadSchema to fix individual tablets)", keyspace, shard) case nil: // Good case, keep going too. default: // This is best-effort, so just log it and move on. wr.logger.Warningf("ReloadSchemaShard(%v/%v) failed to load tablet list, will not reload schema (use vtctl ReloadSchemaShard to try again): %v", keyspace, shard, err) return } var wg sync.WaitGroup for _, ti := range tablets { if !includeMaster && ti.Type == topodatapb.TabletType_MASTER { // We don't need to reload on the master // because we assume ExecuteFetchAsDba() // already did that. 
continue } wg.Add(1) go func(tablet *topodatapb.Tablet) { defer wg.Done() concurrency.Acquire() defer concurrency.Release() if err := wr.tmc.ReloadSchema(ctx, tablet, replicationPos); err != nil { wr.logger.Warningf( "Failed to reload schema on slave tablet %v in %v/%v (use vtctl ReloadSchema to try again): %v", topoproto.TabletAliasString(tablet.Alias), keyspace, shard, err) } }(ti.Tablet) } wg.Wait() } // ReloadSchemaKeyspace reloads the schema in all shards in a // keyspace. The concurrency is shared across all shards (only that // many tablets will be reloaded at once). func (wr *Wrangler) ReloadSchemaKeyspace(ctx context.Context, keyspace string, concurrency *sync2.Semaphore, includeMaster bool) error { shards, err := wr.ts.GetShardNames(ctx, keyspace) if err != nil { return err } for _, shard := range shards { wr.ReloadSchemaShard(ctx, keyspace, shard, "" /* waitPosition */, concurrency, includeMaster) } return nil } // helper method to asynchronously diff a schema func (wr *Wrangler) diffSchema(ctx context.Context, masterSchema *tabletmanagerdatapb.SchemaDefinition, masterTabletAlias, alias *topodatapb.TabletAlias, excludeTables []string, includeViews bool, wg *sync.WaitGroup, er concurrency.ErrorRecorder) { defer wg.Done() log.Infof("Gathering schema for %v", topoproto.TabletAliasString(alias)) slaveSchema, err := wr.GetSchema(ctx, alias, nil, excludeTables, includeViews) if err != nil { er.RecordError(err) return } log.Infof("Diffing schema for %v", topoproto.TabletAliasString(alias)) tmutils.DiffSchema(topoproto.TabletAliasString(masterTabletAlias), masterSchema, topoproto.TabletAliasString(alias), slaveSchema, er) } // ValidateSchemaShard will diff the schema from all the tablets in the shard. func (wr *Wrangler) ValidateSchemaShard(ctx context.Context, keyspace, shard string, excludeTables []string, includeViews bool) error { si, err := wr.ts.GetShard(ctx, keyspace, shard) if err != nil { return err } // get schema from the master, or error if !si.HasMaster() { return fmt.Errorf("No master in shard %v/%v", keyspace, shard) } log.Infof("Gathering schema for master %v", topoproto.TabletAliasString(si.MasterAlias)) masterSchema, err := wr.GetSchema(ctx, si.MasterAlias, nil, excludeTables, includeViews) if err != nil { return err } // read all the aliases in the shard, that is all tablets that are // replicating from the master aliases, err := wr.ts.FindAllTabletAliasesInShard(ctx, keyspace, shard) if err != nil { return err } // then diff with all slaves er := concurrency.AllErrorRecorder{} wg := sync.WaitGroup{} for _, alias := range aliases { if topoproto.TabletAliasEqual(alias, si.MasterAlias) { continue } wg.Add(1) go wr.diffSchema(ctx, masterSchema, si.MasterAlias, alias, excludeTables, includeViews, &wg, &er) } wg.Wait() if er.HasErrors() { return fmt.Errorf("Schema diffs: %v", er.Error().Error()) } return nil } // ValidateSchemaKeyspace will diff the schema from all the tablets in // the keyspace. 
func (wr *Wrangler) ValidateSchemaKeyspace(ctx context.Context, keyspace string, excludeTables []string, includeViews bool) error { // find all the shards shards, err := wr.ts.GetShardNames(ctx, keyspace) if err != nil { return err } // corner cases if len(shards) == 0 { return fmt.Errorf("No shards in keyspace %v", keyspace) } sort.Strings(shards) if len(shards) == 1 { return wr.ValidateSchemaShard(ctx, keyspace, shards[0], excludeTables, includeViews) } // find the reference schema using the first shard's master si, err := wr.ts.GetShard(ctx, keyspace, shards[0]) if err != nil { return err } if !si.HasMaster() { return fmt.Errorf("No master in shard %v/%v", keyspace, shards[0]) } referenceAlias := si.MasterAlias log.Infof("Gathering schema for reference master %v", topoproto.TabletAliasString(referenceAlias)) referenceSchema, err := wr.GetSchema(ctx, referenceAlias, nil, excludeTables, includeViews) if err != nil { return err } // then diff with all other tablets everywhere er := concurrency.AllErrorRecorder{} wg := sync.WaitGroup{} // first diff the slaves in the reference shard 0 aliases, err := wr.ts.FindAllTabletAliasesInShard(ctx, keyspace, shards[0]) if err != nil { return err } for _, alias := range aliases { if topoproto.TabletAliasEqual(alias, si.MasterAlias) { continue } wg.Add(1) go wr.diffSchema(ctx, referenceSchema, referenceAlias, alias, excludeTables, includeViews, &wg, &er) } // then diffs all tablets in the other shards for _, shard := range shards[1:] { si, err := wr.ts.GetShard(ctx, keyspace, shard) if err != nil { er.RecordError(err) continue } if !si.HasMaster() { er.RecordError(fmt.Errorf("No master in shard %v/%v", keyspace, shard)) continue } aliases, err := wr.ts.FindAllTabletAliasesInShard(ctx, keyspace, shard) if err != nil { er.RecordError(err) continue } for _, alias := range aliases { wg.Add(1) go wr.diffSchema(ctx, referenceSchema, referenceAlias, alias, excludeTables, includeViews, &wg, &er) } } wg.Wait() if er.HasErrors() { return fmt.Errorf("Schema diffs: %v", er.Error().Error()) } return nil } // PreflightSchema will try a schema change on the remote tablet. func (wr *Wrangler) PreflightSchema(ctx context.Context, tabletAlias *topodatapb.TabletAlias, changes []string) ([]*tabletmanagerdatapb.SchemaChangeResult, error) { ti, err := wr.ts.GetTablet(ctx, tabletAlias) if err != nil { return nil, err } return wr.tmc.PreflightSchema(ctx, ti.Tablet, changes) } // CopySchemaShardFromShard copies the schema from a source shard to the specified destination shard. // For both source and destination it picks the master tablet. See also CopySchemaShard. func (wr *Wrangler) CopySchemaShardFromShard(ctx context.Context, tables, excludeTables []string, includeViews bool, sourceKeyspace, sourceShard, destKeyspace, destShard string, waitSlaveTimeout time.Duration) error { sourceShardInfo, err := wr.ts.GetShard(ctx, sourceKeyspace, sourceShard) if err != nil { return err } if sourceShardInfo.MasterAlias == nil { return fmt.Errorf("no master in shard record %v/%v. Consider running 'vtctl InitShardMaster' in case of a new shard or to reparent the shard to fix the topology data, or providing a non-master tablet alias", sourceKeyspace, sourceShard) } return wr.CopySchemaShard(ctx, sourceShardInfo.MasterAlias, tables, excludeTables, includeViews, destKeyspace, destShard, waitSlaveTimeout) } // CopySchemaShard copies the schema from a source tablet to the // specified shard. 
The schema is applied directly on the master of // the destination shard, and is propagated to the replicas through // binlogs. func (wr *Wrangler) CopySchemaShard(ctx context.Context, sourceTabletAlias *topodatapb.TabletAlias, tables, excludeTables []string, includeViews bool, destKeyspace, destShard string, waitSlaveTimeout time.Duration) error { destShardInfo, err := wr.ts.GetShard(ctx, destKeyspace, destShard) if err != nil { return err } if destShardInfo.MasterAlias == nil { return fmt.Errorf("no master in shard record %v/%v. Consider running 'vtctl InitShardMaster' in case of a new shard or to reparent the shard to fix the topology data", destKeyspace, destShard) } err = wr.copyShardMetadata(ctx, sourceTabletAlias, destShardInfo.MasterAlias) if err != nil { return err } diffs, err := wr.compareSchemas(ctx, sourceTabletAlias, destShardInfo.MasterAlias, tables, excludeTables, includeViews) if err != nil { return fmt.Errorf("CopySchemaShard failed because schemas could not be compared initially: %v", err) } if diffs == nil { // Return early because dest already has the same schema as source. return nil } sourceSd, err := wr.GetSchema(ctx, sourceTabletAlias, tables, excludeTables, includeViews) if err != nil { return err } createSQL := tmutils.SchemaDefinitionToSQLStrings(sourceSd) destTabletInfo, err := wr.ts.GetTablet(ctx, destShardInfo.MasterAlias) if err != nil { return err } for i, sqlLine := range createSQL { err = wr.applySQLShard(ctx, destTabletInfo, sqlLine, i == len(createSQL)-1) if err != nil { return fmt.Errorf("creating a table failed."+ " Most likely some tables already exist on the destination and differ from the source."+ " Please remove all to-be-copied tables from the destination manually and run this command again."+ " Full error: %v", err) } } // Remember the replication position after all the above were applied. destMasterPos, err := wr.tmc.MasterPosition(ctx, destTabletInfo.Tablet) if err != nil { return fmt.Errorf("CopySchemaShard: can't get replication position after schema applied: %v", err) } // Although the copy was successful, we have to verify it to catch the case // where the database already existed on the destination, but with different // options e.g. a different character set. // In that case, MySQL would have skipped our CREATE DATABASE IF NOT EXISTS // statement. We want to fail early in this case because vtworker SplitDiff // fails in case of such an inconsistency as well. diffs, err = wr.compareSchemas(ctx, sourceTabletAlias, destShardInfo.MasterAlias, tables, excludeTables, includeViews) if err != nil { return fmt.Errorf("CopySchemaShard failed because schemas could not be compared finally: %v", err) } if diffs != nil { return fmt.Errorf("CopySchemaShard was not successful because the schemas between the two tablets %v and %v differ: %v", sourceTabletAlias, destShardInfo.MasterAlias, diffs) } // Notify slaves to reload schema. This is best-effort. concurrency := sync2.NewSemaphore(10, 0) reloadCtx, cancel := context.WithTimeout(ctx, waitSlaveTimeout) defer cancel() wr.ReloadSchemaShard(reloadCtx, destKeyspace, destShard, destMasterPos, concurrency, true /* includeMaster */) return nil } // copyShardMetadata copies the contents of the _vt.shard_metadata table from the source // tablet to the destination tablet. It's assumed that the destination tablet is a // master and that binlogging is not turned off when INSERT statements are executed. 
func (wr *Wrangler) copyShardMetadata(ctx context.Context, srcTabletAlias *topodatapb.TabletAlias, destTabletAlias *topodatapb.TabletAlias) error { presenceResult, err := wr.ExecuteFetchAsDba(ctx, srcTabletAlias, "SELECT 1 FROM information_schema.tables WHERE table_schema = '_vt' AND table_name = 'shard_metadata'", 1, false, false) if err != nil { return err } if len(presenceResult.Rows) == 0 { log.Infof("_vt.shard_metadata doesn't exist on the source tablet %v, skipping its copy.", topoproto.TabletAliasString(srcTabletAlias)) return nil } dataProto, err := wr.ExecuteFetchAsDba(ctx, srcTabletAlias, "SELECT name, value FROM _vt.shard_metadata", 100, false, false) if err != nil { return err } data := sqltypes.Proto3ToResult(dataProto) for _, row := range data.Rows { name := row[0] value := row[1] queryBuf := bytes.Buffer{} queryBuf.WriteString("INSERT INTO _vt.shard_metadata (name, value) VALUES (") name.EncodeSQL(&queryBuf) queryBuf.WriteByte(',') value.EncodeSQL(&queryBuf) queryBuf.WriteString(") ON DUPLICATE KEY UPDATE value = ") value.EncodeSQL(&queryBuf) _, err := wr.ExecuteFetchAsDba(ctx, destTabletAlias, queryBuf.String(), 0, false, false) if err != nil { return err } } return nil } // compareSchemas returns nil if the schemas of the two tablets referenced by // "sourceAlias" and "destAlias" are identical. Otherwise, the difference is // returned as []string. func (wr *Wrangler) compareSchemas(ctx context.Context, sourceAlias, destAlias *topodatapb.TabletAlias, tables, excludeTables []string, includeViews bool) ([]string, error) { sourceSd, err := wr.GetSchema(ctx, sourceAlias, tables, excludeTables, includeViews) if err != nil { return nil, fmt.Errorf("failed to get schema from tablet %v. err: %v", sourceAlias, err) } destSd, err := wr.GetSchema(ctx, destAlias, tables, excludeTables, includeViews) if err != nil { return nil, fmt.Errorf("failed to get schema from tablet %v. err: %v", destAlias, err) } return tmutils.DiffSchemaToArray("source", sourceSd, "dest", destSd), nil } // applySQLShard applies a given SQL change on a given tablet alias. It allows executing arbitrary // SQL statements, but doesn't return any results, so it's only useful for SQL statements // that would be run for their effects (e.g., CREATE). // It works by applying the SQL statement on the shard's master tablet with replication turned on. // Thus it should be used only for changes that can be applied on a live instance without causing issues; // it shouldn't be used for anything that will require a pivot. // The SQL statement string is expected to have {{.DatabaseName}} in place of the actual db name. func (wr *Wrangler) applySQLShard(ctx context.Context, tabletInfo *topo.TabletInfo, change string, reloadSchema bool) error { filledChange, err := fillStringTemplate(change, map[string]string{"DatabaseName": tabletInfo.DbName()}) if err != nil { return fmt.Errorf("fillStringTemplate failed: %v", err) } ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() // Need to make sure that we enable binlog, since we're only applying the statement on masters. _, err = wr.tmc.ExecuteFetchAsDba(ctx, tabletInfo.Tablet, false, []byte(filledChange), 0, false, reloadSchema) return err } // fillStringTemplate returns the string template filled func
(tmpl string, vars interface{}) (string, error) { myTemplate := template.Must(template.New("").Parse(tmpl)) data := new(bytes.Buffer) if err := myTemplate.Execute(data, vars); err != nil { return "", err } return data.String(), nil }
fillStringTemplate
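// Standalone sketch (a separate program, not part of the wrangler package) of
// the {{.DatabaseName}} substitution performed by fillStringTemplate above.
// text/template is used here for brevity, while the file itself imports
// html/template; the table name and database name are made up.
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

func main() {
	change := "CREATE TABLE {{.DatabaseName}}.t (id BIGINT PRIMARY KEY)"
	tmpl := template.Must(template.New("").Parse(change))
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, map[string]string{"DatabaseName": "vt_test_keyspace"}); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // CREATE TABLE vt_test_keyspace.t (id BIGINT PRIMARY KEY)
}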
QuestionList.js
import React, {useEffect, useState} from 'react';
import Paper from "@material-ui/core/Paper"; import Table from "@material-ui/core/Table"; import TableBody from "@material-ui/core/TableBody"; import TableCell from "@material-ui/core/TableCell"; import TableContainer from "@material-ui/core/TableContainer"; import TableHead from "@material-ui/core/TableHead"; import TableRow from "@material-ui/core/TableRow"; import Typography from "@material-ui/core/Typography"; import Grid from "@material-ui/core/Grid"; import Add from "@material-ui/icons/Add"; import Delete from "@material-ui/icons/Delete"; import Edit from "@material-ui/icons/Edit"; import { Link } from "react-router-dom"; import {deleteEntry, fetchAll} from "../../actions/apiActions"; import DialogTitle from "@material-ui/core/DialogTitle"; import DialogActions from "@material-ui/core/DialogActions"; import Dialog from "@material-ui/core/Dialog"; export default function QuestionList() { const [isLoading, setIsLoading] = useState(true); const [questions, setQuestions] = useState([]); const [deleteDialogOpen, setDeleteDialogOpen] = useState(false); const [deletableQuestion, setDeletableQuestion] = useState( null); useEffect( () => { fetchAll("questions/group/theme") .then( response => { setQuestions(response); setIsLoading(false); }); }, []); return ( <div> {isLoading && <CircularProgress />} {questions && <div> <Dialog open={deleteDialogOpen} onClose={() => setDeleteDialogOpen(false)} aria-labelledby="form-dialog-title"> <DialogTitle id="form-dialog-title">Möchten Sie diese Frage wirklich löschen?</DialogTitle> <DialogActions> <Button onClick={() => setDeleteDialogOpen(false)} color="primary"> Abbrechen </Button> <Button onClick={() => { if(deletableQuestion) { deleteEntry("questions", deletableQuestion) .then(() => { setIsLoading(true); fetchAll("questions/group/theme") .then(response => { setQuestions(response); setIsLoading(false); }); }); setDeletableQuestion(null); } setDeleteDialogOpen(false); }} color="primary"> Löschen </Button> </DialogActions> </Dialog> <Grid> <div style={{marginBottom: "20px"}}> <Link to={"/question/create"}> <Button variant="contained" color="primary" startIcon={<Add/>}> Frage hinzufügen </Button> </Link> </div> <Grid container spacing={4}> {questions.map( (theme) => ( <Grid key={theme.id} item xs={12}> <Typography variant="h5" gutterBottom={true}> {theme.title} </Typography> <TableContainer component={Paper}> <Table> <TableHead> <TableRow> <TableCell style={{width: '220px'}}>Titel</TableCell> <TableCell>Frage</TableCell> <TableCell style={{width: '180px'}}>Typ</TableCell> <TableCell style={{width: '80px'}} align={"right"}>Aktionen</TableCell> </TableRow> </TableHead> <TableBody> {theme.questions.map( (question) => ( <TableRow key={question.id}> <TableCell>{question.header}</TableCell> <TableCell>{question.title}</TableCell> <TableCell>{question.question_type.title}</TableCell> <TableCell align={"right"}> <Link style={{color: "#000000"}} key={question.id} to={'/question/edit/'+question.id}> <Edit /> </Link> <Delete style={{cursor: "pointer"}} onClick={ () => { setDeletableQuestion(question.id); setDeleteDialogOpen(true) } } /> </TableCell> </TableRow> ))} </TableBody> </Table> </TableContainer> </Grid> ))} </Grid> </Grid> </div> } </div> ); };
import Button from "@material-ui/core/Button"; import CircularProgress from "@material-ui/core/CircularProgress";
openshift_facts.py
#!/usr/bin/python # pylint: disable=too-many-lines # -*- coding: utf-8 -*- # vim: expandtab:tabstop=4:shiftwidth=4 # Reason: Disable pylint too-many-lines because we don't want to split up this file. # Status: Permanently disabled to keep this module as self-contained as possible. """Ansible module for retrieving and setting openshift related facts""" DOCUMENTATION = ''' --- module: openshift_facts short_description: Cluster Facts author: Jason DeTiberus requirements: [ ] ''' EXAMPLES = ''' ''' import ConfigParser import copy import os import re import StringIO import yaml from distutils.util import strtobool from distutils.version import LooseVersion import struct import socket from dbus import SystemBus, Interface from dbus.exceptions import DBusException def migrate_docker_facts(facts): """ Apply migrations for docker facts """ params = { 'common': ( 'additional_registries', 'insecure_registries', 'blocked_registries', 'options' ), 'node': ( 'log_driver', 'log_options' ) } if 'docker' not in facts: facts['docker'] = {} for role in params.keys(): if role in facts: for param in params[role]: old_param = 'docker_' + param if old_param in facts[role]: facts['docker'][param] = facts[role].pop(old_param) if 'node' in facts and 'portal_net' in facts['node']: facts['docker']['hosted_registry_insecure'] = True facts['docker']['hosted_registry_network'] = facts['node'].pop('portal_net') # log_options was originally meant to be a comma-separated string, but # we now prefer an actual list, with backward compatibility: if 'log_options' in facts['docker'] and \ isinstance(facts['docker']['log_options'], basestring): facts['docker']['log_options'] = facts['docker']['log_options'].split(",") return facts # TODO: We should add a generic migration function that takes source and destination # paths and does the right thing rather than one function for common, one for node, etc. 
def migrate_common_facts(facts): """ Migrate facts from various roles into common """ params = { 'node': ('portal_net',), 'master': ('portal_net',) } if 'common' not in facts: facts['common'] = {} for role in params.keys(): if role in facts: for param in params[role]: if param in facts[role]: facts['common'][param] = facts[role].pop(param) return facts def migrate_node_facts(facts): """ Migrate facts from various roles into node """ params = { 'common': ('dns_ip',), } if 'node' not in facts: facts['node'] = {} for role in params.keys(): if role in facts: for param in params[role]: if param in facts[role]: facts['node'][param] = facts[role].pop(param) return facts def migrate_local_facts(facts): """ Apply migrations of local facts """ migrated_facts = copy.deepcopy(facts) migrated_facts = migrate_docker_facts(migrated_facts) migrated_facts = migrate_common_facts(migrated_facts) migrated_facts = migrate_node_facts(migrated_facts) migrated_facts = migrate_hosted_facts(migrated_facts) return migrated_facts def migrate_hosted_facts(facts): """ Apply migrations for hosted facts """ if 'master' in facts: if 'router_selector' in facts['master']: if 'hosted' not in facts: facts['hosted'] = {} if 'router' not in facts['hosted']: facts['hosted']['router'] = {} facts['hosted']['router']['selector'] = facts['master'].pop('router_selector') return facts def first_ip(network): """ Return the first IPv4 address in network Args: network (str): network in CIDR format Returns: str: first IPv4 address """ atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0] itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr)) (address, netmask) = network.split('/') netmask_i = (0xffffffff << (32 - atoi(netmask))) & 0xffffffff return itoa((atoi(address) & netmask_i) + 1) def hostname_valid(hostname): """ Test if specified hostname should be considered valid Args: hostname (str): hostname to test Returns: bool: True if valid, otherwise False """ if (not hostname or hostname.startswith('localhost') or hostname.endswith('localdomain') or len(hostname.split('.')) < 2): return False return True def choose_hostname(hostnames=None, fallback=''): """ Choose a hostname from the provided hostnames Given a list of hostnames and a fallback value, choose a hostname to use. This function will prefer fqdns if they exist (excluding any that begin with localhost or end with localdomain) over ip addresses. 
Args: hostnames (list): list of hostnames fallback (str): default value to set if hostnames does not contain a valid hostname Returns: str: chosen hostname """ hostname = fallback if hostnames is None: return hostname ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z' ips = [i for i in hostnames if (i is not None and isinstance(i, basestring) and re.match(ip_regex, i))] hosts = [i for i in hostnames if i is not None and i != '' and i not in ips] for host_list in (hosts, ips): for host in host_list: if hostname_valid(host): return host return hostname def query_metadata(metadata_url, headers=None, expect_json=False): """ Return metadata from the provided metadata_url Args: metadata_url (str): metadata url headers (dict): headers to set for metadata request expect_json (bool): does the metadata_url return json Returns: dict or list: metadata request result """ result, info = fetch_url(module, metadata_url, headers=headers) if info['status'] != 200: raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable") if expect_json: return module.from_json(result.read()) else: return [line.strip() for line in result.readlines()] def walk_metadata(metadata_url, headers=None, expect_json=False): """ Walk the metadata tree and return a dictionary of the entire tree Args: metadata_url (str): metadata url headers (dict): headers to set for metadata request expect_json (bool): does the metadata_url return json Returns: dict: the result of walking the metadata tree """ metadata = dict() for line in query_metadata(metadata_url, headers, expect_json): if line.endswith('/') and not line == 'public-keys/': key = line[:-1] metadata[key] = walk_metadata(metadata_url + line, headers, expect_json) else: results = query_metadata(metadata_url + line, headers, expect_json) if len(results) == 1: # disable pylint maybe-no-member because overloaded use of # the module name causes pylint to not detect that results # is an array or hash # pylint: disable=maybe-no-member metadata[line] = results.pop() else: metadata[line] = results return metadata def get_provider_metadata(metadata_url, supports_recursive=False, headers=None, expect_json=False): """ Retrieve the provider metadata Args: metadata_url (str): metadata url supports_recursive (bool): does the provider metadata api support recursion headers (dict): headers to set for metadata request expect_json (bool): does the metadata_url return json Returns: dict: the provider metadata """ try: if supports_recursive: metadata = query_metadata(metadata_url, headers, expect_json) else: metadata = walk_metadata(metadata_url, headers, expect_json) except OpenShiftFactsMetadataUnavailableError: metadata = None return metadata def normalize_gce_facts(metadata, facts): """ Normalize gce facts Args: metadata (dict): provider metadata facts (dict): facts to update Returns: dict: the result of adding the normalized metadata to the provided facts dict """ for interface in metadata['instance']['networkInterfaces']: int_info = dict(ips=[interface['ip']], network_type='gce') int_info['public_ips'] = [ac['externalIp'] for ac in interface['accessConfigs']] int_info['public_ips'].extend(interface['forwardedIps']) _, _, network_id = interface['network'].rpartition('/') int_info['network_id'] = network_id facts['network']['interfaces'].append(int_info) _, _, zone = metadata['instance']['zone'].rpartition('/') facts['zone'] = zone # GCE currently only supports a single interface facts['network']['ip'] = facts['network']['interfaces'][0]['ips'][0] pub_ip = 
facts['network']['interfaces'][0]['public_ips'][0] facts['network']['public_ip'] = pub_ip facts['network']['hostname'] = metadata['instance']['hostname'] # TODO: attempt to resolve public_hostname facts['network']['public_hostname'] = facts['network']['public_ip'] return facts def normalize_aws_facts(metadata, facts): """ Normalize aws facts Args: metadata (dict): provider metadata facts (dict): facts to update Returns: dict: the result of adding the normalized metadata to the provided facts dict """ for interface in sorted( metadata['network']['interfaces']['macs'].values(), key=lambda x: x['device-number'] ): int_info = dict() var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'} for ips_var, int_var in var_map.iteritems(): ips = interface.get(int_var) if isinstance(ips, basestring): int_info[ips_var] = [ips] else: int_info[ips_var] = ips if 'vpc-id' in interface: int_info['network_type'] = 'vpc' else: int_info['network_type'] = 'classic' if int_info['network_type'] == 'vpc': int_info['network_id'] = interface['subnet-id'] else: int_info['network_id'] = None facts['network']['interfaces'].append(int_info) facts['zone'] = metadata['placement']['availability-zone'] # TODO: actually attempt to determine default local and public ips # by using the ansible default ip fact and the ipv4-associations # from the ec2 metadata facts['network']['ip'] = metadata.get('local-ipv4') facts['network']['public_ip'] = metadata.get('public-ipv4') # TODO: verify that local hostname makes sense and is resolvable facts['network']['hostname'] = metadata.get('local-hostname') # TODO: verify that public hostname makes sense and is resolvable facts['network']['public_hostname'] = metadata.get('public-hostname') return facts def normalize_openstack_facts(metadata, facts): """ Normalize openstack facts Args: metadata (dict): provider metadata facts (dict): facts to update Returns: dict: the result of adding the normalized metadata to the provided facts dict """ # openstack ec2 compat api does not support network interfaces and # the version tested on did not include the info in the openstack # metadata api, should be updated if neutron exposes this. 
    facts['zone'] = metadata['availability_zone']

    local_ipv4 = metadata['ec2_compat']['local-ipv4'].split(',')[0]
    facts['network']['ip'] = local_ipv4
    facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4']

    # TODO: verify local hostname makes sense and is resolvable
    facts['network']['hostname'] = metadata['hostname']

    # TODO: verify that public hostname makes sense and is resolvable
    pub_h = metadata['ec2_compat']['public-hostname']
    facts['network']['public_hostname'] = pub_h

    return facts


def normalize_provider_facts(provider, metadata):
    """ Normalize provider facts

        Args:
            provider (str): host provider
            metadata (dict): provider metadata
        Returns:
            dict: the normalized provider facts
    """
    if provider is None or metadata is None:
        return {}

    # TODO: test for ipv6_enabled where possible (gce, aws do not support)
    # and configure ipv6 facts if available

    # TODO: add support for setting user_data if available

    facts = dict(name=provider, metadata=metadata,
                 network=dict(interfaces=[], ipv6_enabled=False))

    if provider == 'gce':
        facts = normalize_gce_facts(metadata, facts)
    elif provider == 'aws':
        facts = normalize_aws_facts(metadata, facts)
    elif provider == 'openstack':
        facts = normalize_openstack_facts(metadata, facts)
    return facts


def set_flannel_facts_if_unset(facts):
    """ Set flannel facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the flannel facts if
            they were not already present
    """
    if 'common' in facts:
        if 'use_flannel' not in facts['common']:
            use_flannel = False
            facts['common']['use_flannel'] = use_flannel
    return facts


def set_nuage_facts_if_unset(facts):
    """ Set nuage facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the nuage facts if
            they were not already present
    """
    if 'common' in facts:
        if 'use_nuage' not in facts['common']:
            use_nuage = False
            facts['common']['use_nuage'] = use_nuage
    return facts


def set_node_schedulability(facts):
    """ Set schedulable facts if not already present in facts dict
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated schedulable
            facts if they were not already present
    """
    if 'node' in facts:
        if 'schedulable' not in facts['node']:
            if 'master' in facts:
                facts['node']['schedulable'] = False
            else:
                facts['node']['schedulable'] = True
    return facts


def set_selectors(facts):
    """ Set selectors facts if not already present in facts dict
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated selectors
            facts if they were not already present
    """
    deployment_type = facts['common']['deployment_type']
    if deployment_type == 'online':
        selector = "type=infra"
    else:
        selector = "region=infra"

    if 'hosted' not in facts:
        facts['hosted'] = {}
    if 'router' not in facts['hosted']:
        facts['hosted']['router'] = {}

    if 'selector' not in facts['hosted']['router'] or facts['hosted']['router']['selector'] in [None, 'None']:
        facts['hosted']['router']['selector'] = selector

    if 'master' in facts:
        if 'infra_nodes' in facts['master']:
            if 'registry_selector' not in facts['master']:
                facts['master']['registry_selector'] = selector
    return facts


def set_metrics_facts_if_unset(facts):
    """ Set cluster metrics facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated cluster metrics
            facts if they were not already present
    """
    if 'common' in facts:
        if 'use_cluster_metrics' not in facts['common']:
            use_cluster_metrics = False
            facts['common']['use_cluster_metrics'] = use_cluster_metrics
    return facts


def set_dnsmasq_facts_if_unset(facts):
    """ Set dnsmasq facts if not already present in facts
    Args:
        facts (dict) existing facts
    Returns:
        facts (dict) updated facts with values set if not previously set
    """

    if 'common' in facts:
        # Only default use_dnsmasq when it has not been set explicitly; the
        # previous unconditional else branch clobbered a user-provided value.
        if 'use_dnsmasq' not in facts['common']:
            facts['common']['use_dnsmasq'] = bool(facts['common']['version_gte_3_2_or_1_2'])
        if 'master' in facts and 'dns_port' not in facts['master']:
            if facts['common']['use_dnsmasq']:
                facts['master']['dns_port'] = 8053
            else:
                facts['master']['dns_port'] = 53

    return facts


def set_project_cfg_facts_if_unset(facts):
    """ Set Project Configuration facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated Project
            Configuration facts if they were not already present
    """

    config = {
        'default_node_selector': '',
        'project_request_message': '',
        'project_request_template': '',
        'mcs_allocator_range': 's0:/2',
        'mcs_labels_per_project': 5,
        'uid_allocator_range': '1000000000-1999999999/10000'
    }

    if 'master' in facts:
        for key, value in config.items():
            if key not in facts['master']:
                facts['master'][key] = value

    return facts


def set_identity_providers_if_unset(facts):
    """ Set identity_providers fact if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated identity
            providers facts if they were not already present
    """
    if 'master' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'identity_providers' not in facts['master']:
            identity_provider = dict(
                name='allow_all', challenge=True, login=True,
                kind='AllowAllPasswordIdentityProvider'
            )
            if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
                identity_provider = dict(
                    name='deny_all', challenge=True, login=True,
                    kind='DenyAllPasswordIdentityProvider'
                )

            facts['master']['identity_providers'] = [identity_provider]

    return facts


def set_url_facts_if_unset(facts):
    """ Set url facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated url facts if they
            were not already present
    """
    if 'master' in facts:
        hostname = facts['common']['hostname']
        cluster_hostname = facts['master'].get('cluster_hostname')
        cluster_public_hostname = facts['master'].get('cluster_public_hostname')
        public_hostname = facts['common']['public_hostname']
        api_hostname = cluster_hostname if cluster_hostname else hostname
        api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
        console_path = facts['master']['console_path']
        etcd_hosts = facts['master']['etcd_hosts']

        use_ssl = dict(
            api=facts['master']['api_use_ssl'],
            public_api=facts['master']['api_use_ssl'],
            loopback_api=facts['master']['api_use_ssl'],
            console=facts['master']['console_use_ssl'],
            public_console=facts['master']['console_use_ssl'],
            etcd=facts['master']['etcd_use_ssl']
        )

        ports = dict(
            api=facts['master']['api_port'],
            public_api=facts['master']['api_port'],
            loopback_api=facts['master']['api_port'],
            console=facts['master']['console_port'],
            public_console=facts['master']['console_port'],
            etcd=facts['master']['etcd_port'],
        )

        etcd_urls = []
        if etcd_hosts !=
'': facts['master']['etcd_port'] = ports['etcd'] facts['master']['embedded_etcd'] = False for host in etcd_hosts: etcd_urls.append(format_url(use_ssl['etcd'], host, ports['etcd'])) else: etcd_urls = [format_url(use_ssl['etcd'], hostname, ports['etcd'])] facts['master'].setdefault('etcd_urls', etcd_urls) prefix_hosts = [('api', api_hostname), ('public_api', api_public_hostname), ('loopback_api', hostname)] for prefix, host in prefix_hosts: facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix], host, ports[prefix])) r_lhn = "{0}:{1}".format(hostname, ports['api']).replace('.', '-') r_lhu = "system:openshift-master/{0}:{1}".format(api_hostname, ports['api']).replace('.', '-') facts['master'].setdefault('loopback_cluster_name', r_lhn) facts['master'].setdefault('loopback_context_name', "default/{0}/system:openshift-master".format(r_lhn)) facts['master'].setdefault('loopback_user', r_lhu) prefix_hosts = [('console', api_hostname), ('public_console', api_public_hostname)] for prefix, host in prefix_hosts: facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix], host, ports[prefix], console_path)) return facts def set_aggregate_facts(facts): """ Set aggregate facts Args: facts (dict): existing facts Returns: dict: the facts dict updated with aggregated facts """ all_hostnames = set() internal_hostnames = set() kube_svc_ip = first_ip(facts['common']['portal_net']) if 'common' in facts: all_hostnames.add(facts['common']['hostname']) all_hostnames.add(facts['common']['public_hostname']) all_hostnames.add(facts['common']['ip']) all_hostnames.add(facts['common']['public_ip']) facts['common']['kube_svc_ip'] = kube_svc_ip internal_hostnames.add(facts['common']['hostname']) internal_hostnames.add(facts['common']['ip']) cluster_domain = facts['common']['dns_domain'] if 'master' in facts: if 'cluster_hostname' in facts['master']: all_hostnames.add(facts['master']['cluster_hostname']) if 'cluster_public_hostname' in facts['master']: all_hostnames.add(facts['master']['cluster_public_hostname']) svc_names = ['openshift', 'openshift.default', 'openshift.default.svc', 'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default', 'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain] all_hostnames.update(svc_names) internal_hostnames.update(svc_names) all_hostnames.add(kube_svc_ip) internal_hostnames.add(kube_svc_ip) facts['common']['all_hostnames'] = list(all_hostnames) facts['common']['internal_hostnames'] = list(internal_hostnames) return facts def set_etcd_facts_if_unset(facts): """ If using embedded etcd, loads the data directory from master-config.yaml. If using standalone etcd, loads ETCD_DATA_DIR from etcd.conf. If anything goes wrong parsing these, the fact will not be set. 
""" if 'master' in facts and facts['master']['embedded_etcd']: etcd_facts = facts['etcd'] if 'etcd' in facts else dict() if 'etcd_data_dir' not in etcd_facts: try: # Parse master config to find actual etcd data dir: master_cfg_path = os.path.join(facts['common']['config_base'], 'master/master-config.yaml') master_cfg_f = open(master_cfg_path, 'r') config = yaml.safe_load(master_cfg_f.read()) master_cfg_f.close() etcd_facts['etcd_data_dir'] = \ config['etcdConfig']['storageDirectory'] facts['etcd'] = etcd_facts # We don't want exceptions bubbling up here: # pylint: disable=broad-except except Exception: pass else: etcd_facts = facts['etcd'] if 'etcd' in facts else dict() # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf: try: # Add a fake section for parsing: ini_str = '[root]\n' + open('/etc/etcd/etcd.conf', 'r').read() ini_fp = StringIO.StringIO(ini_str) config = ConfigParser.RawConfigParser() config.readfp(ini_fp) etcd_data_dir = config.get('root', 'ETCD_DATA_DIR') if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'): etcd_data_dir = etcd_data_dir[1:-1] etcd_facts['etcd_data_dir'] = etcd_data_dir facts['etcd'] = etcd_facts # We don't want exceptions bubbling up here: # pylint: disable=broad-except except Exception: pass return facts def set_deployment_facts_if_unset(facts): """ Set Facts that vary based on deployment_type. This currently includes common.service_type, common.config_base, master.registry_url, node.registry_url, node.storage_plugin_deps Args: facts (dict): existing facts Returns: dict: the facts dict updated with the generated deployment_type facts """ # disabled to avoid breaking up facts related to deployment type into # multiple methods for now. # pylint: disable=too-many-statements, too-many-branches if 'common' in facts: deployment_type = facts['common']['deployment_type'] if 'service_type' not in facts['common']: service_type = 'atomic-openshift' if deployment_type == 'origin': service_type = 'origin' elif deployment_type in ['enterprise']: service_type = 'openshift' facts['common']['service_type'] = service_type if 'config_base' not in facts['common']: config_base = '/etc/origin' if deployment_type in ['enterprise']: config_base = '/etc/openshift' # Handle upgrade scenarios when symlinks don't yet exist: if not os.path.exists(config_base) and os.path.exists('/etc/openshift'): config_base = '/etc/openshift' facts['common']['config_base'] = config_base if 'data_dir' not in facts['common']: data_dir = '/var/lib/origin' if deployment_type in ['enterprise']: data_dir = '/var/lib/openshift' # Handle upgrade scenarios when symlinks don't yet exist: if not os.path.exists(data_dir) and os.path.exists('/var/lib/openshift'): data_dir = '/var/lib/openshift' facts['common']['data_dir'] = data_dir if 'docker' in facts: deployment_type = facts['common']['deployment_type'] if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']: addtl_regs = facts['docker'].get('additional_registries', []) ent_reg = 'registry.access.redhat.com' if ent_reg not in addtl_regs: facts['docker']['additional_registries'] = addtl_regs + [ent_reg] for role in ('master', 'node'): if role in facts: deployment_type = facts['common']['deployment_type'] if 'registry_url' not in facts[role]: registry_url = 'openshift/origin-${component}:${version}' if deployment_type in ['enterprise', 'online', 'openshift-enterprise']: registry_url = 'openshift3/ose-${component}:${version}' elif deployment_type == 'atomic-enterprise': registry_url = 'aep3_beta/aep-${component}:${version}' 
facts[role]['registry_url'] = registry_url if 'master' in facts: deployment_type = facts['common']['deployment_type'] openshift_features = ['Builder', 'S2IBuilder', 'WebConsole'] if 'disabled_features' in facts['master']: if deployment_type == 'atomic-enterprise': curr_disabled_features = set(facts['master']['disabled_features']) facts['master']['disabled_features'] = list(curr_disabled_features.union(openshift_features)) else: if deployment_type == 'atomic-enterprise': facts['master']['disabled_features'] = openshift_features if 'node' in facts: deployment_type = facts['common']['deployment_type'] if 'storage_plugin_deps' not in facts['node']: if deployment_type in ['openshift-enterprise', 'atomic-enterprise', 'origin']: facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi'] else: facts['node']['storage_plugin_deps'] = [] return facts def set_version_facts_if_unset(facts): """ Set version facts. This currently includes common.version and common.version_gte_3_1_or_1_1. Args: facts (dict): existing facts Returns: dict: the facts dict updated with version facts. """ if 'common' in facts: deployment_type = facts['common']['deployment_type'] version = get_openshift_version(facts) if version is not None: facts['common']['version'] = version if deployment_type == 'origin': version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('1.1.0') version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('1.1.1') version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('1.2.0') else: version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('3.0.2.905') version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('3.1.1') version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('3.1.1.901') else: version_gte_3_1_or_1_1 = True version_gte_3_1_1_or_1_1_1 = True version_gte_3_2_or_1_2 = True facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1 facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1 facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2 if version_gte_3_2_or_1_2: examples_content_version = 'v1.2' elif version_gte_3_1_or_1_1: examples_content_version = 'v1.1' else: examples_content_version = 'v1.0' facts['common']['examples_content_version'] = examples_content_version return facts def set_manageiq_facts_if_unset(facts): """ Set manageiq facts. This currently includes common.use_manageiq. Args: facts (dict): existing facts Returns: dict: the facts dict updated with version facts. 
        Raises:
            OpenShiftFactsInternalError:
    """
    # Fail loudly if the prerequisite version fact has not been set yet.
    # (These two conditions were previously nested, so the error path could
    # never actually be reached.)
    if 'common' not in facts or 'version_gte_3_1_or_1_1' not in facts['common']:
        raise OpenShiftFactsInternalError(
            "Invalid invocation: The required facts are not set"
        )
    if 'use_manageiq' not in facts['common']:
        facts['common']['use_manageiq'] = facts['common']['version_gte_3_1_or_1_1']

    return facts


def set_sdn_facts_if_unset(facts, system_facts):
    """ Set sdn facts if not already present in facts dict

        Args:
            facts (dict): existing facts
            system_facts (dict): ansible_facts
        Returns:
            dict: the facts dict updated with the generated sdn facts if they
            were not already present
    """
    if 'common' in facts:
        use_sdn = facts['common']['use_openshift_sdn']
        if not (use_sdn == '' or isinstance(use_sdn, bool)):
            use_sdn = safe_get_bool(use_sdn)
            facts['common']['use_openshift_sdn'] = use_sdn
        if 'sdn_network_plugin_name' not in facts['common']:
            plugin = 'redhat/openshift-ovs-subnet' if use_sdn else ''
            facts['common']['sdn_network_plugin_name'] = plugin

    if 'master' in facts:
        if 'sdn_cluster_network_cidr' not in facts['master']:
            facts['master']['sdn_cluster_network_cidr'] = '10.1.0.0/16'
        if 'sdn_host_subnet_length' not in facts['master']:
            facts['master']['sdn_host_subnet_length'] = '8'

    if 'node' in facts and 'sdn_mtu' not in facts['node']:
        node_ip = facts['common']['ip']

        # default MTU if interface MTU cannot be detected
        facts['node']['sdn_mtu'] = '1450'

        for val in system_facts.itervalues():
            if isinstance(val, dict) and 'mtu' in val:
                mtu = val['mtu']

                if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
                    facts['node']['sdn_mtu'] = str(mtu - 50)

    return facts


def migrate_oauth_template_facts(facts):
    """
    Migrate an old oauth template fact to a newer format if it's present.

    The legacy 'oauth_template' fact was just a filename, and assumed you were
    setting the 'login' template.

    The new pluralized 'oauth_templates' fact is a dict mapping the template
    name to a filename.

    Simplify the code after this by merging the old fact into the new.
    """
    if 'master' in facts and 'oauth_template' in facts['master']:
        if 'oauth_templates' not in facts['master']:
            facts['master']['oauth_templates'] = {"login": facts['master']['oauth_template']}
        elif 'login' not in facts['master']['oauth_templates']:
            facts['master']['oauth_templates']['login'] = facts['master']['oauth_template']
    return facts


def format_url(use_ssl, hostname, port, path=''):
    """ Format url based on ssl flag, hostname, port and path

        Args:
            use_ssl (bool): is ssl enabled
            hostname (str): hostname
            port (str): port
            path (str): url path
        Returns:
            str: The generated url string
    """
    scheme = 'https' if use_ssl else 'http'
    netloc = hostname
    if (use_ssl and port != '443') or (not use_ssl and port != '80'):
        netloc += ":%s" % port
    return urlparse.urlunparse((scheme, netloc, path, '', '', ''))


def get_current_config(facts):
    """ Get current openshift config

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the current openshift config
    """
    current_config = dict()
    roles = [role for role in facts if role not in ['common', 'provider']]

    for role in roles:
        if 'roles' in current_config:
            current_config['roles'].append(role)
        else:
            current_config['roles'] = [role]

        # TODO: parse the /etc/sysconfig/openshift-{master,node} config to
        # determine the location of files.
        # TODO: I suspect this isn't working right now, but it doesn't prevent
        # anything from working properly as far as I can tell, perhaps because
        # we override the kubeconfig path everywhere we use it?
# Query kubeconfig settings kubeconfig_dir = '/var/lib/origin/openshift.local.certificates' if role == 'node': kubeconfig_dir = os.path.join( kubeconfig_dir, "node-%s" % facts['common']['hostname'] ) kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig') if (os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path)): try: _, output, _ = module.run_command( ["/usr/bin/openshift", "ex", "config", "view", "-o", "json", "--kubeconfig=%s" % kubeconfig_path], check_rc=False ) config = json.loads(output) cad = 'certificate-authority-data' try: for cluster in config['clusters']: config['clusters'][cluster][cad] = 'masked' except KeyError: pass try: for user in config['users']: config['users'][user][cad] = 'masked' config['users'][user]['client-key-data'] = 'masked' except KeyError: pass current_config['kubeconfig'] = config # override pylint broad-except warning, since we do not want # to bubble up any exceptions if oc config view # fails # pylint: disable=broad-except except Exception: pass return current_config def build_kubelet_args(facts): """ Build node kubelet_args """ cloud_cfg_path = os.path.join(facts['common']['config_base'], 'cloudprovider') if 'node' in facts: kubelet_args = {} if 'cloudprovider' in facts: if 'kind' in facts['cloudprovider']: if facts['cloudprovider']['kind'] == 'aws': kubelet_args['cloud-provider'] = ['aws'] kubelet_args['cloud-config'] = [cloud_cfg_path + '/aws.conf'] if facts['cloudprovider']['kind'] == 'openstack': kubelet_args['cloud-provider'] = ['openstack'] kubelet_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf'] if kubelet_args != {}: facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], []) return facts def build_controller_args(facts): """ Build master controller_args """ cloud_cfg_path = os.path.join(facts['common']['config_base'], 'cloudprovider') if 'master' in facts: controller_args = {} if 'cloudprovider' in facts: if 'kind' in facts['cloudprovider']: if facts['cloudprovider']['kind'] == 'aws': controller_args['cloud-provider'] = ['aws'] controller_args['cloud-config'] = [cloud_cfg_path + '/aws.conf'] if facts['cloudprovider']['kind'] == 'openstack': controller_args['cloud-provider'] = ['openstack'] controller_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf'] if controller_args != {}: facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], []) return facts def build_api_server_args(facts): """ Build master api_server_args """ cloud_cfg_path = os.path.join(facts['common']['config_base'], 'cloudprovider') if 'master' in facts: api_server_args = {} if 'cloudprovider' in facts: if 'kind' in facts['cloudprovider']: if facts['cloudprovider']['kind'] == 'aws': api_server_args['cloud-provider'] = ['aws'] api_server_args['cloud-config'] = [cloud_cfg_path + '/aws.conf'] if facts['cloudprovider']['kind'] == 'openstack': api_server_args['cloud-provider'] = ['openstack'] api_server_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf'] if api_server_args != {}: facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], []) return facts def is_service_running(service): """ Queries systemd through dbus to see if the service is running """ service_running = False bus = SystemBus() systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1') manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager') try: service_unit = service if service.endswith('.service') else manager.GetUnit('{0}.service'.format(service)) 
        service_proxy = bus.get_object('org.freedesktop.systemd1', str(service_unit))
        service_properties = Interface(service_proxy,
                                       dbus_interface='org.freedesktop.DBus.Properties')
        service_load_state = service_properties.Get(
            'org.freedesktop.systemd1.Unit', 'LoadState')
        service_active_state = service_properties.Get(
            'org.freedesktop.systemd1.Unit', 'ActiveState')
        if service_load_state == 'loaded' and service_active_state == 'active':
            service_running = True
    except DBusException:
        pass

    return service_running


def get_version_output(binary, version_cmd):
    """ runs and returns the version output for a command """
    cmd = []
    for item in (binary, version_cmd):
        if isinstance(item, list):
            cmd.extend(item)
        else:
            cmd.append(item)

    # Initialize output so a missing binary returns '' rather than raising
    # an UnboundLocalError.
    output = ''
    if os.path.isfile(cmd[0]):
        _, output, _ = module.run_command(cmd)
    return output


def get_docker_version_info():
    """ Parses and returns the docker version info """
    result = None
    if is_service_running('docker'):
        version_info = yaml.safe_load(get_version_output('/usr/bin/docker', 'version'))
        if 'Server' in version_info:
            result = {
                'api_version': version_info['Server']['API version'],
                'version': version_info['Server']['Version']
            }
    return result


def get_openshift_version(facts):
    """ Get current version of openshift on the host

        Args:
            facts (dict): existing facts
        Returns:
            version: the current openshift version
    """
    version = None

    # No need to run this method repeatedly on a system if we already know the
    # version
    if 'common' in facts:
        if 'version' in facts['common'] and facts['common']['version'] is not None:
            return facts['common']['version']

    if os.path.isfile('/usr/bin/openshift'):
        _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])
        version = parse_openshift_version(output)

    # openshift_facts runs before openshift_docker_facts. However, it will be
    # called again and set properly throughout the playbook run. This could be
    # refactored to simply set the openshift.common.version in the
    # openshift_docker_facts role but it would take reworking some assumptions
    # on how get_openshift_version is called.
    if 'is_containerized' in facts['common'] and safe_get_bool(facts['common']['is_containerized']):
        if 'docker' in facts and 'openshift_version' in facts['docker']:
            version = facts['docker']['openshift_version']

    return version


def parse_openshift_version(output):
    """ Parse the version number from 'openshift version' output

        Args:
            output (string): output of 'openshift version'
        Returns:
            string: the version number
    """
    versions = dict(e.split(' v') for e in output.splitlines() if ' v' in e)
    return versions.get('openshift', '')


def apply_provider_facts(facts, provider_facts):
    """ Apply provider facts to supplied facts dict

        Args:
            facts (dict): facts dict to update
            provider_facts (dict): provider facts to apply
        Returns:
            dict: the merged facts
    """
    if not provider_facts:
        return facts

    common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')]
    for h_var, ip_var in common_vars:
        ip_value = provider_facts['network'].get(ip_var)
        if ip_value:
            facts['common'][ip_var] = ip_value

        facts['common'][h_var] = choose_hostname(
            [provider_facts['network'].get(h_var)],
            facts['common'][ip_var]
        )

    facts['provider'] = provider_facts
    return facts


# Disabling pylint too many branches. This function needs refactoring,
# but it is a very core part of openshift_facts.
# pylint: disable=too-many-branches def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite): """ Recursively merge facts dicts Args: orig (dict): existing facts new (dict): facts to update additive_facts_to_overwrite (list): additive facts to overwrite in jinja '.' notation ex: ['master.named_certificates'] protected_facts_to_overwrite (list): protected facts to overwrite in jinja '.' notation ex: ['master.master_count'] Returns: dict: the merged facts """ additive_facts = ['named_certificates'] protected_facts = ['ha', 'master_count'] # Facts we do not ever want to merge. These originate in inventory variables # and contain JSON dicts. We don't ever want to trigger a merge # here, just completely overwrite with the new if they are present there. inventory_json_facts = ['admission_plugin_config', 'kube_admission_plugin_config', 'image_policy_config'] facts = dict() for key, value in orig.iteritems(): # Key exists in both old and new facts. if key in new: if key in inventory_json_facts: # Watchout for JSON facts that sometimes load as strings. # (can happen if the JSON contains a boolean) if isinstance(new[key], basestring): facts[key] = yaml.safe_load(new[key]) else: facts[key] = copy.deepcopy(new[key]) # Continue to recurse if old and new fact is a dictionary. elif isinstance(value, dict) and isinstance(new[key], dict): # Collect the subset of additive facts to overwrite if # key matches. These will be passed to the subsequent # merge_facts call. relevant_additive_facts = [] for item in additive_facts_to_overwrite: if '.' in item and item.startswith(key + '.'): relevant_additive_facts.append(item) # Collect the subset of protected facts to overwrite # if key matches. These will be passed to the # subsequent merge_facts call. relevant_protected_facts = [] for item in protected_facts_to_overwrite: if '.' in item and item.startswith(key + '.'): relevant_protected_facts.append(item) facts[key] = merge_facts(value, new[key], relevant_additive_facts, relevant_protected_facts) # Key matches an additive fact and we are not overwriting # it so we will append the new value to the existing value. elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]: if isinstance(value, list) and isinstance(new[key], list): new_fact = [] for item in copy.deepcopy(value) + copy.deepcopy(new[key]): if item not in new_fact: new_fact.append(item) facts[key] = new_fact # Key matches a protected fact and we are not overwriting # it so we will determine if it is okay to change this # fact. elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]: # The master count (int) can only increase unless it # has been passed as a protected fact to overwrite. if key == 'master_count': if int(value) <= int(new[key]): facts[key] = copy.deepcopy(new[key]) else: module.fail_json(msg='openshift_facts received a lower value for openshift.master.master_count') # ha (bool) can not change unless it has been passed # as a protected fact to overwrite. if key == 'ha': if safe_get_bool(value) != safe_get_bool(new[key]): module.fail_json(msg='openshift_facts received a different value for openshift.master.ha') else: facts[key] = value # No other condition has been met. Overwrite the old fact # with the new value. else: facts[key] = copy.deepcopy(new[key]) # Key isn't in new so add it to facts to keep it. 
else: facts[key] = copy.deepcopy(value) new_keys = set(new.keys()) - set(orig.keys()) for key in new_keys: facts[key] = copy.deepcopy(new[key]) return facts def save_local_facts(filename, facts): """ Save local facts Args: filename (str): local facts file facts (dict): facts to set """ try: fact_dir = os.path.dirname(filename) if not os.path.exists(fact_dir): os.makedirs(fact_dir) with open(filename, 'w') as fact_file: fact_file.write(module.jsonify(facts)) os.chmod(filename, 0o600) except (IOError, OSError) as ex: raise OpenShiftFactsFileWriteError( "Could not create fact file: %s, error: %s" % (filename, ex) ) def get_local_facts_from_file(filename): """ Retrieve local facts from fact file Args: filename (str): local facts file Returns: dict: the retrieved facts """ local_facts = dict() try: # Handle conversion of INI style facts file to json style ini_facts = ConfigParser.SafeConfigParser() ini_facts.read(filename) for section in ini_facts.sections(): local_facts[section] = dict() for key, value in ini_facts.items(section): local_facts[section][key] = value except (ConfigParser.MissingSectionHeaderError, ConfigParser.ParsingError): try: with open(filename, 'r') as facts_file: local_facts = json.load(facts_file) except (ValueError, IOError): pass return local_facts def sort_unique(alist): """ Sorts and de-dupes a list Args: list: a list Returns: list: a sorted de-duped list """ alist.sort() out = list() for i in alist: if i not in out: out.append(i) return out def safe_get_bool(fact): """ Get a boolean fact safely. Args: facts: fact to convert Returns: bool: given fact as a bool """ return bool(strtobool(str(fact))) def set_proxy_facts(facts): """ Set global proxy facts and promote defaults from http_proxy, https_proxy, no_proxy to the more specific builddefaults and builddefaults_git vars. 1. http_proxy, https_proxy, no_proxy 2. builddefaults_* 3. builddefaults_git_* Args: facts(dict): existing facts Returns: facts(dict): Updated facts with missing values """ if 'common' in facts: common = facts['common'] if 'http_proxy' in common or 'https_proxy' in common: if 'generate_no_proxy_hosts' in common and \ common['generate_no_proxy_hosts']: if 'no_proxy' in common and \ isinstance(common['no_proxy'], basestring): common['no_proxy'] = common['no_proxy'].split(",") else: common['no_proxy'] = [] if 'no_proxy_internal_hostnames' in common: common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(',')) common['no_proxy'].append('.' 
+ common['dns_domain']) common['no_proxy'].append(common['hostname']) common['no_proxy'] = sort_unique(common['no_proxy']) facts['common'] = common if 'builddefaults' in facts: facts['master']['admission_plugin_config'] = dict() builddefaults = facts['builddefaults'] common = facts['common'] if 'http_proxy' not in builddefaults and 'http_proxy' in common: builddefaults['http_proxy'] = common['http_proxy'] if 'https_proxy' not in builddefaults and 'https_proxy' in common: builddefaults['https_proxy'] = common['https_proxy'] if 'no_proxy' not in builddefaults and 'no_proxy' in common: builddefaults['no_proxy'] = common['no_proxy'] if 'git_http_proxy' not in builddefaults and 'http_proxy' in builddefaults: builddefaults['git_http_proxy'] = builddefaults['http_proxy'] if 'git_https_proxy' not in builddefaults and 'https_proxy' in builddefaults: builddefaults['git_https_proxy'] = builddefaults['https_proxy'] if 'admission_plugin_config' not in builddefaults: builddefaults['admission_plugin_config'] = dict() if 'config' in builddefaults and ('http_proxy' in builddefaults or \ 'https_proxy' in builddefaults): facts['master']['admission_plugin_config'].update(builddefaults['config']) facts['builddefaults'] = builddefaults return facts # pylint: disable=too-many-statements def set_container_facts_if_unset(facts): """ Set containerized facts. Args: facts (dict): existing facts Returns: dict: the facts dict updated with the generated containerization facts """ deployment_type = facts['common']['deployment_type'] if deployment_type in ['enterprise', 'openshift-enterprise']: master_image = 'openshift3/ose' cli_image = master_image node_image = 'openshift3/node' ovs_image = 'openshift3/openvswitch' etcd_image = 'registry.access.redhat.com/rhel7/etcd' pod_image = 'openshift3/ose-pod' router_image = 'openshift3/ose-haproxy-router' registry_image = 'openshift3/ose-docker-registry' deployer_image = 'openshift3/ose-deployer' elif deployment_type == 'atomic-enterprise': master_image = 'aep3_beta/aep' cli_image = master_image node_image = 'aep3_beta/node' ovs_image = 'aep3_beta/openvswitch' etcd_image = 'registry.access.redhat.com/rhel7/etcd' pod_image = 'aep3_beta/aep-pod' router_image = 'aep3_beta/aep-haproxy-router' registry_image = 'aep3_beta/aep-docker-registry' deployer_image = 'aep3_beta/aep-deployer' else: master_image = 'openshift/origin' cli_image = master_image node_image = 'openshift/node' ovs_image = 'openshift/openvswitch' etcd_image = 'registry.access.redhat.com/rhel7/etcd' pod_image = 'openshift/origin-pod' router_image = 'openshift/origin-haproxy-router' registry_image = 'openshift/origin-docker-registry' deployer_image = 'openshift/origin-deployer' facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted') if 'is_containerized' not in facts['common']: facts['common']['is_containerized'] = facts['common']['is_atomic'] if 'cli_image' not in facts['common']: facts['common']['cli_image'] = cli_image if 'pod_image' not in facts['common']: facts['common']['pod_image'] = pod_image if 'router_image' not in facts['common']: facts['common']['router_image'] = router_image if 'registry_image' not in facts['common']: facts['common']['registry_image'] = registry_image if 'deployer_image' not in facts['common']: facts['common']['deployer_image'] = deployer_image if 'etcd' in facts and 'etcd_image' not in facts['etcd']: facts['etcd']['etcd_image'] = etcd_image if 'master' in facts and 'master_image' not in facts['master']: facts['master']['master_image'] = master_image if 'node' in facts: if 
'node_image' not in facts['node']:
            facts['node']['node_image'] = node_image
        if 'ovs_image' not in facts['node']:
            facts['node']['ovs_image'] = ovs_image

    if safe_get_bool(facts['common']['is_containerized']):
        facts['common']['admin_binary'] = '/usr/local/bin/oadm'
        facts['common']['client_binary'] = '/usr/local/bin/oc'

    return facts


def set_installed_variant_rpm_facts(facts):
    """ Set RPM facts of installed variant
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with installed_variant_rpms
    """
    installed_rpms = []
    for base_rpm in ['openshift', 'atomic-openshift', 'origin']:
        optional_rpms = ['master', 'node', 'clients', 'sdn-ovs']
        variant_rpms = [base_rpm] + \
                       ['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \
                       ['tuned-profiles-%s-node' % base_rpm]
        for rpm in variant_rpms:
            exit_code, _, _ = module.run_command(['rpm', '-q', rpm])
            if exit_code == 0:
                installed_rpms.append(rpm)

    facts['common']['installed_variant_rpms'] = installed_rpms
    return facts


class OpenShiftFactsInternalError(Exception):
    """Origin Facts Error"""
    pass
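# Illustrative behavior of a few helpers defined above (all values are
# hypothetical; shown doctest-style for readability):
#
#   >>> first_ip('172.30.0.0/16')          # network address + 1
#   '172.30.0.1'
#   >>> choose_hostname(['10.0.0.1', 'master.example.com'], fallback='10.0.0.1')
#   'master.example.com'
#   >>> merge_facts({'master': {'named_certificates': [{'certfile': 'a.crt'}]}},
#   ...             {'master': {'named_certificates': [{'certfile': 'b.crt'}]}},
#   ...             [], [])                # additive fact: the lists union
#   {'master': {'named_certificates': [{'certfile': 'a.crt'}, {'certfile': 'b.crt'}]}}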
class OpenShiftFactsUnsupportedRoleError(Exception): """Origin Facts Unsupported Role Error""" pass class OpenShiftFactsFileWriteError(Exception): """Origin Facts File Write Error""" pass class OpenShiftFactsMetadataUnavailableError(Exception): """Origin Facts Metadata Unavailable Error""" pass class OpenShiftFacts(object): """ Origin Facts Attributes: facts (dict): facts for the host Args: module (AnsibleModule): an AnsibleModule object role (str): role for setting local facts filename (str): local facts file to use local_facts (dict): local facts to set additive_facts_to_overwrite (list): additive facts to overwrite in jinja '.' notation ex: ['master.named_certificates'] protected_facts_to_overwrite (list): protected facts to overwrite in jinja '.' notation ex: ['master.master_count'] Raises: OpenShiftFactsUnsupportedRoleError: """ known_roles = ['builddefaults', 'cloudprovider', 'common', 'docker', 'etcd', 'hosted', 'master', 'node'] # Disabling too-many-arguments, this should be cleaned up as a TODO item. # pylint: disable=too-many-arguments def __init__(self, role, filename, local_facts, additive_facts_to_overwrite=None, openshift_env=None, openshift_env_structures=None, protected_facts_to_overwrite=None): self.changed = False self.filename = filename if role not in self.known_roles: raise OpenShiftFactsUnsupportedRoleError( "Role %s is not supported by this module" % role ) self.role = role self.system_facts = ansible_facts(module) self.facts = self.generate_facts(local_facts, additive_facts_to_overwrite, openshift_env, openshift_env_structures, protected_facts_to_overwrite) def generate_facts(self, local_facts, additive_facts_to_overwrite, openshift_env, openshift_env_structures, protected_facts_to_overwrite): """ Generate facts Args: local_facts (dict): local_facts for overriding generated defaults additive_facts_to_overwrite (list): additive facts to overwrite in jinja '.' notation ex: ['master.named_certificates'] openshift_env (dict): openshift_env facts for overriding generated defaults protected_facts_to_overwrite (list): protected facts to overwrite in jinja '.' 
notation ex: ['master.master_count'] Returns: dict: The generated facts """ local_facts = self.init_local_facts(local_facts, additive_facts_to_overwrite, openshift_env, openshift_env_structures, protected_facts_to_overwrite) roles = local_facts.keys() if 'common' in local_facts and 'deployment_type' in local_facts['common']: deployment_type = local_facts['common']['deployment_type'] else: deployment_type = 'origin' defaults = self.get_defaults(roles, deployment_type) provider_facts = self.init_provider_facts() facts = apply_provider_facts(defaults, provider_facts) facts = merge_facts(facts, local_facts, additive_facts_to_overwrite, protected_facts_to_overwrite) facts = migrate_oauth_template_facts(facts) facts['current_config'] = get_current_config(facts) facts = set_url_facts_if_unset(facts) facts = set_project_cfg_facts_if_unset(facts) facts = set_flannel_facts_if_unset(facts) facts = set_nuage_facts_if_unset(facts) facts = set_node_schedulability(facts) facts = set_selectors(facts) facts = set_metrics_facts_if_unset(facts) facts = set_identity_providers_if_unset(facts) facts = set_sdn_facts_if_unset(facts, self.system_facts) facts = set_deployment_facts_if_unset(facts) facts = set_container_facts_if_unset(facts) facts = build_kubelet_args(facts) facts = build_controller_args(facts) facts = build_api_server_args(facts) facts = set_version_facts_if_unset(facts) facts = set_dnsmasq_facts_if_unset(facts) facts = set_manageiq_facts_if_unset(facts) facts = set_aggregate_facts(facts) facts = set_etcd_facts_if_unset(facts) facts = set_proxy_facts(facts) if not safe_get_bool(facts['common']['is_containerized']): facts = set_installed_variant_rpm_facts(facts) return dict(openshift=facts) def get_defaults(self, roles, deployment_type): """ Get default fact values Args: roles (list): list of roles for this host Returns: dict: The generated default facts """ defaults = {} ip_addr = self.system_facts['default_ipv4']['address'] exit_code, output, _ = module.run_command(['hostname', '-f']) hostname_f = output.strip() if exit_code == 0 else '' hostname_values = [hostname_f, self.system_facts['nodename'], self.system_facts['fqdn']] hostname = choose_hostname(hostname_values, ip_addr) defaults['common'] = dict(use_openshift_sdn=True, ip=ip_addr, public_ip=ip_addr, deployment_type=deployment_type, hostname=hostname, public_hostname=hostname, portal_net='172.30.0.0/16', client_binary='oc', admin_binary='oadm', dns_domain='cluster.local', install_examples=True, debug_level=2) if 'master' in roles: scheduler_predicates = [ {"name": "MatchNodeSelector"}, {"name": "PodFitsResources"}, {"name": "PodFitsPorts"}, {"name": "NoDiskConflict"}, {"name": "Region", "argument": {"serviceAffinity" : {"labels" : ["region"]}}} ] scheduler_priorities = [ {"name": "LeastRequestedPriority", "weight": 1}, {"name": "SelectorSpreadPriority", "weight": 1}, {"name": "Zone", "weight" : 2, "argument": {"serviceAntiAffinity" : {"label": "zone"}}} ] defaults['master'] = dict(api_use_ssl=True, api_port='8443', controllers_port='8444', console_use_ssl=True, console_path='/console', console_port='8443', etcd_use_ssl=True, etcd_hosts='', etcd_port='4001', portal_net='172.30.0.0/16', embedded_etcd=True, embedded_kube=True, embedded_dns=True, bind_addr='0.0.0.0', session_max_seconds=3600, session_name='ssn', session_secrets_file='', access_token_max_seconds=86400, auth_token_max_seconds=500, oauth_grant_method='auto', scheduler_predicates=scheduler_predicates, scheduler_priorities=scheduler_priorities) if 'node' in roles: defaults['node'] = 
dict(labels={}, annotations={}, iptables_sync_period='5s', local_quota_per_fsgroup="", set_node_ip=False) if 'docker' in roles: docker = dict(disable_push_dockerhub=False) version_info = get_docker_version_info() if version_info is not None: docker['api_version'] = version_info['api_version'] docker['version'] = version_info['version'] defaults['docker'] = docker if 'cloudprovider' in roles: defaults['cloudprovider'] = dict(kind=None) if 'hosted' in roles or self.role == 'hosted': defaults['hosted'] = dict( metrics=dict( deploy=False, duration=7, resolution=10, storage=dict( kind=None, volume=dict( name='metrics', size='10Gi' ), nfs=dict( directory='/exports', options='*(rw,root_squash)'), host=None, access_modes=['ReadWriteMany'], create_pv=True ) ), registry=dict( storage=dict( kind=None, volume=dict( name='registry', size='5Gi' ), nfs=dict( directory='/exports', options='*(rw,root_squash)'), host=None, access_modes=['ReadWriteMany'], create_pv=True ) ), router=dict() ) return defaults def guess_host_provider(self): """ Guess the host provider Returns: dict: The generated default facts for the detected provider """ # TODO: cloud provider facts should probably be submitted upstream product_name = self.system_facts['product_name'] product_version = self.system_facts['product_version'] virt_type = self.system_facts['virtualization_type'] virt_role = self.system_facts['virtualization_role'] provider = None metadata = None # TODO: this is not exposed through module_utils/facts.py in ansible, # need to create PR for ansible to expose it bios_vendor = get_file_content( '/sys/devices/virtual/dmi/id/bios_vendor' ) if bios_vendor == 'Google': provider = 'gce' metadata_url = ('http://metadata.google.internal/' 'computeMetadata/v1/?recursive=true') headers = {'Metadata-Flavor': 'Google'} metadata = get_provider_metadata(metadata_url, True, headers, True) # Filter sshKeys and serviceAccounts from gce metadata if metadata: metadata['project']['attributes'].pop('sshKeys', None) metadata['instance'].pop('serviceAccounts', None) elif (virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version)): provider = 'aws' metadata_url = 'http://169.254.169.254/latest/meta-data/' metadata = get_provider_metadata(metadata_url) elif re.search(r'OpenStack', product_name): provider = 'openstack' metadata_url = ('http://169.254.169.254/openstack/latest/' 'meta_data.json') metadata = get_provider_metadata(metadata_url, True, None, True) if metadata: ec2_compat_url = 'http://169.254.169.254/latest/meta-data/' metadata['ec2_compat'] = get_provider_metadata( ec2_compat_url ) # disable pylint maybe-no-member because overloaded use of # the module name causes pylint to not detect that results # is an array or hash # pylint: disable=maybe-no-member # Filter public_keys and random_seed from openstack metadata metadata.pop('public_keys', None) metadata.pop('random_seed', None) if not metadata['ec2_compat']: metadata = None return dict(name=provider, metadata=metadata) def init_provider_facts(self): """ Initialize the provider facts Returns: dict: The normalized provider facts """ provider_info = self.guess_host_provider() provider_facts = normalize_provider_facts( provider_info.get('name'), provider_info.get('metadata') ) return provider_facts @staticmethod def split_openshift_env_fact_keys(openshift_env_fact, openshift_env_structures): """ Split openshift_env facts based on openshift_env structures. 
Args: openshift_env_fact (string): the openshift_env fact to split ex: 'openshift_cloudprovider_openstack_auth_url' openshift_env_structures (list): a list of structures to determine fact keys ex: ['openshift.cloudprovider.openstack.*'] Returns: list: a list of keys that represent the fact ex: ['openshift', 'cloudprovider', 'openstack', 'auth_url'] """ # By default, we'll split an openshift_env fact by underscores. fact_keys = openshift_env_fact.split('_') # Determine if any of the provided variable structures match the fact. matching_structure = None if openshift_env_structures != None: for structure in openshift_env_structures: if re.match(structure, openshift_env_fact): matching_structure = structure # Fact didn't match any variable structures so return the default fact keys. if matching_structure is None: return fact_keys final_keys = [] structure_keys = matching_structure.split('.') for structure_key in structure_keys: # Matched current key. Add to final keys. if structure_key == fact_keys[structure_keys.index(structure_key)]: final_keys.append(structure_key) # Wildcard means we will be taking everything from here to the end of the fact. elif structure_key == '*': final_keys.append('_'.join(fact_keys[structure_keys.index(structure_key):])) # Shouldn't have gotten here, return the fact keys. else: return fact_keys return final_keys # Disabling too-many-branches and too-many-locals. # This should be cleaned up as a TODO item. #pylint: disable=too-many-branches, too-many-locals def init_local_facts(self, facts=None, additive_facts_to_overwrite=None, openshift_env=None, openshift_env_structures=None, protected_facts_to_overwrite=None): """ Initialize the local facts Args: facts (dict): local facts to set additive_facts_to_overwrite (list): additive facts to overwrite in jinja '.' notation ex: ['master.named_certificates'] openshift_env (dict): openshift env facts to set protected_facts_to_overwrite (list): protected facts to overwrite in jinja '.' 
notation ex: ['master.master_count'] Returns: dict: The result of merging the provided facts with existing local facts """ changed = False facts_to_set = dict() if facts is not None: facts_to_set[self.role] = facts if openshift_env != {} and openshift_env != None: for fact, value in openshift_env.iteritems(): oo_env_facts = dict() current_level = oo_env_facts keys = self.split_openshift_env_fact_keys(fact, openshift_env_structures)[1:] if len(keys) > 0 and keys[0] != self.role: continue for key in keys: if key == keys[-1]: current_level[key] = value elif key not in current_level: current_level[key] = dict() current_level = current_level[key] facts_to_set = merge_facts(orig=facts_to_set, new=oo_env_facts, additive_facts_to_overwrite=[], protected_facts_to_overwrite=[]) local_facts = get_local_facts_from_file(self.filename) migrated_facts = migrate_local_facts(local_facts) new_local_facts = merge_facts(migrated_facts, facts_to_set, additive_facts_to_overwrite, protected_facts_to_overwrite) if 'docker' in new_local_facts: # remove duplicate and empty strings from registry lists for cat in ['additional', 'blocked', 'insecure']: key = '{0}_registries'.format(cat) if key in new_local_facts['docker']: val = new_local_facts['docker'][key] if isinstance(val, basestring): val = [x.strip() for x in val.split(',')] new_local_facts['docker'][key] = list(set(val) - set([''])) # Convert legacy log_options comma sep string to a list if present: if 'log_options' in new_local_facts['docker'] and \ isinstance(new_local_facts['docker']['log_options'], basestring): new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',') new_local_facts = self.remove_empty_facts(new_local_facts) if new_local_facts != local_facts: self.validate_local_facts(new_local_facts) changed = True if not module.check_mode: save_local_facts(self.filename, new_local_facts) self.changed = changed return new_local_facts def remove_empty_facts(self, facts=None): """ Remove empty facts Args: facts (dict): facts to clean """ facts_to_remove = [] for fact, value in facts.iteritems(): if isinstance(facts[fact], dict): facts[fact] = self.remove_empty_facts(facts[fact]) else: if value == "" or value == [""] or value is None: facts_to_remove.append(fact) for fact in facts_to_remove: del facts[fact] return facts def validate_local_facts(self, facts=None): """ Validate local facts Args: facts (dict): local facts to validate """ invalid_facts = dict() invalid_facts = self.validate_master_facts(facts, invalid_facts) if invalid_facts: msg = 'Invalid facts detected:\n' for key in invalid_facts.keys(): msg += '{0}: {1}\n'.format(key, invalid_facts[key]) module.fail_json(msg=msg, changed=self.changed) # disabling pylint errors for line-too-long since we're dealing # with best effort reduction of error messages here. # disabling errors for too-many-branches since we require checking # many conditions. # pylint: disable=line-too-long, too-many-branches @staticmethod def validate_master_facts(facts, invalid_facts): """ Validate master facts Args: facts (dict): local facts to validate invalid_facts (dict): collected invalid_facts Returns: dict: Invalid facts """ if 'master' in facts: # openshift.master.session_auth_secrets if 'session_auth_secrets' in facts['master']: session_auth_secrets = facts['master']['session_auth_secrets'] if not issubclass(type(session_auth_secrets), list): invalid_facts['session_auth_secrets'] = 'Expects session_auth_secrets is a list.' 
                elif 'session_encryption_secrets' not in facts['master']:
                    invalid_facts['session_auth_secrets'] = ('openshift_master_session_encryption_secrets must be set '
                                                             'if openshift_master_session_auth_secrets is provided.')
                elif len(session_auth_secrets) != len(facts['master']['session_encryption_secrets']):
                    invalid_facts['session_auth_secrets'] = ('openshift_master_session_auth_secrets and '
                                                             'openshift_master_session_encryption_secrets must be '
                                                             'equal length.')
                else:
                    for secret in session_auth_secrets:
                        if len(secret) < 32:
                            invalid_facts['session_auth_secrets'] = ('Invalid secret in session_auth_secrets. '
                                                                     'Secrets must be at least 32 characters in length.')
            # openshift.master.session_encryption_secrets
            if 'session_encryption_secrets' in facts['master']:
                session_encryption_secrets = facts['master']['session_encryption_secrets']
                if not issubclass(type(session_encryption_secrets), list):
                    invalid_facts['session_encryption_secrets'] = 'Expects session_encryption_secrets to be a list.'
                elif 'session_auth_secrets' not in facts['master']:
                    invalid_facts['session_encryption_secrets'] = ('openshift_master_session_auth_secrets must be '
                                                                   'set if openshift_master_session_encryption_secrets '
                                                                   'is provided.')
                else:
                    for secret in session_encryption_secrets:
                        if len(secret) not in [16, 24, 32]:
                            invalid_facts['session_encryption_secrets'] = ('Invalid secret in session_encryption_secrets. '
                                                                           'Secrets must be 16, 24, or 32 characters in length.')
        return invalid_facts


def main():
    """ main """
    # disabling pylint errors for global-variable-undefined and invalid-name
    # for 'global module' usage, since it is required to use ansible_facts
    # pylint: disable=global-variable-undefined, invalid-name
    global module
    module = AnsibleModule(
        argument_spec=dict(
            role=dict(default='common', required=False,
                      choices=OpenShiftFacts.known_roles),
            local_facts=dict(default=None, type='dict', required=False),
            additive_facts_to_overwrite=dict(default=[], type='list', required=False),
            openshift_env=dict(default={}, type='dict', required=False),
            openshift_env_structures=dict(default=[], type='list', required=False),
            protected_facts_to_overwrite=dict(default=[], type='list', required=False),
        ),
        supports_check_mode=True,
        add_file_common_args=True,
    )

    role = module.params['role']
    local_facts = module.params['local_facts']
    additive_facts_to_overwrite = module.params['additive_facts_to_overwrite']
    openshift_env = module.params['openshift_env']
    openshift_env_structures = module.params['openshift_env_structures']
    protected_facts_to_overwrite = module.params['protected_facts_to_overwrite']

    fact_file = '/etc/ansible/facts.d/openshift.fact'

    openshift_facts = OpenShiftFacts(role,
                                     fact_file,
                                     local_facts,
                                     additive_facts_to_overwrite,
                                     openshift_env,
                                     openshift_env_structures,
                                     protected_facts_to_overwrite)

    file_params = module.params.copy()
    file_params['path'] = fact_file
    file_args = module.load_file_common_arguments(file_params)
    changed = module.set_fs_attributes_if_different(file_args,
                                                    openshift_facts.changed)

    return module.exit_json(changed=changed,
                            ansible_facts=openshift_facts.facts)


# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.facts import *
from ansible.module_utils.urls import *

if __name__ == '__main__':
    main()
"""Origin Facts Error""" pass
CalendarModel.d.ts
import ComponentModel from '../../model/Component'; import Calendar from './Calendar'; import { ComponentOption, BoxLayoutOptionMixin, LayoutOrient, LineStyleOption, ItemStyleOption, LabelOption, OptionDataValueDate } from '../../util/types'; import GlobalModel from '../../model/Global'; import Model from '../../model/Model'; export interface CalendarMonthLabelFormatterCallbackParams { nameMap: string; yyyy: string; yy: string; MM: string; M: number; } export interface CalendarYearLabelFormatterCallbackParams { nameMap: string; start: string; end: string; } export interface CalendarOption extends ComponentOption, BoxLayoutOptionMixin { cellSize?: number | 'auto' | (number | 'auto')[]; orient?: LayoutOrient; splitLine?: { show?: boolean; lineStyle?: LineStyleOption; }; itemStyle?: ItemStyleOption; range?: OptionDataValueDate | (OptionDataValueDate)[]; dayLabel?: Omit<LabelOption, 'position'> & { firstDay?: number; margin?: number | string; position?: 'start' | 'end'; nameMap?: 'en' | 'cn' | string[]; }; monthLabel?: Omit<LabelOption, 'position'> & { margin?: number; position?: 'start' | 'end'; nameMap?: 'en' | 'cn' | string[]; formatter?: string | ((params: CalendarMonthLabelFormatterCallbackParams) => string); }; yearLabel?: Omit<LabelOption, 'position'> & { margin?: number; position?: 'top' | 'bottom' | 'left' | 'right'; formatter?: string | ((params: CalendarYearLabelFormatterCallbackParams) => string); }; } declare class
extends ComponentModel<CalendarOption> { static type: string; type: string; coordinateSystem: Calendar; init(option: CalendarOption, parentModel: Model, ecModel: GlobalModel): void; mergeOption(option: CalendarOption): void; getCellSize(): LineAndPositionSetting[]; static defaultOption: CalendarOption; } export default CalendarModel;
CalendarModel
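// --- Illustrative sketch (not part of the declaration above): a
// CalendarOption literal as permitted by the interface. The concrete
// values are hypothetical.
const exampleCalendar: CalendarOption = {
    orient: 'horizontal',
    // width 'auto', 20px per cell vertically
    cellSize: ['auto', 20],
    range: ['2017-01-01', '2017-06-30'],
    dayLabel: { firstDay: 1, nameMap: 'en' },
    monthLabel: { nameMap: 'en', position: 'start' },
    yearLabel: { position: 'left' },
    splitLine: { show: true },
};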
github.js
//
// Github Extension (WIP)
// ~~strike-through~~ -> <del>strike-through</del>
//
(function () {
    var github = function (converter) {
        return [
            {
                // strike-through
                // NOTE: showdown already replaced "~" with "~T", so we need to adjust accordingly.
                type    : 'lang',
                regex   : '(~T){2}([^~]+)(~T){2}',
                replace : function (match, prefix, content, suffix) {
                    return '<del>' + content + '</del>';
                }
            },
            {
                // GFM newline and underscore modifications, happen BEFORE showdown
                type   : 'lang',
                filter : function (text) {
                    var extractions = {},
                        imageMarkdownRegex = /^(?:\{(.*?)\})?!(?:\[([^\n\]]*)\])(?:\(([^\n\]]*)\))?$/gim,
                        hashID = 0;

                    function hashId() {
                        return hashID++;
                    }

                    // Extract pre blocks
                    text = text.replace(/<pre>[\s\S]*?<\/pre>/gim, function (x) {
                        var hash = hashId();
                        extractions[hash] = x;
                        return "{gfm-js-extract-pre-" + hash + "}";
                    });

                    // Extract code blocks (non-greedy, so multiple fenced blocks are extracted separately)
                    text = text.replace(/```[\s\S]*?```/gim, function (x) {
                        var hash = hashId();
                        extractions[hash] = x;
                        return "{gfm-js-extract-code-" + hash + "}";
                    });

                    // prevent foo_bar and foo_bar_baz from ending up with an italic word in the middle
                    text = text.replace(/(^(?! {4}|\t)\w+_\w+_\w[\w_]*)/gm, function (x) {
                        return x.replace(/_/gm, '\\_');
                    });

                    text = text.replace(/\{gfm-js-extract-code-([0-9]+)\}/gm, function (x, y) {
                        return extractions[y];
                    });

                    // in very clear cases, let newlines become <br /> tags
                    text = text.replace(/^[\w\<\"\'][^\n]*\n+/gm, function (x) {
                        return x.match(/\n{2}/) ? x : x.trim() + "  \n";
                    });

                    // better URL support, but no title support
                    text = text.replace(imageMarkdownRegex, function (match, key, alt, src) {
                        if (src) {
                            return '<img src="' + src + '" alt="' + alt + '" />';
                        }
                        return '';
                    });

                    text = text.replace(/\{gfm-js-extract-pre-([0-9]+)\}/gm, function (x, y) {
                        return "\n\n" + extractions[y];
                    });

                    return text;
                }
            },
            {
                // GFM autolinking & custom image handling, happens AFTER showdown
                type   : 'html',
                filter : function (text) {
                    var refExtractions = {},
                        preExtractions = {},
                        hashID = 0;

                    function hashId() {
                        return hashID++;
                    }

                    // Extract pre blocks
                    text = text.replace(/<(pre|code)>[\s\S]*?<\/(\1)>/gim, function (x) {
                        var hash = hashId();
                        preExtractions[hash] = x;
                        return "{gfm-js-extract-pre-" + hash + "}";
                    });

                    // filter out def urls
                    // from Marked https://github.com/chjj/marked/blob/master/lib/marked.js#L24
                    text = text.replace(/^ *\[([^\]]+)\]: *<?([^\s>]+)>?(?: +["(]([^\n]+)[")])? *(?:\n+|$)/gmi,
                        function (x) {
                            var hash = hashId();
                            refExtractions[hash] = x;
                            return "{gfm-js-extract-ref-url-" + hash + "}";
                        });

                    // match a URL
                    // adapted from https://gist.github.com/jorilallo/1283095#L158
                    // and http://blog.stevenlevithan.com/archives/mimic-lookbehind-javascript
                    text = text.replace(/(\]\(|\]|\[|<a[^\>]*?\>)?https?\:\/\/[^"\s\<\>]*[^.,;'">\:\s\<\>\)\]\!]/gmi,
                        function (wholeMatch, lookBehind, matchIndex) {
                            // Skip the match if we are inside an HTML tag, or if there is a
                            // markdown/anchor prefix (captured by the lookBehind group)
                            var left = text.slice(0, matchIndex),
                                right = text.slice(matchIndex);
                            if ((left.match(/<[^>]+$/) && right.match(/^[^>]*>/)) || lookBehind) {
                                return wholeMatch;
                            }
                            // Otherwise wrap the bare URL in an <a> tag
                            return "<a href='" + wholeMatch + "'>" + wholeMatch + "</a>";
                        });

                    // match email
                    text = text.replace(/[a-z0-9_\-+=.]+@[a-z0-9\-]+(\.[a-z0-9-]+)+/gmi, function (wholeMatch) {
                        return "<a href='mailto:" + wholeMatch + "'>" + wholeMatch + "</a>";
                    });

                    // replace extractions
                    text = text.replace(/\{gfm-js-extract-pre-([0-9]+)\}/gm, function (x, y) {
                        return preExtractions[y];
                    });

                    text = text.replace(/\{gfm-js-extract-ref-url-([0-9]+)\}/gi, function (x, y) {
                        return "\n\n" + refExtractions[y];
                    });

                    return text;
                }
            }
        ];
    };

    // Client-side export
    if (typeof window !== 'undefined' && window.Showdown && window.Showdown.extensions) {
        window.Showdown.extensions.github = github;
    }
    // Server-side export
    if (typeof module !== 'undefined') module.exports = github;
}());
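// --- Illustrative usage sketch (assumes a classic Showdown 0.x build with
// extension support loaded alongside this file):
//
//   var converter = new Showdown.converter({ extensions: ['github'] });
//   converter.makeHtml('~~done~~');   // -> '<p><del>done</del></p>'
//
// Bare URLs and e-mail addresses in the output HTML are auto-linked by the
// 'html'-type filter above.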
gen_fmt_linux_amd64.go
package stdgolibs import ( pkg "fmt" "reflect" ) func init()
{ registerValues("fmt", map[string]reflect.Value{ // Functions "Scan": reflect.ValueOf(pkg.Scan), "Scanln": reflect.ValueOf(pkg.Scanln), "Scanf": reflect.ValueOf(pkg.Scanf), "Sscan": reflect.ValueOf(pkg.Sscan), "Sscanln": reflect.ValueOf(pkg.Sscanln), "Sscanf": reflect.ValueOf(pkg.Sscanf), "Fscan": reflect.ValueOf(pkg.Fscan), "Fscanln": reflect.ValueOf(pkg.Fscanln), "Fscanf": reflect.ValueOf(pkg.Fscanf), "Errorf": reflect.ValueOf(pkg.Errorf), "Fprintf": reflect.ValueOf(pkg.Fprintf), "Printf": reflect.ValueOf(pkg.Printf), "Sprintf": reflect.ValueOf(pkg.Sprintf), "Fprint": reflect.ValueOf(pkg.Fprint), "Print": reflect.ValueOf(pkg.Print), "Sprint": reflect.ValueOf(pkg.Sprint), "Fprintln": reflect.ValueOf(pkg.Fprintln), "Println": reflect.ValueOf(pkg.Println), "Sprintln": reflect.ValueOf(pkg.Sprintln), // Consts // Variables }) registerTypes("fmt", map[string]reflect.Type{ // Non interfaces }) }
4.Events.js
"use strict"; var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { if (k2 === undefined) k2 = k; Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } }); }) : (function(o, m, k, k2) { if (k2 === undefined) k2 = k; o[k2] = m[k]; })); var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) { Object.defineProperty(o, "default", { enumerable: true, value: v }); }) : function(o, v) { o["default"] = v; }); var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) { var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d; if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc); else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r; return c > 3 && r && Object.defineProperty(target, key, r), r; }; var __importStar = (this && this.__importStar) || function (mod) { if (mod && mod.__esModule) return mod; var result = {}; if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); __setModuleDefault(result, mod); return result; }; var __importDefault = (this && this.__importDefault) || function (mod) { return (mod && mod.__esModule) ? mod : { "default": mod }; }; Object.defineProperty(exports, "__esModule", { value: true }); exports.WAConnection = void 0; const QR = __importStar(require("qrcode-terminal")); const _3_Connect_1 = require("./3.Connect"); const Constants_1 = require("./Constants"); const Utils_1 = require("./Utils"); const keyed_db_1 = __importDefault(require("@adiwajshing/keyed-db")); const Mutex_1 = require("./Mutex"); class
extends _3_Connect_1.WAConnection { constructor() { super(); /** find a chat or return an error */ this.assertChatGet = jid => { const chat = this.chats.get(jid); if (!chat) throw new Error(`chat '${jid}' not found`); return chat; }; this.emitParticipantsUpdate = (jid, participants, action) => { const chat = this.chats.get(jid); const meta = chat === null || chat === void 0 ? void 0 : chat.metadata; if (meta) { switch (action) { case 'add': participants.forEach(jid => (meta.participants.push({ ...this.contactAddOrGet(jid), isAdmin: false, isSuperAdmin: false }))); break; case 'remove': meta.participants = meta.participants.filter(p => !participants.includes(p.jid)); break; case 'promote': case 'demote': const isAdmin = action === 'promote'; meta.participants.forEach(p => { if (participants.includes(p.jid)) p.isAdmin = isAdmin; }); break; } } this.emit('group-participants-update', { jid, participants, action }); }; this.emitGroupUpdate = (jid, update) => { const chat = this.chats.get(jid); if (chat.metadata) Object.assign(chat.metadata, update); this.emit('group-update', { jid, ...update }); }; this.chatUpdateTime = (chat, stamp) => this.chats.update(chat.jid, c => c.t = stamp); this.setMaxListeners(30); this.chatsDebounceTimeout.setTask(() => { this.logger.debug('pinging with chats query'); this.sendChatsQuery(this.msgCount); this.chatsDebounceTimeout.start(); }); this.on('open', () => { // send queries WA Web expects this.sendBinary(['query', { type: 'contacts', epoch: '1' }, null], [Constants_1.WAMetric.queryContact, Constants_1.WAFlag.ignore]); this.sendBinary(['query', { type: 'status', epoch: '1' }, null], [Constants_1.WAMetric.queryStatus, Constants_1.WAFlag.ignore]); this.sendBinary(['query', { type: 'quick_reply', epoch: '1' }, null], [Constants_1.WAMetric.queryQuickReply, Constants_1.WAFlag.ignore]); this.sendBinary(['query', { type: 'label', epoch: '1' }, null], [Constants_1.WAMetric.queryLabel, Constants_1.WAFlag.ignore]); this.sendBinary(['query', { type: 'emoji', epoch: '1' }, null], [Constants_1.WAMetric.queryEmoji, Constants_1.WAFlag.ignore]); this.sendBinary(['action', { type: 'set', epoch: '1' }, [['presence', { type: Constants_1.Presence.available }, null]]], [Constants_1.WAMetric.presence, Constants_1.WAFlag.available]); if (this.connectOptions.queryChatsTillReceived) { this.chatsDebounceTimeout.start(); } else { this.sendChatsQuery(1); } this.logger.debug('sent init queries'); }); // on disconnects this.on('CB:Cmd,type:disconnect', json => (this.state === 'open' && this.unexpectedDisconnect(json[1].kind || 'unknown'))); this.on('CB:Pong', json => { if (!json[1]) { this.unexpectedDisconnect(Constants_1.DisconnectReason.close); this.logger.info('Connection terminated by phone, closing...'); } else if (this.phoneConnected !== json[1]) { this.phoneConnected = json[1]; this.emit('connection-phone-change', { connected: this.phoneConnected }); } }); // chats received this.on('CB:response,type:chat', json => { if (json[1].duplicate || !json[2]) return; this.chatsDebounceTimeout.cancel(); const chats = new keyed_db_1.default(this.chatOrderingKey, c => c.jid); json[2].forEach(([item, chat]) => { if (!chat) { this.logger.warn(`unexpectedly got null chat: ${item}`, chat); return; } chat.jid = Utils_1.whatsappID(chat.jid); chat.t = +chat.t; chat.count = +chat.count; chat.messages = Utils_1.newMessagesDB(); // chats data (log json to see what it looks like) chats.insertIfAbsent(chat); }); this.logger.info(`received ${json[2].length} chats`); const oldChats = this.chats; const 
updatedChats = []; let hasNewChats = false; chats.all().forEach(chat => { const respectiveContact = this.contacts[chat.jid]; chat.name = (respectiveContact === null || respectiveContact === void 0 ? void 0 : respectiveContact.name) || (respectiveContact === null || respectiveContact === void 0 ? void 0 : respectiveContact.notify) || chat.name; const oldChat = oldChats.get(chat.jid); if (!oldChat) { hasNewChats = true; } else { chat.messages = oldChat.messages; if (oldChat.t !== chat.t || oldChat.modify_tag !== chat.modify_tag) { const changes = Utils_1.shallowChanges(oldChat, chat, { lookForDeletedKeys: true }); delete chat.metadata; // remove group metadata as that may have changed; TODO, write better mechanism for this delete changes.messages; updatedChats.push({ ...changes, jid: chat.jid }); } } }); this.chats = chats; this.lastChatsReceived = new Date(); updatedChats.length > 0 && this.emit('chats-update', updatedChats); this.emit('chats-received', { hasNewChats }); }); // we store these last messages const lastMessages = {}; // keep track of overlaps, // if there are no overlaps of messages and we had messages present, we clear the previous messages // this prevents missing messages in conversations let overlaps = {}; const onLastBatchOfDataReceived = () => { // find which chats had missing messages // list out all the jids, and how many messages we've cached now const chatsWithMissingMessages = Object.keys(overlaps).map(jid => { // if there was no overlap, delete previous messages if (!overlaps[jid].didOverlap && overlaps[jid].requiresOverlap) { this.logger.debug(`received messages for ${jid}, but did not overlap with previous messages, clearing...`); const chat = this.chats.get(jid); if (chat) { const message = chat.messages.get(lastMessages[jid]); const remainingMessages = chat.messages.paginatedByValue(message, this.maxCachedMessages, undefined, 'after'); chat.messages = Utils_1.newMessagesDB([message, ...remainingMessages]); return { jid, count: chat.messages.length }; // return number of messages we've left } } }).filter(Boolean); this.emit('initial-data-received', { chatsWithMissingMessages }); }; // messages received const messagesUpdate = (json, style) => { //console.log('msg ', json[1]) this.messagesDebounceTimeout.start(undefined, onLastBatchOfDataReceived); if (style === 'last') { overlaps = {}; } const messages = json[2]; if (messages) { const updates = {}; messages.reverse().forEach(([, , message]) => { const jid = message.key.remoteJid; const chat = this.chats.get(jid); const mKeyID = Utils_1.WA_MESSAGE_ID(message); if (chat) { if (style === 'previous') { const fm = chat.messages.get(lastMessages[jid]); if (!fm) return; const prevEpoch = fm['epoch']; message['epoch'] = prevEpoch - 1; } else if (style === 'last') { // no overlap required, if there were no previous messages overlaps[jid] = { requiresOverlap: chat.messages.length > 0 }; const lm = chat.messages.all()[chat.messages.length - 1]; const prevEpoch = (lm && lm['epoch']) || 0; // hacky way to allow more previous messages message['epoch'] = prevEpoch + 1000; } if (chat.messages.upsert(message).length > 0) { overlaps[jid] = { ...(overlaps[jid] || { requiresOverlap: true }), didOverlap: true }; } updates[jid] = updates[jid] || Utils_1.newMessagesDB(); updates[jid].upsert(message); lastMessages[jid] = mKeyID; } else if (!chat) this.logger.debug({ jid }, `chat not found`); }); if (Object.keys(updates).length > 0) { this.emit('chats-update', Object.keys(updates).map(jid => ({ jid, messages: updates[jid] }))); } } }; 
this.on('CB:action,add:last', json => messagesUpdate(json, 'last')); this.on('CB:action,add:before', json => messagesUpdate(json, 'previous')); this.on('CB:action,add:unread', json => messagesUpdate(json, 'previous')); // contacts received this.on('CB:response,type:contacts', json => { if (json[1].duplicate || !json[2]) return; const contacts = this.contacts; const updatedContacts = []; json[2].forEach(([type, contact]) => { if (!contact) return this.logger.info(`unexpectedly got null contact: ${type}`, contact); contact.jid = Utils_1.whatsappID(contact.jid); const presentContact = contacts[contact.jid]; if (presentContact) { const changes = Utils_1.shallowChanges(presentContact, contact, { lookForDeletedKeys: false }); if (changes && Object.keys(changes).length > 0) { updatedContacts.push({ ...changes, jid: contact.jid }); } } else updatedContacts.push(contact); contacts[contact.jid] = { ...(presentContact || {}), ...contact }; }); // update chat names const updatedChats = []; this.chats.all().forEach(c => { const contact = contacts[c.jid]; if (contact) { const name = (contact === null || contact === void 0 ? void 0 : contact.name) || (contact === null || contact === void 0 ? void 0 : contact.notify) || c.name; if (name !== c.name) { updatedChats.push({ jid: c.jid, name }); } } }); updatedChats.length > 0 && this.emit('chats-update', updatedChats); this.logger.info(`received ${json[2].length} contacts`); this.contacts = contacts; this.emit('contacts-received', { updatedContacts }); }); // new messages this.on('CB:action,add:relay,message', json => { const message = json[2][0][2]; this.chatAddMessageAppropriate(message); }); this.on('CB:Chat,cmd:action', json => { const data = json[1].data; if (data) { const emitGroupParticipantsUpdate = (action) => this.emitParticipantsUpdate(json[1].id, data[2].participants.map(Utils_1.whatsappID), action); const emitGroupUpdate = (data) => this.emitGroupUpdate(json[1].id, data); switch (data[0]) { case "promote": emitGroupParticipantsUpdate('promote'); break; case "demote": emitGroupParticipantsUpdate('demote'); break; case "desc_add": emitGroupUpdate({ ...data[2], descOwner: data[1] }); break; default: this.logger.debug({ unhandled: true }, json); break; } } }); // presence updates this.on('CB:Presence', json => { const chatUpdate = this.applyingPresenceUpdate(json[1]); chatUpdate && this.emit('chat-update', chatUpdate); }); // If a message has been updated (usually called when a video message gets its upload url, or live locations) this.on('CB:action,add:update,message', json => { const message = json[2][0][2]; const jid = Utils_1.whatsappID(message.key.remoteJid); const chat = this.chats.get(jid); if (!chat) return; // reinsert to update const oldMessage = chat.messages.get(Utils_1.WA_MESSAGE_ID(message)); if (oldMessage) { message['epoch'] = oldMessage['epoch']; if (chat.messages.upsert(message).length) { const chatUpdate = { jid, messages: Utils_1.newMessagesDB([message]) }; this.emit('chat-update', chatUpdate); } } else { this.logger.debug({ unhandled: true }, 'received message update for non-present message from ' + jid); } }); // message status updates const onMessageStatusUpdate = json => { json = json[2][0][1]; const MAP = { read: Constants_1.WA_MESSAGE_STATUS_TYPE.READ, message: Constants_1.WA_MESSAGE_STATUS_TYPE.DELIVERY_ACK, error: Constants_1.WA_MESSAGE_STATUS_TYPE.ERROR }; this.onMessageStatusUpdate(Utils_1.whatsappID(json.jid), { id: json.index, fromMe: json.owner === 'true' }, MAP[json.type]); }; this.on('CB:action,add:relay,received', 
onMessageStatusUpdate); this.on('CB:action,,received', onMessageStatusUpdate); this.on('CB:Msg,cmd:ack', json => (this.onMessageStatusUpdate(Utils_1.whatsappID(json[1].to), { id: json[1].id, fromMe: true }, +json[1].ack + 1))); // If a user's contact has changed this.on('CB:action,,user', json => { const node = json[2][0]; if (node) { const user = node[1]; user.jid = Utils_1.whatsappID(user.jid); this.contacts[user.jid] = user; this.emit('contact-update', user); const chat = this.chats.get(user.jid); if (chat) { chat.name = user.name || user.notify || chat.name; this.emit('chat-update', { jid: chat.jid, name: chat.name }); } } }); // chat archive, pin etc. this.on('CB:action,,chat', json => { var _a; json = json[2][0]; const updateType = json[1].type; const jid = Utils_1.whatsappID((_a = json[1]) === null || _a === void 0 ? void 0 : _a.jid); const chat = this.chats.get(jid); if (!chat) return; const FUNCTIONS = { 'delete': () => { chat['delete'] = 'true'; this.chats.deleteById(chat.jid); return 'delete'; }, 'clear': () => { if (!json[2]) chat.messages.clear(); else json[2].forEach(item => chat.messages.filter(m => m.key.id !== item[1].index)); return 'clear'; }, 'archive': () => { this.chats.update(chat.jid, chat => chat.archive = 'true'); return 'archive'; }, 'unarchive': () => { delete chat.archive; return 'archive'; }, 'pin': () => { chat.pin = json[1].pin; return 'pin'; } }; const func = FUNCTIONS[updateType]; if (func) { const property = func(); this.emit('chat-update', { jid, [property]: chat[property] || 'false' }); } }); // profile picture updates this.on('CB:Cmd,type:picture', async (json) => { json = json[1]; const jid = Utils_1.whatsappID(json.jid); const imgUrl = await this.getProfilePicture(jid).catch(() => ''); const contact = this.contacts[jid]; if (contact) { contact.imgUrl = imgUrl; this.emit('contact-update', { jid, imgUrl }); } const chat = this.chats.get(jid); if (chat) { chat.imgUrl = imgUrl; this.emit('chat-update', { jid, imgUrl }); } }); // status updates this.on('CB:Status,status', async (json) => { const jid = Utils_1.whatsappID(json[1].id); this.emit('contact-update', { jid, status: json[1].status }); }); // User Profile Name Updates this.on('CB:Conn,pushname', json => { if (this.user) { const name = json[1].pushname; if (this.user.name !== name) { this.user.name = name; // update on client too this.emit('contact-update', { jid: this.user.jid, name }); } } }); // read updates this.on('CB:action,,read', async (json) => { const update = json[2][0][1]; const jid = Utils_1.whatsappID(update.jid); const chat = this.chats.get(jid); if (chat) { if (update.type === 'false') chat.count = -1; else chat.count = 0; this.emit('chat-update', { jid: chat.jid, count: chat.count }); } else { this.logger.warn('recieved read update for unknown chat ' + jid); } }); this.on('qr', qr => QR.generate(qr, { small: true })); // blocklist updates this.on('CB:Blocklist', json => { json = json[1]; const initial = this.blocklist; this.blocklist = json.blocklist; const added = this.blocklist.filter(id => !initial.includes(id)); const removed = initial.filter(id => !this.blocklist.includes(id)); const update = { added, removed }; this.emit('blocklist-update', update); }); } sendChatsQuery(epoch) { return this.sendBinary(['query', { type: 'chat', epoch: epoch.toString() }, null], [Constants_1.WAMetric.queryChat, Constants_1.WAFlag.ignore]); } /** Get the URL to download the profile picture of a person/group */ async getProfilePicture(jid) { const response = await this.query({ json: ['query', 
'ProfilePicThumb', jid || this.user.jid], expect200: true, requiresPhoneConnection: false }); return response.eurl; } applyingPresenceUpdate(update) { var _a, _b; const chatId = Utils_1.whatsappID(update.id); const jid = Utils_1.whatsappID(update.participant || update.id); const chat = this.chats.get(chatId); if (chat && jid.endsWith('@s.whatsapp.net')) { // if its a single chat chat.presences = chat.presences || {}; const presence = { ...(chat.presences[jid] || {}) }; if (update.t) presence.lastSeen = +update.t; else if (update.type === Constants_1.Presence.unavailable && (presence.lastKnownPresence === Constants_1.Presence.available || presence.lastKnownPresence === Constants_1.Presence.composing)) { presence.lastSeen = Utils_1.unixTimestampSeconds(); } presence.lastKnownPresence = update.type; // no update if (presence.lastKnownPresence === ((_a = chat.presences[jid]) === null || _a === void 0 ? void 0 : _a.lastKnownPresence) && presence.lastSeen === ((_b = chat.presences[jid]) === null || _b === void 0 ? void 0 : _b.lastSeen)) { return; } const contact = this.contacts[jid]; if (contact) { presence.name = contact.name || contact.notify || contact.vname; } chat.presences[jid] = presence; return { jid: chatId, presences: { [jid]: presence } }; } } /** inserts an empty chat into the DB */ chatAdd(jid, name, properties = {}) { const chat = { jid, name, t: Utils_1.unixTimestampSeconds(), messages: Utils_1.newMessagesDB(), count: 0, ...(properties || {}) }; if (this.chats.insertIfAbsent(chat).length) { this.emit('chat-new', chat); return chat; } } onMessageStatusUpdate(jid, key, status) { const chat = this.chats.get(Utils_1.whatsappID(jid)); const msg = chat === null || chat === void 0 ? void 0 : chat.messages.get(Utils_1.GET_MESSAGE_ID(key)); if (msg) { if (typeof status !== 'undefined') { if (status > msg.status || status === Constants_1.WA_MESSAGE_STATUS_TYPE.ERROR) { msg.status = status; this.emit('chat-update', { jid: chat.jid, messages: Utils_1.newMessagesDB([msg]) }); } } else { this.logger.warn({ update: status }, 'received unknown message status update'); } } else { this.logger.debug({ unhandled: true, update: status, key }, 'received message status update for non-present message'); } } contactAddOrGet(jid) { jid = Utils_1.whatsappID(jid); if (!this.contacts[jid]) this.contacts[jid] = { jid }; return this.contacts[jid]; } /** Adds the given message to the appropriate chat, if the chat doesn't exist, it is created */ async chatAddMessageAppropriate(message) { const jid = Utils_1.whatsappID(message.key.remoteJid); if (Utils_1.isGroupID(jid) && !jid.includes('-')) { this.logger.warn({ gid: jid }, 'recieved odd group ID'); return; } const chat = this.chats.get(jid) || await this.chatAdd(jid); this.chatAddMessage(message, chat); } chatAddMessage(message, chat) { var _a, _b, _c, _d; // store updates in this const chatUpdate = { jid: chat.jid }; // add to count if the message isn't from me & there exists a message if (!message.key.fromMe && message.message) { chat.count += 1; chatUpdate.count = chat.count; const participant = Utils_1.whatsappID(message.participant || chat.jid); const contact = chat.presences && chat.presences[participant]; if ((contact === null || contact === void 0 ? 
void 0 : contact.lastKnownPresence) === Constants_1.Presence.composing) { // update presence const update = this.applyingPresenceUpdate({ id: chat.jid, participant, type: Constants_1.Presence.available }); update && Object.assign(chatUpdate, update); } } const ephemeralProtocolMsg = (_c = (_b = (_a = message.message) === null || _a === void 0 ? void 0 : _a.ephemeralMessage) === null || _b === void 0 ? void 0 : _b.message) === null || _c === void 0 ? void 0 : _c.protocolMessage; if (ephemeralProtocolMsg && ephemeralProtocolMsg.type === Constants_1.WAMessageProto.ProtocolMessage.ProtocolMessageType.EPHEMERAL_SETTING) { chatUpdate.eph_setting_ts = message.messageTimestamp.toString(); chatUpdate.ephemeral = ephemeralProtocolMsg.ephemeralExpiration.toString(); if (ephemeralProtocolMsg.ephemeralExpiration) { chat.eph_setting_ts = chatUpdate.eph_setting_ts; chat.ephemeral = chatUpdate.ephemeral; } else { delete chat.eph_setting_ts; delete chat.ephemeral; } } const messages = chat.messages; const protocolMessage = (_d = message.message) === null || _d === void 0 ? void 0 : _d.protocolMessage; // if it's a message to delete another message if (protocolMessage) { switch (protocolMessage.type) { case Constants_1.WAMessageProto.ProtocolMessage.ProtocolMessageType.REVOKE: const found = chat.messages.get(Utils_1.GET_MESSAGE_ID(protocolMessage.key)); if (found === null || found === void 0 ? void 0 : found.message) { this.logger.info('deleting message: ' + protocolMessage.key.id + ' in chat: ' + protocolMessage.key.remoteJid); found.messageStubType = Constants_1.WA_MESSAGE_STUB_TYPE.REVOKE; delete found.message; chatUpdate.messages = Utils_1.newMessagesDB([found]); } break; default: break; } } else if (!messages.get(Utils_1.WA_MESSAGE_ID(message))) { // if the message is not already there const lastEpoch = (messages.last && messages.last['epoch']) || 0; message['epoch'] = lastEpoch + 1; messages.insert(message); while (messages.length > this.maxCachedMessages) { messages.delete(messages.all()[0]); // delete oldest messages } // only update if it's an actual message if (message.message && !ephemeralProtocolMsg) { this.chats.update(chat.jid, chat => { chat.t = +Utils_1.toNumber(message.messageTimestamp); chatUpdate.t = chat.t; // a new message unarchives the chat if (chat.archive) { delete chat.archive; chatUpdate.archive = 'false'; } }); } chatUpdate.hasNewMessage = true; chatUpdate.messages = Utils_1.newMessagesDB([message]); // check if the message is an action if (message.messageStubType) { const jid = chat.jid; //let actor = whatsappID (message.participant) let participants; const emitParticipantsUpdate = (action) => (this.emitParticipantsUpdate(jid, participants, action)); const emitGroupUpdate = (update) => this.emitGroupUpdate(jid, update); switch (message.messageStubType) { case Constants_1.WA_MESSAGE_STUB_TYPE.CHANGE_EPHEMERAL_SETTING: chatUpdate.eph_setting_ts = message.messageTimestamp.toString(); chatUpdate.ephemeral = message.messageStubParameters[0]; if (+chatUpdate.ephemeral) { chat.eph_setting_ts = chatUpdate.eph_setting_ts; chat.ephemeral = chatUpdate.ephemeral; } else { delete chat.eph_setting_ts; delete chat.ephemeral; } break; case Constants_1.WA_MESSAGE_STUB_TYPE.GROUP_PARTICIPANT_LEAVE: case Constants_1.WA_MESSAGE_STUB_TYPE.GROUP_PARTICIPANT_REMOVE: participants = message.messageStubParameters.map(Utils_1.whatsappID); emitParticipantsUpdate('remove'); // mark the chat read only if you left the group if (participants.includes(this.user.jid)) { chat.read_only = 'true'; 
chatUpdate.read_only = 'true'; } break; case Constants_1.WA_MESSAGE_STUB_TYPE.GROUP_PARTICIPANT_ADD: case Constants_1.WA_MESSAGE_STUB_TYPE.GROUP_PARTICIPANT_INVITE: case Constants_1.WA_MESSAGE_STUB_TYPE.GROUP_PARTICIPANT_ADD_REQUEST_JOIN: participants = message.messageStubParameters.map(Utils_1.whatsappID); if (participants.includes(this.user.jid) && chat.read_only === 'true') { delete chat.read_only; chatUpdate.read_only = 'false'; } emitParticipantsUpdate('add'); break; case Constants_1.WA_MESSAGE_STUB_TYPE.GROUP_CHANGE_ANNOUNCE: const announce = message.messageStubParameters[0] === 'on' ? 'true' : 'false'; emitGroupUpdate({ announce }); break; case Constants_1.WA_MESSAGE_STUB_TYPE.GROUP_CHANGE_RESTRICT: const restrict = message.messageStubParameters[0] === 'on' ? 'true' : 'false'; emitGroupUpdate({ restrict }); break; case Constants_1.WA_MESSAGE_STUB_TYPE.GROUP_CHANGE_SUBJECT: case Constants_1.WA_MESSAGE_STUB_TYPE.GROUP_CREATE: chat.name = message.messageStubParameters[0]; chatUpdate.name = chat.name; if (chat.metadata) chat.metadata.subject = chat.name; break; } } } this.emit('chat-update', chatUpdate); } /** sets the profile picture of a chat */ async setProfilePicture(chat) { chat.imgUrl = await this.getProfilePicture(chat.jid).catch(err => ''); } on(event, listener) { return super.on(event, listener); } emit(event, ...args) { return super.emit(event, ...args); } } __decorate([ Mutex_1.Mutex(jid => jid) ], WAConnection.prototype, "getProfilePicture", null); exports.WAConnection = WAConnection;
WAConnection
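// --- Illustrative usage sketch for the event layer above (connection
// options and JIDs are hypothetical; connect() comes from 3.Connect):
//
//   const { WAConnection } = require('./4.Events');
//   const conn = new WAConnection();
//   conn.on('chats-received', ({ hasNewChats }) =>
//       console.log('chats synced, new chats?', hasNewChats));
//   conn.on('chat-update', update => console.log('update for', update.jid));
//   conn.on('connection-phone-change', ({ connected }) =>
//       console.log('phone connected:', connected));
//   conn.connect().then(() => console.log('open'));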
lib.rs
#![allow(non_snake_case, non_camel_case_types)] pub use self::error::*; use std::default::Default; use std::mem; mod error; pub fn SQLITE_STATIC() -> sqlite3_destructor_type { Some(unsafe { mem::transmute(0isize) }) } pub fn
() -> sqlite3_destructor_type { Some(unsafe { mem::transmute(-1isize) }) } /// Run-Time Limit Categories #[repr(i32)] pub enum Limit { /// The maximum size of any string or BLOB or table row, in bytes. SQLITE_LIMIT_LENGTH = SQLITE_LIMIT_LENGTH, /// The maximum length of an SQL statement, in bytes. SQLITE_LIMIT_SQL_LENGTH = SQLITE_LIMIT_SQL_LENGTH, /// The maximum number of columns in a table definition or in the result set /// of a SELECT or the maximum number of columns in an index or in an /// ORDER BY or GROUP BY clause. SQLITE_LIMIT_COLUMN = SQLITE_LIMIT_COLUMN, /// The maximum depth of the parse tree on any expression. SQLITE_LIMIT_EXPR_DEPTH = SQLITE_LIMIT_EXPR_DEPTH, /// The maximum number of terms in a compound SELECT statement. SQLITE_LIMIT_COMPOUND_SELECT = SQLITE_LIMIT_COMPOUND_SELECT, /// The maximum number of instructions in a virtual machine program used to /// implement an SQL statement. SQLITE_LIMIT_VDBE_OP = SQLITE_LIMIT_VDBE_OP, /// The maximum number of arguments on a function. SQLITE_LIMIT_FUNCTION_ARG = SQLITE_LIMIT_FUNCTION_ARG, /// The maximum number of attached databases. SQLITE_LIMIT_ATTACHED = SQLITE_LIMIT_ATTACHED, /// The maximum length of the pattern argument to the LIKE or GLOB /// operators. SQLITE_LIMIT_LIKE_PATTERN_LENGTH = SQLITE_LIMIT_LIKE_PATTERN_LENGTH, /// The maximum index number of any parameter in an SQL statement. SQLITE_LIMIT_VARIABLE_NUMBER = SQLITE_LIMIT_VARIABLE_NUMBER, /// The maximum depth of recursion for triggers. SQLITE_LIMIT_TRIGGER_DEPTH = 10, /// The maximum number of auxiliary worker threads that a single prepared /// statement may start. SQLITE_LIMIT_WORKER_THREADS = 11, } include!(concat!(env!("OUT_DIR"), "/bindgen.rs")); pub type sqlite3_index_constraint = sqlite3_index_info_sqlite3_index_constraint; pub type sqlite3_index_constraint_usage = sqlite3_index_info_sqlite3_index_constraint_usage; impl Default for sqlite3_vtab { fn default() -> Self { unsafe { mem::zeroed() } } } impl Default for sqlite3_vtab_cursor { fn default() -> Self { unsafe { mem::zeroed() } } }
SQLITE_TRANSIENT
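// --- Illustrative sketch (raw FFI, error handling elided; `stmt` and the
// exact generated binding names are assumed to come from the bindgen output
// included above). SQLITE_STATIC() tells SQLite the buffer outlives the
// statement, while SQLITE_TRANSIENT() asks SQLite to copy the bytes
// immediately, making it the safe default for short-lived Rust strings:
//
//     let s = "hello";
//     unsafe {
//         sqlite3_bind_text(
//             stmt,                          // *mut sqlite3_stmt
//             1,                             // 1-based parameter index
//             s.as_ptr() as *const c_char,
//             s.len() as c_int,
//             SQLITE_TRANSIENT(),            // SQLite makes its own copy
//         );
//     }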
_vpn_connections_operations.py
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import TYPE_CHECKING import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpRequest, HttpResponse from azure.core.polling import LROPoller, NoPolling, PollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.arm_polling import ARMPolling from .. import models as _models if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class VpnConnectionsOperations(object): """VpnConnectionsOperations operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2019_08_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def get( self, resource_group_name, # type: str gateway_name, # type: str connection_name, # type: str **kwargs # type: Any ): # type: (...) -> "_models.VpnConnection" """Retrieves the details of a vpn connection. :param resource_group_name: The resource group name of the VpnGateway. :type resource_group_name: str :param gateway_name: The name of the gateway. :type gateway_name: str :param connection_name: The name of the vpn connection. 
:type connection_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: VpnConnection, or the result of cls(response) :rtype: ~azure.mgmt.network.v2019_08_01.models.VpnConnection :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnConnection"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-08-01" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'), 'connectionName': self._serialize.url("connection_name", connection_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('VpnConnection', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore def _create_or_update_initial( self, resource_group_name, # type: str gateway_name, # type: str connection_name, # type: str vpn_connection_parameters, # type: "_models.VpnConnection" **kwargs # type: Any ): # type: (...) 
-> "_models.VpnConnection" cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnConnection"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-08-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._create_or_update_initial.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'), 'connectionName': self._serialize.url("connection_name", connection_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(vpn_connection_parameters, 'VpnConnection') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('VpnConnection', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('VpnConnection', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore def begin_create_or_update( self, resource_group_name, # type: str gateway_name, # type: str connection_name, # type: str vpn_connection_parameters, # type: "_models.VpnConnection" **kwargs # type: Any ): # type: (...) -> LROPoller["_models.VpnConnection"] """Creates a vpn connection to a scalable vpn gateway if it doesn't exist else updates the existing connection. :param resource_group_name: The resource group name of the VpnGateway. :type resource_group_name: str :param gateway_name: The name of the gateway. :type gateway_name: str :param connection_name: The name of the connection. :type connection_name: str :param vpn_connection_parameters: Parameters supplied to create or Update a VPN Connection. :type vpn_connection_parameters: ~azure.mgmt.network.v2019_08_01.models.VpnConnection :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. 
:keyword polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of LROPoller that returns either VpnConnection or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_08_01.models.VpnConnection] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnConnection"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._create_or_update_initial( resource_group_name=resource_group_name, gateway_name=gateway_name, connection_name=connection_name, vpn_connection_parameters=vpn_connection_parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response):
if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'), 'connectionName': self._serialize.url("connection_name", connection_name, 'str'), } if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore def _delete_initial( self, resource_group_name, # type: str gateway_name, # type: str connection_name, # type: str **kwargs # type: Any ): # type: (...) -> None cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-08-01" accept = "application/json" # Construct URL url = self._delete_initial.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'), 'connectionName': self._serialize.url("connection_name", connection_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore def begin_delete( self, resource_group_name, # type: str gateway_name, # type: str connection_name, # type: str **kwargs # type: Any ): # type: (...) -> LROPoller[None] """Deletes a vpn connection. :param resource_group_name: The resource group name of the VpnGateway. :type resource_group_name: str :param gateway_name: The name of the gateway. :type gateway_name: str :param connection_name: The name of the connection. 
:type connection_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of LROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._delete_initial( resource_group_name=resource_group_name, gateway_name=gateway_name, connection_name=connection_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'), 'connectionName': self._serialize.url("connection_name", connection_name, 'str'), } if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore def list_by_vpn_gateway( self, resource_group_name, # type: str gateway_name, # type: str **kwargs # type: Any ): # type: (...) -> Iterable["_models.ListVpnConnectionsResult"] """Retrieves all vpn connections for a particular virtual wan vpn gateway. :param resource_group_name: The resource group name of the VpnGateway. :type resource_group_name: str :param gateway_name: The name of the gateway. 
:type gateway_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either ListVpnConnectionsResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_08_01.models.ListVpnConnectionsResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnConnectionsResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-08-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_by_vpn_gateway.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('ListVpnConnectionsResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list_by_vpn_gateway.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections'} # type: ignore
deserialized = self._deserialize('VpnConnection', pipeline_response)
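# --- Illustrative usage sketch (not part of the generated client above;
# credential, subscription id, and resource names are hypothetical):
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.network import NetworkManagementClient
#
#     client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     poller = client.vpn_connections.begin_create_or_update(
#         resource_group_name="my-rg",
#         gateway_name="my-vpn-gw",
#         connection_name="my-conn",
#         vpn_connection_parameters=params,  # a VpnConnection model
#     )
#     connection = poller.result()  # blocks until the LRO completes
#     for conn in client.vpn_connections.list_by_vpn_gateway("my-rg", "my-vpn-gw"):
#         print(conn.name)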
assert.go
package validate

import (
	"encoding/json"
	"net/url"
	"reflect"
	"regexp"
	"strconv"
	"strings"
	"time"
)

// IsNotEmpty returns true if the value is not nil and not empty
// (empty string, zero number, zero time, or empty slice).
func IsNotEmpty(value interface{}) bool {
	if value == nil {
		return false
	}
	if str, ok := value.(string); ok {
		return len(str) > 0
	}
	if _, ok := value.(bool); ok {
		return true
	}
	if i, ok := value.(int); ok {
		return i != 0
	}
	if i, ok := value.(uint); ok {
		return i != 0
	}
	if i, ok := value.(int8); ok {
		return i != 0
	}
	if i, ok := value.(uint8); ok {
		return i != 0
	}
	if i, ok := value.(int16); ok {
		return i != 0
	}
	if i, ok := value.(uint16); ok {
		return i != 0
	}
	if i, ok := value.(uint32); ok {
		return i != 0
	}
	if i, ok := value.(int32); ok {
		return i != 0
	}
	if i, ok := value.(int64); ok {
		return i != 0
	}
	if i, ok := value.(uint64); ok {
		return i != 0
	}
	if t, ok := value.(time.Time); ok {
		tt := time.Time{}
		return !t.IsZero() && t != tt
	}
	v := reflect.ValueOf(value)
	if v.Kind() == reflect.Slice {
		return v.Len() > 0
	}
	return true
}

// IsNumeric checks if the value contains only numbers. Empty string is valid.
func IsNumeric(value interface{}) bool {
	str := toString(value)
	if !IsNotEmpty(str) {
		return true
	}
	_, err := strconv.ParseFloat(str, 64)
	return err == nil
}

// IsAlpha checks if the value contains only letters (a-zA-Z). Empty string is valid.
func IsAlpha(value interface{}) bool {
	str := toString(value)
	if !IsNotEmpty(str)
	return patternAlpha.MatchString(str)
}

// IsAlphanumeric checks if the value contains only letters and numbers. Empty string is valid.
func IsAlphanumeric(value interface{}) bool {
	str := toString(value)
	if !IsNotEmpty(str) {
		return true
	}
	return patternAlphanumeric.MatchString(str)
}

// IsAlphanumericSpace checks if the value contains only letters, numbers and spaces. Empty string is valid.
func IsAlphanumericSpace(value interface{}) bool {
	str := toString(value)
	if !IsNotEmpty(str) {
		return true
	}
	return patternAlphanumericSpace.MatchString(str)
}

// IsAlphaSpace checks if the value contains only letters and spaces. Empty string is valid.
func IsAlphaSpace(value interface{}) bool {
	str := toString(value)
	if !IsNotEmpty(str) {
		return true
	}
	return patternAlphaSpace.MatchString(str)
}

// IsEmail checks if the value is an email address. Empty string is valid.
func IsEmail(value interface{}) bool {
	str := toString(value)
	if !IsNotEmpty(str) {
		return true
	}
	return patternEmail.MatchString(toString(value))
}

// IsLatitude checks if the value is a latitude. Empty string is valid.
func IsLatitude(value interface{}) bool {
	str := toString(value)
	if !IsNotEmpty(str) {
		return true
	}
	return patternLatitude.MatchString(toString(value))
}

// IsLongitude checks if the value is a longitude. Empty string is valid.
func IsLongitude(value interface{}) bool {
	str := toString(value)
	if !IsNotEmpty(str) {
		return true
	}
	return patternLongitude.MatchString(toString(value))
}

// IsURL checks if the value is a URL. Empty string is valid.
func IsURL(value interface{}) bool {
	str := toString(value)
	if !IsNotEmpty(str) {
		return true
	}
	if str == "" || len(str) >= 2083 || len(str) <= 3 || strings.HasPrefix(str, ".") {
		return false
	}
	u, err := url.Parse(str)
	if err != nil {
		return false
	}
	if strings.HasPrefix(u.Host, ".") {
		return false
	}
	if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) {
		return false
	}
	return patternURL.MatchString(str)
}

// IsJSON checks if the value is valid JSON (note: uses json.Unmarshal).
func IsJSON(value interface{}) bool {
	var js json.RawMessage
	return json.Unmarshal([]byte(toString(value)), &js) == nil
}

// IsLowerThanEqual returns true if the value is lower than or equal to the given number.
// This evaluates the value of ints, the length of strings and the number of elements in slices.
func IsLowerThanEqual(value interface{}, max interface{}) (res bool) {
	if value == nil {
		return true
	}
	return dataLength(value) <= dataLength(max)
}

// IsGreaterThanEqual returns true if the value is greater than or equal to the given number.
// This evaluates the value of ints, the length of strings and the number of elements in slices.
func IsGreaterThanEqual(value interface{}, min interface{}) (res bool) {
	if value == nil {
		return true
	}
	return dataLength(value) >= dataLength(min)
}

// IsLowerThan returns true if the value is lower than the given number.
// This evaluates the value of ints, the length of strings and the number of elements in slices.
func IsLowerThan(value interface{}, max interface{}) (res bool) {
	if value == nil {
		return true
	}
	return dataLength(value) < dataLength(max)
}

// IsGreaterThan returns true if the value is greater than the given number.
// This evaluates the value of ints, the length of strings and the number of elements in slices.
func IsGreaterThan(value interface{}, min interface{}) (res bool) {
	if value == nil {
		return true
	}
	return dataLength(value) > dataLength(min)
}

// IsOnRange returns true if the value is greater than or equal to the given min
// and lower than or equal to the given max.
// This evaluates the value of ints, the length of strings and the number of elements in slices.
func IsOnRange(value interface{}, min interface{}, max interface{}) bool {
	return IsGreaterThanEqual(value, min) && IsLowerThanEqual(value, max)
}

// IsContains checks if the value contains the substring. Empty string is valid.
func IsContains(value interface{}, substring string) bool {
	str := toString(value)
	if !IsNotEmpty(str) {
		return true
	}
	return strings.Contains(str, substring)
}

// IsMatches checks if the value matches the pattern (a regular expression).
// Returns false if the pattern fails to compile. Empty string is valid.
func IsMatches(value interface{}, pattern string) bool {
	str := toString(value)
	if !IsNotEmpty(str) {
		return true
	}
	match, _ := regexp.MatchString(pattern, str)
	return match
}

// IsSame checks if the value is identical to the given param. Empty string is valid.
func IsSame(value interface{}, param interface{}) bool {
	str := toString(value)
	if !IsNotEmpty(str) {
		return true
	}
	return str == toString(param)
}

// IsIn checks if the value exists in the given params. Empty string is valid.
func IsIn(value interface{}, param ...string) bool {
	str := toString(value)
	if !IsNotEmpty(str) {
		return true
	}
	for _, v := range param {
		if v == str {
			return true
		}
	}
	return false
}

// IsNotIn checks if the value does not exist in the given params. Empty string is valid.
func IsNotIn(value interface{}, param ...string) bool {
	str := toString(value)
	if !IsNotEmpty(str) {
		return true
	}
	for _, v := range param {
		if v == str {
			return false
		}
	}
	return true
}
{
	return true
}
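// Usage sketch for the validators above: they treat the empty string as
// valid, so callers that require presence must pair them with IsNotEmpty.
// A minimal sketch; validateSignup is a hypothetical caller, not part of
// this package.
func validateSignup(email, nickname string, age int) bool {
	// Require presence explicitly, since IsEmail("") and IsAlphanumeric("")
	// both return true by design.
	if !IsNotEmpty(email) || !IsNotEmpty(nickname) {
		return false
	}
	// IsOnRange compares ints by value (per the dataLength semantics above).
	return IsEmail(email) && IsAlphanumeric(nickname) && IsOnRange(age, 13, 120)
}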
lib.rs
#[cfg(test)]
mod tests {
    #[test]
    fn it_works() {
        assert_eq!(2 + 2, 4);
    }
}

mod front_of_house;

pub use front_of_house::hosting;

mod back_of_house {
    pub struct Breakfast {
        pub toast: String,
        fruit: String,
    }

    impl Breakfast {
        pub fn
(toast: &str) -> Breakfast {
            Breakfast {
                toast: String::from(toast),
                fruit: String::from("Apple"),
            }
        }
    }

    fn fix_incorrect_order() {
        cook_order();
        super::front_of_house::serving::serve_order();
    }

    fn cook_order() {}
}

pub fn eat_at() {
    // Absolute path
    crate::front_of_house::hosting::add_to_waitlist();
    // Relative path
    front_of_house::hosting::add_to_waitlist();
    // Via the `pub use` re-export at the crate root
    hosting::add_to_waitlist();

    let mut meal = back_of_house::Breakfast::summer("Rye");
    meal.toast = String::from("Wheat");
    println!("Getting toast: {}", meal.toast);

    // meal.fruit = String::from("Orange"); // Fails: `fruit` is private
}
summer
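// Usage sketch: because of `pub use front_of_house::hosting;` at the crate
// root, external code can reach add_to_waitlist through the shorter
// re-exported path. A minimal sketch, assuming this crate is named
// `restaurant` (the crate name is not shown in this file):
//
// use restaurant::hosting;
//
// fn main() {
//     hosting::add_to_waitlist();
//     restaurant::eat_at();
// }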
get_kibana_cluster_plan_responses.go
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Code generated by go-swagger; DO NOT EDIT.

package clusters_kibana

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

import (
	"fmt"
	"io"

	"github.com/go-openapi/runtime"
	strfmt "github.com/go-openapi/strfmt"

	models "github.com/elastic/cloud-sdk-go/pkg/models"
)

// GetKibanaClusterPlanReader is a Reader for the GetKibanaClusterPlan structure.
type GetKibanaClusterPlanReader struct {
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
func (o *GetKibanaClusterPlanReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {

	case 200:
		result := NewGetKibanaClusterPlanOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil

	case 404:
		result := NewGetKibanaClusterPlanNotFound()
		if err := result.readResponse(response, consumer, o.formats); err != nil
		return nil, result

	case 412:
		result := NewGetKibanaClusterPlanPreconditionFailed()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result

	default:
		return nil, runtime.NewAPIError("unknown error", response, response.Code())
	}
}

// NewGetKibanaClusterPlanOK creates a GetKibanaClusterPlanOK with default headers values
func NewGetKibanaClusterPlanOK() *GetKibanaClusterPlanOK {
	return &GetKibanaClusterPlanOK{}
}

/*GetKibanaClusterPlanOK handles this case with default header values.

The cluster has a current applied plan
*/
type GetKibanaClusterPlanOK struct {
	/*The date-time when the resource was created (ISO format relative to UTC)
	 */
	XCloudResourceCreated string
	/*The date-time when the resource was last modified (ISO format relative to UTC)
	 */
	XCloudResourceLastModified string
	/*The resource version, which is used to avoid update conflicts with concurrent operations
	 */
	XCloudResourceVersion string

	Payload *models.KibanaClusterPlan
}

func (o *GetKibanaClusterPlanOK) Error() string {
	return fmt.Sprintf("[GET /clusters/kibana/{cluster_id}/plan][%d] getKibanaClusterPlanOK %+v", 200, o.Payload)
}

func (o *GetKibanaClusterPlanOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {

	// response header x-cloud-resource-created
	o.XCloudResourceCreated = response.GetHeader("x-cloud-resource-created")

	// response header x-cloud-resource-last-modified
	o.XCloudResourceLastModified = response.GetHeader("x-cloud-resource-last-modified")

	// response header x-cloud-resource-version
	o.XCloudResourceVersion = response.GetHeader("x-cloud-resource-version")

	o.Payload = new(models.KibanaClusterPlan)

	// response payload
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}

	return nil
}

// NewGetKibanaClusterPlanNotFound creates a GetKibanaClusterPlanNotFound with default headers values
func NewGetKibanaClusterPlanNotFound() *GetKibanaClusterPlanNotFound {
	return &GetKibanaClusterPlanNotFound{}
}

/*GetKibanaClusterPlanNotFound handles this case with default header values.

The cluster specified by {cluster_id} cannot be found (code: 'clusters.cluster_not_found')
*/
type GetKibanaClusterPlanNotFound struct {
	Payload *models.BasicFailedReply
}

func (o *GetKibanaClusterPlanNotFound) Error() string {
	return fmt.Sprintf("[GET /clusters/kibana/{cluster_id}/plan][%d] getKibanaClusterPlanNotFound %+v", 404, o.Payload)
}

func (o *GetKibanaClusterPlanNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {

	o.Payload = new(models.BasicFailedReply)

	// response payload
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}

	return nil
}

// NewGetKibanaClusterPlanPreconditionFailed creates a GetKibanaClusterPlanPreconditionFailed with default headers values
func NewGetKibanaClusterPlanPreconditionFailed() *GetKibanaClusterPlanPreconditionFailed {
	return &GetKibanaClusterPlanPreconditionFailed{}
}

/*GetKibanaClusterPlanPreconditionFailed handles this case with default header values.
There is no currently applied plan - e.g. the cluster has not finished provisioning, or the provisioning failed (code: 'clusters.cluster_plan_state_error')
*/
type GetKibanaClusterPlanPreconditionFailed struct {
	Payload *models.BasicFailedReply
}

func (o *GetKibanaClusterPlanPreconditionFailed) Error() string {
	return fmt.Sprintf("[GET /clusters/kibana/{cluster_id}/plan][%d] getKibanaClusterPlanPreconditionFailed %+v", 412, o.Payload)
}

func (o *GetKibanaClusterPlanPreconditionFailed) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {

	o.Payload = new(models.BasicFailedReply)

	// response payload
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}

	return nil
}
{
	return nil, err
}
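// Usage sketch: ReadResponse maps each HTTP status to a typed result, and
// non-200 results come back through the error return (both
// GetKibanaClusterPlanNotFound and GetKibanaClusterPlanPreconditionFailed
// implement error). A hedged sketch of the calling side; the `client` value
// and method name follow go-swagger conventions and are assumptions here:
//
//	res, err := client.GetKibanaClusterPlan(params)
//	if err != nil {
//		return err // 404/412 arrive here wrapping *models.BasicFailedReply
//	}
//	plan := res.Payload // *models.KibanaClusterPlan on a 200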
token_tree.rs
#![allow(clippy::type_complexity)]

use crate::parse::{call_node::*, comment::*, flag::*, number::*, operator::*, pipeline::*};
use derive_new::new;
use getset::Getters;
use nu_errors::{ParseError, ShellError};
use nu_protocol::{ShellTypeName, SpannedTypeName};
use nu_source::{
    b, DebugDocBuilder, HasSpan, PrettyDebugWithSource, Span, Spanned, SpannedItem, Text,
};
use std::borrow::Cow;
use std::ops::Deref;

#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub enum Token {
    Number(RawNumber),
    CompareOperator(CompareOperator),
    EvaluationOperator(EvaluationOperator),
    String(Span),
    Variable(Span),
    ItVariable(Span),
    ExternalCommand(Span),
    ExternalWord,
    GlobPattern,
    Bare,
    Garbage,
    Call(CallNode),
    Delimited(DelimitedNode),
    Pipeline(Pipeline),
    Flag(Flag),
    Comment(Comment),
    Whitespace,
    Separator,
}

macro_rules! token_type {
    (struct $name:tt (desc: $desc:tt) -> $out:ty { |$span:ident, $pat:pat| => $do:expr }) => {
        pub struct $name;

        impl TokenType for $name {
            type Output = $out;

            fn desc(&self) -> Cow<'static, str> {
                Cow::Borrowed($desc)
            }

            fn extract_token_value(
                &self,
                token: &SpannedToken,
                err: ParseErrorFn<$out>,
            ) -> Result<$out, ParseError> {
                let $span = token.span();

                match *token.unspanned() {
                    $pat => Ok($do),
                    _ => err(),
                }
            }
        }
    };

    (struct $name:tt (desc: $desc:tt) -> $out:ty { $pat:pat => $do:expr }) => {
        pub struct $name;

        impl TokenType for $name {
            type Output = $out;

            fn desc(&self) -> Cow<'static, str> {
                Cow::Borrowed($desc)
            }

            fn extract_token_value(
                &self,
                token: &SpannedToken,
                err: ParseErrorFn<$out>,
            ) -> Result<$out, ParseError> {
                match token.unspanned().clone() {
                    $pat => Ok($do),
                    _ => err(),
                }
            }
        }
    };
}

pub type ParseErrorFn<'a, T> = &'a dyn Fn() -> Result<T, ParseError>;

token_type!(struct IntType (desc: "integer") -> RawNumber {
    Token::Number(number @ RawNumber::Int(_)) => number
});

token_type!(struct DecimalType (desc: "decimal") -> RawNumber {
    Token::Number(number @ RawNumber::Decimal(_)) => number
});

token_type!(struct StringType (desc: "string") -> (Span, Span) {
    |outer, Token::String(inner)| => (inner, outer)
});

token_type!(struct BareType (desc: "word") -> Span {
    |span, Token::Bare| => span
});

token_type!(struct DotType (desc: "dot") -> Span {
    |span, Token::EvaluationOperator(EvaluationOperator::Dot)| => span
});

token_type!(struct DotDotType (desc: "dotdot") -> Span {
    |span, Token::EvaluationOperator(EvaluationOperator::DotDot)| => span
});

token_type!(struct CompareOperatorType (desc: "compare operator") -> (Span, CompareOperator) {
    |span, Token::CompareOperator(operator)| => (span, operator)
});

token_type!(struct ExternalWordType (desc: "external word") -> Span {
    |span, Token::ExternalWord| => span
});

token_type!(struct ExternalCommandType (desc: "external command") -> (Span, Span) {
    |outer, Token::ExternalCommand(inner)| => (inner, outer)
});

token_type!(struct CommentType (desc: "comment") -> (Comment, Span) {
    |outer, Token::Comment(comment)| => (comment, outer)
});

token_type!(struct SeparatorType (desc: "separator") -> Span {
    |span, Token::Separator| => span
});

token_type!(struct WhitespaceType (desc: "whitespace") -> Span {
    |span, Token::Whitespace| => span
});

token_type!(struct WordType (desc: "word") -> Span {
    |span, Token::Bare| => span
});

token_type!(struct ItVarType (desc: "$it") -> (Span, Span) {
    |outer, Token::ItVariable(inner)| => (inner, outer)
});

token_type!(struct VarType (desc: "variable") -> (Span, Span) {
    |outer, Token::Variable(inner)| => (inner, outer)
});

token_type!(struct PipelineType (desc: "pipeline") -> Pipeline {
    Token::Pipeline(pipeline) => pipeline
});

token_type!(struct BlockType (desc: "block") -> DelimitedNode {
    Token::Delimited(block @ DelimitedNode {
        delimiter: Delimiter::Brace,
        ..
    }) => block
});

token_type!(struct SquareType (desc: "square") -> DelimitedNode {
    Token::Delimited(square @ DelimitedNode {
        delimiter: Delimiter::Square,
        ..
    }) => square
});

pub trait TokenType {
    type Output;

    fn desc(&self) -> Cow<'static, str>;

    fn extract_token_value(
        &self,
        token: &SpannedToken,
        err: ParseErrorFn<Self::Output>,
    ) -> Result<Self::Output, ParseError>;
}

impl Token {
    pub fn into_spanned(self, span: impl Into<Span>) -> SpannedToken {
        SpannedToken {
            unspanned: self,
            span: span.into(),
        }
    }
}

#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Getters)]
pub struct SpannedToken {
    #[get = "pub"]
    unspanned: Token,
    span: Span,
}

impl Deref for SpannedToken {
    type Target = Token;

    fn deref(&self) -> &Self::Target {
        &self.unspanned
    }
}

impl HasSpan for SpannedToken {
    fn span(&self) -> Span {
        self.span
    }
}

impl ShellTypeName for SpannedToken {
    fn type_name(&self) -> &'static str {
        self.unspanned.type_name()
    }
}

impl PrettyDebugWithSource for SpannedToken {
    fn pretty_debug(&self, source: &str) -> DebugDocBuilder {
        match self.unspanned() {
            Token::Number(number) => number.pretty_debug(source),
            Token::CompareOperator(operator) => operator.pretty_debug(source),
            Token::EvaluationOperator(operator) => operator.pretty_debug(source),
            Token::String(_) | Token::GlobPattern | Token::Bare => {
                b::primitive(self.span.slice(source))
            }
            Token::Variable(_) => b::var(self.span.slice(source)),
            Token::ItVariable(_) => b::keyword(self.span.slice(source)),
            Token::ExternalCommand(_) => b::description(self.span.slice(source)),
            Token::ExternalWord => b::description(self.span.slice(source)),
            Token::Call(call) => call.pretty_debug(source),
            Token::Delimited(delimited) => delimited.pretty_debug(source),
            Token::Pipeline(pipeline) => pipeline.pretty_debug(source),
            Token::Flag(flag) => flag.pretty_debug(source),
            Token::Garbage => b::error(self.span.slice(source)),
            Token::Whitespace => b::typed(
                "whitespace",
                b::description(format!("{:?}", self.span.slice(source))),
            ),
            Token::Separator => b::typed(
                "separator",
                b::description(format!("{:?}", self.span.slice(source))),
            ),
            Token::Comment(comment) => {
                b::typed("comment", b::description(comment.text.slice(source)))
            }
        }
    }
}

impl ShellTypeName for Token {
    fn type_name(&self) -> &'static str {
        match self {
            Token::Number(_) => "number",
            Token::CompareOperator(_) => "comparison operator",
            Token::EvaluationOperator(EvaluationOperator::Dot) => "dot",
            Token::EvaluationOperator(EvaluationOperator::DotDot) => "dot dot",
            Token::String(_) => "string",
            Token::Variable(_) => "variable",
            Token::ItVariable(_) => "it variable",
            Token::ExternalCommand(_) => "external command",
            Token::ExternalWord => "external word",
            Token::GlobPattern => "glob pattern",
            Token::Bare => "word",
            Token::Call(_) => "command",
            Token::Delimited(d) => d.type_name(),
            Token::Pipeline(_) => "pipeline",
            Token::Flag(_) => "flag",
            Token::Garbage => "garbage",
            Token::Whitespace => "whitespace",
            Token::Separator => "separator",
            Token::Comment(_) => "comment",
        }
    }
}

impl From<&SpannedToken> for Span {
    fn from(token: &SpannedToken) -> Span {
        token.span
    }
}

impl SpannedToken {
    pub fn as_external_arg(&self, source: &Text) -> String {
        self.span().slice(source).to_string()
    }

    pub fn source<'a>(&self, source: &'a Text) -> &'a str {
        self.span().slice(source)
    }

    pub fn get_variable(&self) -> Result<(Span, Span), ShellError> {
        match self.unspanned() {
            Token::Variable(inner_span) => Ok((self.span(), *inner_span)),
            _ => Err(ShellError::type_error("variable", self.spanned_type_name())),
        }
    }

    pub fn is_bare(&self) -> bool {
        match self.unspanned() {
            Token::Bare => true,
            _ => false,
        }
    }

    pub fn is_string(&self) -> bool {
        match self.unspanned() {
            Token::String(_) => true,
            _ => false,
        }
    }

    pub fn is_number(&self) -> bool {
        match self.unspanned() {
            Token::Number(_) => true,
            _ => false,
        }
    }

    pub fn is_int(&self) -> bool {
        match self.unspanned() {
            Token::Number(RawNumber::Int(_)) => true,
            _ => false,
        }
    }

    pub fn as_string(&self) -> Option<(Span, Span)> {
        match self.unspanned() {
            Token::String(inner_span) => Some((self.span(), *inner_span)),
            _ => None,
        }
    }

    pub fn is_pattern(&self) -> bool {
        match self.unspanned() {
            Token::GlobPattern => true,
            _ => false,
        }
    }

    pub fn is_word(&self) -> bool {
        match self.unspanned() {
            Token::Bare => true,
            _ => false,
        }
    }

    pub fn is_dot(&self) -> bool {
        match self.unspanned() {
            Token::EvaluationOperator(EvaluationOperator::Dot) => true,
            _ => false,
        }
    }

    pub fn is_separator(&self) -> bool {
        match self.unspanned() {
            Token::Separator => true,
            _ => false,
        }
    }

    pub fn as_block(&self) -> Option<(Spanned<&[SpannedToken]>, (Span, Span))> {
        match self.unspanned() {
            Token::Delimited(DelimitedNode {
                delimiter,
                children,
                spans,
            }) if *delimiter == Delimiter::Brace => {
                Some(((&children[..]).spanned(self.span()), *spans))
            }
            _ => None,
        }
    }

    pub fn is_external(&self) -> bool {
        match self.unspanned() {
            Token::ExternalCommand(..) => true,
            _ => false,
        }
    }

    pub(crate) fn as_flag(&self, value: &str, short: Option<char>, source: &Text) -> Option<Flag> {
        match self.unspanned() {
            Token::Flag(flag) => {
                let name = flag.name().slice(source);

                match flag.kind {
                    FlagKind::Longhand if value == name => Some(*flag),
                    FlagKind::Shorthand => {
                        if let Some(short_hand) = short {
                            if short_hand.to_string() == name {
                                return Some(*flag);
                            }
                        }
                        None
                    }
                    _ => None,
                }
            }
            _ => None,
        }
    }

    pub fn as_pipeline(&self) -> Result<Pipeline, ParseError>
    pub fn is_whitespace(&self) -> bool {
        match self.unspanned() {
            Token::Whitespace => true,
            _ => false,
        }
    }
}

#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Getters, new)]
#[get = "pub(crate)"]
pub struct DelimitedNode {
    pub(crate) delimiter: Delimiter,
    pub(crate) spans: (Span, Span),
    pub(crate) children: Vec<SpannedToken>,
}

impl HasSpan for DelimitedNode {
    fn span(&self) -> Span {
        self.spans.0.until(self.spans.1)
    }
}

impl PrettyDebugWithSource for DelimitedNode {
    fn pretty_debug(&self, source: &str) -> DebugDocBuilder {
        b::delimit(
            self.delimiter.open(),
            b::intersperse(
                self.children.iter().map(|child| child.pretty_debug(source)),
                b::space(),
            ),
            self.delimiter.close(),
        )
    }
}

impl DelimitedNode {
    pub fn type_name(&self) -> &'static str {
        match self.delimiter {
            Delimiter::Brace => "braced expression",
            Delimiter::Paren => "parenthesized expression",
            Delimiter::Square => "array literal or index operator",
        }
    }
}

#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub enum Delimiter {
    Paren,
    Brace,
    Square,
}

impl Delimiter {
    pub(crate) fn open(self) -> &'static str {
        match self {
            Delimiter::Paren => "(",
            Delimiter::Brace => "{",
            Delimiter::Square => "[",
        }
    }

    pub(crate) fn close(self) -> &'static str {
        match self {
            Delimiter::Paren => ")",
            Delimiter::Brace => "}",
            Delimiter::Square => "]",
        }
    }
}

#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Getters, new)]
#[get = "pub(crate)"]
pub struct PathNode {
    head: Box<SpannedToken>,
    tail: Vec<SpannedToken>,
}

#[cfg(test)]
impl SpannedToken {
    pub fn expect_external(&self) -> Span {
        match self.unspanned() {
            Token::ExternalCommand(span) => *span,
            _ => panic!(
                "Only call expect_external if you checked is_external first, found {:?}",
                self
            ),
        }
    }

    pub fn expect_number(&self) -> RawNumber {
        match self.unspanned() {
            Token::Number(raw_number) => *raw_number,
            other => panic!("Expected number, found {:?}", other),
        }
    }

    pub fn expect_string(&self) -> (Span, Span) {
        match self.unspanned() {
            Token::String(inner_span) => (self.span(), *inner_span),
            other => panic!("Expected string, found {:?}", other),
        }
    }

    pub fn expect_list(&self) -> Spanned<Vec<SpannedToken>> {
        match self.unspanned() {
            Token::Pipeline(pipeline) => pipeline
                .parts()
                .iter()
                .flat_map(|part| part.tokens())
                .cloned()
                .collect::<Vec<SpannedToken>>()
                .spanned(self.span()),
            _ => panic!("Expected list, found {:?}", self),
        }
    }

    pub fn expect_pattern(&self) -> Span {
        match self.unspanned() {
            Token::GlobPattern => self.span(),
            _ => panic!("Expected pattern, found {:?}", self),
        }
    }

    pub fn expect_var(&self) -> (Span, Span) {
        match self.unspanned() {
            Token::Variable(inner_span) => (self.span(), *inner_span),
            Token::ItVariable(inner_span) => (self.span(), *inner_span),
            other => panic!("Expected var, found {:?}", other),
        }
    }

    pub fn expect_dot(&self) -> Span {
        match self.unspanned() {
            Token::EvaluationOperator(EvaluationOperator::Dot) => self.span(),
            other => panic!("Expected dot, found {:?}", other),
        }
    }

    pub fn expect_bare(&self) -> Span {
        match self.unspanned() {
            Token::Bare => self.span(),
            _ => panic!("Expected bare, found {:?}", self),
        }
    }
}
    {
        match self.unspanned() {
            Token::Pipeline(pipeline) => Ok(pipeline.clone()),
            _ => Err(ParseError::mismatch("pipeline", self.spanned_type_name())),
        }
    }
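// Usage sketch: the token_type! structs above give the parser a uniform way
// to extract a typed value from a SpannedToken, or fall back to a caller-
// supplied error. A hedged sketch; `expect_bare_span` is a hypothetical
// helper, not part of this file:
//
// fn expect_bare_span(token: &SpannedToken) -> Result<Span, ParseError> {
//     BareType.extract_token_value(token, &|| {
//         Err(ParseError::mismatch("word", token.spanned_type_name()))
//     })
// }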