Columns: file_name (string, length 3-137), prefix (string, length 0-918k), suffix (string, length 0-962k), middle (string, length 0-812k)
volume_mount.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Classes for interacting with Kubernetes API """ import copy import kubernetes.client.models as k8s from airflow.kubernetes.k8s_model import K8SModel class VolumeMount(K8SModel): """ Initialize a Kubernetes Volume Mount. Used to mount pod-level volumes to a running container. :param name: the name of the volume mount :type name: str :param mount_path: the path inside the container at which the volume is mounted :type mount_path: str :param sub_path: subpath within the volume mount :type sub_path: Optional[str] :param read_only: whether to mount the volume in read-only mode :type read_only: bool """ def __init__(self, name, mount_path, sub_path, read_only): self.name = name self.mount_path = mount_path self.sub_path = sub_path self.read_only = read_only def to_k8s_client_obj(self) -> k8s.V1VolumeMount:
def attach_to_pod(self, pod: k8s.V1Pod) -> k8s.V1Pod: """ Attaches the volume mount to a copy of the given pod :return: Copy of the Pod object """ cp_pod = copy.deepcopy(pod) volume_mount = self.to_k8s_client_obj() cp_pod.spec.containers[0].volume_mounts = pod.spec.containers[0].volume_mounts or [] cp_pod.spec.containers[0].volume_mounts.append(volume_mount) return cp_pod
""" Converts to k8s object. :return Volume Mount k8s object """ return k8s.V1VolumeMount( name=self.name, mount_path=self.mount_path, sub_path=self.sub_path, read_only=self.read_only )
plan_expression_rewriter.rs
// Copyright 2020-2021 The Datafuse Authors. // // SPDX-License-Identifier: Apache-2.0. use common_exception::Result; use crate::Expression; /// Trait for potentially recursively rewriting an [`Expr`] expression /// tree. When passed to `Expr::rewrite`, `ExprVisitor::mutate` is /// invoked recursively on all nodes of an expression tree. See the /// comments on `Expr::rewrite` for details on its use pub trait ExprRewriter: Sized { /// Invoked before any children of `expr` are rewritten / /// visited. Default implementation returns `Ok(true)` fn pre_visit(&mut self, _expr: &Expression) -> Result<bool> { Ok(true) } /// Invoked after all children of `expr` have been mutated and /// returns a potentially modified expr. fn mutate(&mut self, expr: Expression) -> Result<Expression>; } impl Expression { /// Performs a depth first walk of an expression and its children /// to rewrite an expression, consuming `self` producing a new /// [`Expr`]. ///
/// Implements a modified version of the [visitor /// pattern](https://en.wikipedia.org/wiki/Visitor_pattern) to /// separate algorithms from the structure of the `Expr` tree and /// make it easier to write new, efficient expression /// transformation algorithms. /// /// For an expression tree such as /// ```text /// BinaryExpr (GT) /// left: Column("foo") /// right: Column("bar") /// ``` /// /// The nodes are visited using the following order /// ```text /// pre_visit(BinaryExpr(GT)) /// pre_visit(Column("foo")) /// mutate(Column("foo")) /// pre_visit(Column("bar")) /// mutate(Column("bar")) /// mutate(BinaryExpr(GT)) /// ``` /// /// If an Err result is returned, recursion is stopped immediately /// /// If [`false`] is returned on a call to pre_visit, no /// children of that expression are visited, nor is mutate /// called on that expression /// pub fn rewrite<R>(self, rewriter: &mut R) -> Result<Self> where R: ExprRewriter { if !rewriter.pre_visit(&self)? { return Ok(self); }; // recurse into all sub expressions (and cover all expression types) let expr = match self { Expression::Alias(name, expr) => { let expr = expr.rewrite(rewriter)?; Expression::Alias(name, Box::new(expr)) } Expression::BinaryExpression { op, left, right } => Expression::BinaryExpression { op, left: Box::new(left.rewrite(rewriter)?), right: Box::new(right.rewrite(rewriter)?), }, Expression::ScalarFunction { op, args } => { let mut new_args = Vec::with_capacity(args.len()); for arg in args { new_args.push(arg.rewrite(rewriter)?); } Expression::ScalarFunction { op, args: new_args } } Expression::AggregateFunction { op, args } => { let mut new_args = Vec::with_capacity(args.len()); for arg in args { new_args.push(arg.rewrite(rewriter)?); } Expression::AggregateFunction { op, args: new_args } } Expression::Cast { expr, data_type } => { let expr = expr.rewrite(rewriter)?; Expression::Cast { expr: Box::new(expr), data_type, } } Expression::Sort { expr, asc, nulls_first, } => { let expr = expr.rewrite(rewriter)?; Expression::Sort { expr: Box::new(expr), asc, nulls_first, } } _ => self, }; // now rewrite this expression itself rewriter.mutate(expr) } }
analyze_noise.py
import sys,os,glob try: import pyUSRP as u except ImportError: try: sys.path.append('..') import pyUSRP as u except ImportError: print "Cannot find the pyUSRP package" import argparse def run(backend, files, welch, dbc):
cryostat_attenuation=0, auto_open=True, output_filename=None, add_info = ["decimation: 100x fs: 100Msps","loopback decimation 100x","decimation: OFF fs: 1Msps"]) if __name__ == "__main__": parser = argparse.ArgumentParser(description='Analyze noise acquisition files and plot the spectra.') parser.add_argument('--folder', '-fn', help='Name of the folder in which the data are stored', type=str, default = "data") parser.add_argument('--backend', '-b', help='backend to use for plotting', type=str, default= "matplotlib") parser.add_argument('--welch', '-w', help='Welch factor relative to timestream length, so that the welch factor is len(timestream)/this_arg', type=int, default= 5) parser.add_argument('--dbc', '-dbc', help='Analyze and plot in dBc or not', action="store_true") args = parser.parse_args() try: os.mkdir(args.folder) except OSError: pass os.chdir(args.folder) files = glob.glob("USRP_Noise*.h5") run(backend = args.backend, files = files, welch = args.welch, dbc = args.dbc)
for f in files: u.calculate_noise(f, verbose = True, welch = max(welch,1), dbc = dbc, clip = 0.1) print u.plot_noise_spec(files, channel_list=None, max_frequency=None, title_info=None, backend=backend,
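A direct call that bypasses the argparse wrapper might look like this; the file name is a placeholder, not one taken from any real acquisition:

run(backend="matplotlib", files=["USRP_Noise_20180101_120000.h5"], welch=5, dbc=False)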
LoggerFactory.ts
import winston from 'winston'; import { Logger, createLogger, format, transports } from 'winston'; export { winston }; const { combine, label, printf } = format; export default class
{ public static getInstance(source = 'POLKABOT'): Logger { const consoleFormat = printf(({ level, message, label, _timestamp, meta }) => { return `[${label}${meta ? '|' + meta.source : ''}]\t${level} ${message}`; }); const productionFormat = printf(({ level, message, label, timestamp, meta }) => { return `${timestamp} [${label}${meta ? '|' + meta.source : ''}]\t${level} ${message}`; }); const instance = createLogger({ level: process.env.LOG_LEVEL, format: combine( label({ label: source }), format.splat(), productionFormat, ), transports: [ new transports.File({ filename: 'error.log', level: 'error' }), new transports.File({ filename: 'combined.log' }) ] }); if (process.env.NODE_ENV !== 'production') { instance.add(new transports.Console({ format: combine( format.colorize(), format.align(), format.simple(), label({ label: source }), format.splat(), consoleFormat, ) })); } return instance; } }
LoggerFactory
rw_internal_test.go
// Copyright (c) 2019 The Jaeger Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package spanstore import ( "context" "encoding/binary" "math/rand" "testing" "time" "github.com/dgraph-io/badger/v3" "github.com/stretchr/testify/assert" "github.com/jaegertracing/jaeger/model" "github.com/jaegertracing/jaeger/storage/spanstore" ) func TestEncodingTypes(t *testing.T) { // JSON encoding runWithBadger(t, func(store *badger.DB, t *testing.T) { testSpan := createDummySpan() cache := NewCacheStore(store, time.Duration(1*time.Hour), true) sw := NewSpanWriter(store, cache, time.Duration(1*time.Hour)) rw := NewTraceReader(store, cache) sw.encodingType = jsonEncoding err := sw.WriteSpan(context.Background(), &testSpan) assert.NoError(t, err) tr, err := rw.GetTrace(context.Background(), model.TraceID{Low: 0, High: 1}) assert.NoError(t, err) assert.Equal(t, 1, len(tr.Spans)) }) // Unknown encoding write runWithBadger(t, func(store *badger.DB, t *testing.T) { testSpan := createDummySpan() cache := NewCacheStore(store, time.Duration(1*time.Hour), true) sw := NewSpanWriter(store, cache, time.Duration(1*time.Hour)) // rw := NewTraceReader(store, cache) sw.encodingType = 0x04 err := sw.WriteSpan(context.Background(), &testSpan) assert.EqualError(t, err, "unknown encoding type: 0x04") }) // Unknown encoding reader runWithBadger(t, func(store *badger.DB, t *testing.T) { testSpan := createDummySpan() cache := NewCacheStore(store, time.Duration(1*time.Hour), true) sw := NewSpanWriter(store, cache, time.Duration(1*time.Hour)) rw := NewTraceReader(store, cache) err := sw.WriteSpan(context.Background(), &testSpan) assert.NoError(t, err) startTime := model.TimeAsEpochMicroseconds(testSpan.StartTime) key, _, _ := createTraceKV(&testSpan, protoEncoding, startTime) e := &badger.Entry{ Key: key, ExpiresAt: uint64(time.Now().Add(1 * time.Hour).Unix()), } e.UserMeta = byte(0x04) store.Update(func(txn *badger.Txn) error { txn.SetEntry(e) return nil }) _, err = rw.GetTrace(context.Background(), model.TraceID{Low: 0, High: 1}) assert.EqualError(t, err, "unknown encoding type: 0x04") }) } func TestDecodeErrorReturns(t *testing.T) { garbage := []byte{0x08} _, err := decodeValue(garbage, protoEncoding) assert.Error(t, err) _, err = decodeValue(garbage, jsonEncoding) assert.Error(t, err) } func TestDuplicateTraceIDDetection(t *testing.T) { runWithBadger(t, func(store *badger.DB, t *testing.T) { testSpan := createDummySpan() cache := NewCacheStore(store, time.Duration(1*time.Hour), true) sw := NewSpanWriter(store, cache, time.Duration(1*time.Hour)) rw := NewTraceReader(store, cache) origStartTime := testSpan.StartTime traceCount := 128 for k := 0; k < traceCount; k++ { testSpan.TraceID.Low = rand.Uint64() for i := 0; i < 32; i++ { testSpan.SpanID = model.SpanID(rand.Uint64()) testSpan.StartTime = origStartTime.Add(time.Duration(rand.Int31n(8000)) * time.Millisecond) err := sw.WriteSpan(context.Background(), &testSpan) assert.NoError(t, err) } } traces, err := rw.FindTraceIDs(context.Background(), 
&spanstore.TraceQueryParameters{ ServiceName: "service", NumTraces: 256, // Default is 100, we want to fetch more than there should be StartTimeMax: time.Now().Add(time.Hour), StartTimeMin: testSpan.StartTime.Add(-1 * time.Hour), }) assert.NoError(t, err) assert.Equal(t, 128, len(traces)) }) } func createDummySpan() model.Span { tid := time.Now() dummyKv := []model.KeyValue{ { Key: "key", VType: model.StringType, VStr: "value", }, } testSpan := model.Span{ TraceID: model.TraceID{ Low: uint64(0), High: 1, }, SpanID: model.SpanID(0), OperationName: "operation", Process: &model.Process{ ServiceName: "service", Tags: dummyKv, }, StartTime: tid.Add(time.Duration(1 * time.Millisecond)), Duration: time.Duration(1 * time.Millisecond), Tags: dummyKv, Logs: []model.Log{ {
}, }, } return testSpan } func TestMergeJoin(t *testing.T) { assert := assert.New(t) // Test equals left := make([][]byte, 16) right := make([][]byte, 16) for i := 0; i < 16; i++ { left[i] = make([]byte, 4) binary.BigEndian.PutUint32(left[i], uint32(i)) right[i] = make([]byte, 4) binary.BigEndian.PutUint32(right[i], uint32(i)) } merged := mergeJoinIds(left, right) assert.Equal(16, len(merged)) // Check order assert.Equal(uint32(15), binary.BigEndian.Uint32(merged[15])) // Test simple non-equality different size merged = mergeJoinIds(left[1:2], right[13:]) assert.Empty(merged) // Different size, some equalities merged = mergeJoinIds(left[0:3], right[1:7]) assert.Equal(2, len(merged)) assert.Equal(uint32(2), binary.BigEndian.Uint32(merged[1])) }
Timestamp: tid, Fields: dummyKv,
broker.go
/* Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by client-gen. DO NOT EDIT. package v1 import ( "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" v1 "knative.dev/eventing/pkg/apis/eventing/v1" scheme "knative.dev/eventing/pkg/client/clientset/versioned/scheme" ) // BrokersGetter has a method to return a BrokerInterface. // A group's client should implement this interface. type BrokersGetter interface { Brokers(namespace string) BrokerInterface } // BrokerInterface has methods to work with Broker resources. type BrokerInterface interface { Create(*v1.Broker) (*v1.Broker, error) Update(*v1.Broker) (*v1.Broker, error) UpdateStatus(*v1.Broker) (*v1.Broker, error) Delete(name string, options *metav1.DeleteOptions) error DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error Get(name string, options metav1.GetOptions) (*v1.Broker, error) List(opts metav1.ListOptions) (*v1.BrokerList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Broker, err error) BrokerExpansion } // brokers implements BrokerInterface type brokers struct { client rest.Interface ns string } // newBrokers returns a Brokers func
(c *EventingV1Client, namespace string) *brokers { return &brokers{ client: c.RESTClient(), ns: namespace, } } // Get takes name of the broker, and returns the corresponding broker object, and an error if there is any. func (c *brokers) Get(name string, options metav1.GetOptions) (result *v1.Broker, err error) { result = &v1.Broker{} err = c.client.Get(). Namespace(c.ns). Resource("brokers"). Name(name). VersionedParams(&options, scheme.ParameterCodec). Do(). Into(result) return } // List takes label and field selectors, and returns the list of Brokers that match those selectors. func (c *brokers) List(opts metav1.ListOptions) (result *v1.BrokerList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } result = &v1.BrokerList{} err = c.client.Get(). Namespace(c.ns). Resource("brokers"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). Do(). Into(result) return } // Watch returns a watch.Interface that watches the requested brokers. func (c *brokers) Watch(opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("brokers"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). Watch() } // Create takes the representation of a broker and creates it. Returns the server's representation of the broker, and an error, if there is any. func (c *brokers) Create(broker *v1.Broker) (result *v1.Broker, err error) { result = &v1.Broker{} err = c.client.Post(). Namespace(c.ns). Resource("brokers"). Body(broker). Do(). Into(result) return } // Update takes the representation of a broker and updates it. Returns the server's representation of the broker, and an error, if there is any. func (c *brokers) Update(broker *v1.Broker) (result *v1.Broker, err error) { result = &v1.Broker{} err = c.client.Put(). Namespace(c.ns). Resource("brokers"). Name(broker.Name). Body(broker). Do(). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). func (c *brokers) UpdateStatus(broker *v1.Broker) (result *v1.Broker, err error) { result = &v1.Broker{} err = c.client.Put(). Namespace(c.ns). Resource("brokers"). Name(broker.Name). SubResource("status"). Body(broker). Do(). Into(result) return } // Delete takes name of the broker and deletes it. Returns an error if one occurs. func (c *brokers) Delete(name string, options *metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("brokers"). Name(name). Body(options). Do(). Error() } // DeleteCollection deletes a collection of objects. func (c *brokers) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second } return c.client.Delete(). Namespace(c.ns). Resource("brokers"). VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). Do(). Error() } // Patch applies the patch and returns the patched broker. func (c *brokers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Broker, err error) { result = &v1.Broker{} err = c.client.Patch(pt). Namespace(c.ns). Resource("brokers"). SubResource(subresources...). Name(name). 
Body(data). Do(). Into(result) return }
newBrokers
instance_explanation.py
from factory import vectorizer_factory from sklearn.base import TransformerMixin from sklearn.pipeline import make_pipeline from lime.lime_text import LimeTextExplainer class VectorTransformer(TransformerMixin): def __init__(self, vectorizer_name): self.vectorizer_name = vectorizer_name def fit(self, X, y=None): return self def transform(self, sentence_list, y=None): return vectorizer_factory.get_vectorized_text(sentence_list, self.vectorizer_name) def get_pipeline_for_classification(feature_transformer, trained_model): return make_pipeline(feature_transformer, trained_model) def get_explanation_for_instance(text_string, classifier_function, class_list, max_num_features_to_show=10, file_to_save='explain.html'): explainer = LimeTextExplainer(class_names=class_list, random_state=42) explained_instance = explainer.explain_instance(text_string, classifier_function.predict_proba, num_features=max_num_features_to_show, top_labels=len(class_list)) explained_instance.save_to_file(file_to_save) return explained_instance.as_list()
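A sketch of how these helpers might be wired together, assuming a hypothetical "tfidf" key registered with vectorizer_factory and a toy corpus; none of these names come from the module itself. (Note that fit must return self, as fixed above, for the pipeline to chain fit and transform.)

from sklearn.linear_model import LogisticRegression

train_texts = ["a great movie", "a terrible movie"]  # placeholder corpus
train_labels = [1, 0]
transformer = VectorTransformer("tfidf")  # "tfidf" is an assumed factory key
model = LogisticRegression().fit(transformer.transform(train_texts), train_labels)
pipeline = get_pipeline_for_classification(transformer, model)
weights = get_explanation_for_instance("a great movie", pipeline, ["neg", "pos"])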
transport.go
package github import ( "bytes" "io" "io/ioutil" "log" "net/http" "sync" "time" "github.com/google/go-github/v38/github" ) const ( ctxEtag = ctxEtagType("etag") ctxId = ctxIdType("id") writeDelay = 1 * time.Second ) // ctxIdType is used to avoid collisions between packages using context type ctxIdType string // ctxEtagType is used to avoid collisions between packages using context type ctxEtagType string // etagTransport allows saving API quota by passing previously stored Etag // available via context to request headers type etagTransport struct { transport http.RoundTripper } func (ett *etagTransport) RoundTrip(req *http.Request) (*http.Response, error) { ctx := req.Context() etag := ctx.Value(ctxEtag) if v, ok := etag.(string); ok { req.Header.Set("If-None-Match", v) } return ett.transport.RoundTrip(req) } func NewEtagTransport(rt http.RoundTripper) *etagTransport { return &etagTransport{transport: rt} } // rateLimitTransport implements GitHub's best practices // for avoiding rate limits // https://developer.github.com/v3/guides/best-practices-for-integrators/#dealing-with-abuse-rate-limits type rateLimitTransport struct { transport http.RoundTripper delayNextRequest bool m sync.Mutex } func (rlt *rateLimitTransport) RoundTrip(req *http.Request) (*http.Response, error) { // Make requests for a single user or client ID serially // This is also necessary for safely saving // and restoring bodies between retries below rlt.lock(req) // If you're making a large number of POST, PATCH, PUT, or DELETE requests // for a single user or client ID, wait at least one second between each request. if rlt.delayNextRequest { log.Printf("[DEBUG] Sleeping %s between write operations", writeDelay) time.Sleep(writeDelay) } rlt.delayNextRequest = isWriteMethod(req.Method) resp, err := rlt.transport.RoundTrip(req) if err != nil { rlt.unlock(req) return resp, err } // Make response body accessible for retries & debugging // (work around bug in GitHub SDK) // See https://github.com/google/go-github/pull/986 r1, r2, err := drainBody(resp.Body) if err != nil { return nil, err } resp.Body = r1 ghErr := github.CheckResponse(resp) resp.Body = r2 // When you have been limited, use the Retry-After response header to slow down. if arlErr, ok := ghErr.(*github.AbuseRateLimitError); ok { rlt.delayNextRequest = false retryAfter := arlErr.GetRetryAfter() log.Printf("[DEBUG] Abuse detection mechanism triggered, sleeping for %s before retrying", retryAfter) time.Sleep(retryAfter) rlt.unlock(req) return rlt.RoundTrip(req) } if rlErr, ok := ghErr.(*github.RateLimitError); ok { rlt.delayNextRequest = false retryAfter := time.Until(rlErr.Rate.Reset.Time) log.Printf("[DEBUG] Rate limit %d reached, sleeping for %s (until %s) before retrying", rlErr.Rate.Limit, retryAfter, time.Now().Add(retryAfter)) time.Sleep(retryAfter) rlt.unlock(req) return rlt.RoundTrip(req) } rlt.unlock(req) return resp, nil } func (rlt *rateLimitTransport) lock(req *http.Request) { ctx := req.Context() log.Printf("[TRACE] Acquiring lock for GitHub API request (%q)", ctx.Value(ctxId)) rlt.m.Lock() } func (rlt *rateLimitTransport) unlock(req *http.Request) { ctx := req.Context() log.Printf("[TRACE] Releasing lock for GitHub API request (%q)", ctx.Value(ctxId)) rlt.m.Unlock() } func
(rt http.RoundTripper) *rateLimitTransport { return &rateLimitTransport{transport: rt} } // drainBody reads all of b to memory and then returns two equivalent // ReadClosers yielding the same bytes. func drainBody(b io.ReadCloser) (r1, r2 io.ReadCloser, err error) { if b == http.NoBody { // No copying needed. Preserve the magic sentinel meaning of NoBody. return http.NoBody, http.NoBody, nil } var buf bytes.Buffer if _, err = buf.ReadFrom(b); err != nil { return nil, b, err } if err = b.Close(); err != nil { return nil, b, err } return ioutil.NopCloser(&buf), ioutil.NopCloser(bytes.NewReader(buf.Bytes())), nil } func isWriteMethod(method string) bool { switch method { case "POST", "PATCH", "PUT", "DELETE": return true } return false }
NewRateLimitTransport
test_unit_service_client.py
# # All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or # its licensors. # # For complete copyright and license terms please see the LICENSE at the root of this # distribution (the "License"). All use of this software is governed by the License, # or, if provided, by the license below or the license accompanying this file. Do not # remove or modify any license notices. This file is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # $Revision: #4 $ import unittest from unittest import mock import cgf_service_client class
(unittest.TestCase): def test_service_client_imports(self): self.assertIsNotNone(cgf_service_client.Data) self.assertIsNotNone(cgf_service_client.Path) self.assertIsNotNone(cgf_service_client.HttpError) self.assertIsNotNone(cgf_service_client.ClientError) self.assertIsNotNone(cgf_service_client.NotFoundError) self.assertIsNotNone(cgf_service_client.NotAllowedError) self.assertIsNotNone(cgf_service_client.ServerError) @mock.patch('cgf_service_client.Path') def test_for_url(self, mock_Path): client = cgf_service_client.for_url('http://example.com', A = 10, B = 20) self.assertIs(client, mock_Path.return_value) mock_Path.assert_called_once_with('http://example.com', A = 10, B = 20) if __name__ == '__main__': unittest.main()
UnitTest_CloudGemFramework_ServiceClient_service_client
overlap.py
import math from typing import Iterable from .base import BaseMeasure class OverlapMeasure(BaseMeasure): def __init__(self, db=None, maxsize: int = 100) -> None: super().__init__() if db: self.maxsize = db.max_feature_size() else: self.maxsize = maxsize def min_feature_size(self, query_size, alpha) -> int: # A constant 1 would be a safe lower bound; the tighter floor-based bound below suffices return math.floor(query_size * alpha) or 1 def max_feature_size(self, query_size, alpha) -> int: return self.maxsize def minimum_common_feature_count( self, query_size: int, y_size: int, alpha: float ) -> int: return int(math.ceil(alpha * min(query_size, y_size))) def similarity(self, X: Iterable[str], Y: Iterable[str]) -> int: return min(len(set(X)), len(set(Y))) class LeftOverlapMeasure(BaseMeasure): def __init__(self, db=None, maxsize: int = 100) -> None: super().__init__() if db: self.maxsize = db.max_feature_size() else: self.maxsize = maxsize def min_feature_size(self, query_size, alpha) -> int: return math.floor(query_size * alpha) or 1 def max_feature_size(self, query_size, alpha) -> int:
def minimum_common_feature_count( self, query_size: int, y_size: int, alpha: float ) -> int: return math.floor(query_size * alpha) or 1 def similarity(self, X: Iterable[str], Y: Iterable[str]) -> float: return 1 - len(set(X) - set(Y)) / len(set(X))
return self.maxsize
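A few spot checks of the bounds these measures compute; the instance is constructed without a db handle, so maxsize falls back to the constructor argument.

m = OverlapMeasure(maxsize=50)
print(m.min_feature_size(10, 0.5))                     # 5 = floor(10 * 0.5)
print(m.minimum_common_feature_count(10, 8, 0.5))      # 4 = ceil(0.5 * min(10, 8))
print(m.similarity(["a", "b", "c"], ["b", "c", "d"]))  # 3 = min(|X|, |Y|)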
s.py
# This code is part of Qiskit.
# # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """The S and Sdg gate.""" import numpy from qiskit.qasm import pi from qiskit.circuit.gate import Gate from qiskit.circuit.quantumregister import QuantumRegister class SGate(Gate): r"""Single qubit S gate (Z**0.5). It induces a :math:`\pi/2` phase, and is sometimes called the P gate (phase). This is a Clifford gate and a square-root of Pauli-Z. **Matrix Representation:** .. math:: S = \begin{pmatrix} 1 & 0 \\ 0 & i \end{pmatrix} **Circuit symbol:** .. parsed-literal:: ┌───┐ q_0: ┤ S ├ └───┘ Equivalent to a :math:`\pi/2` radian rotation about the Z axis. """ def __init__(self, label=None): """Create new S gate.""" super().__init__('s', 1, [], label=label) def _define(self): """ gate s a { u1(pi/2) a; } """ # pylint: disable=cyclic-import from qiskit.circuit.quantumcircuit import QuantumCircuit from .u1 import U1Gate q = QuantumRegister(1, 'q') qc = QuantumCircuit(q, name=self.name) rules = [ (U1Gate(pi / 2), [q[0]], []) ] qc._data = rules self.definition = qc def inverse(self): """Return inverse of S (SdgGate).""" return SdgGate() def to_matrix(self): """Return a numpy.array for the S gate.""" return numpy.array([[1, 0], [0, 1j]], dtype=complex) class SdgGate(Gate): r"""Single qubit S-adjoint gate (~Z**0.5). It induces a :math:`-\pi/2` phase. This is a Clifford gate and a square-root of Pauli-Z. **Matrix Representation:** .. math:: Sdg = \begin{pmatrix} 1 & 0 \\ 0 & -i \end{pmatrix} **Circuit symbol:** .. parsed-literal:: ┌─────┐ q_0: ┤ Sdg ├ └─────┘ Equivalent to a :math:`-\pi/2` radian rotation about the Z axis. """ def __init__(self, label=None): """Create new Sdg gate.""" super().__init__('sdg', 1, [], label=label) def _define(self): """ gate sdg a { u1(-pi/2) a; } """ # pylint: disable=cyclic-import from qiskit.circuit.quantumcircuit import QuantumCircuit from .u1 import U1Gate q = QuantumRegister(1, 'q') qc = QuantumCircuit(q, name=self.name) rules = [ (U1Gate(-pi / 2), [q[0]], []) ] qc._data = rules self.definition = qc def inverse(self): """Return inverse of Sdg (SGate).""" return SGate() def to_matrix(self): """Return a numpy.array for the Sdg gate.""" return numpy.array([[1, 0], [0, -1j]], dtype=complex)
# # (C) Copyright IBM 2017.
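A quick numerical check, using only the two classes defined above, that Sdg is the inverse of S:

import numpy

s_matrix = SGate().to_matrix()
sdg_matrix = SdgGate().to_matrix()
print(numpy.allclose(s_matrix @ sdg_matrix, numpy.eye(2)))  # True: Sdg undoes S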
Review.js
const Sequelize = require('sequelize'); const { STRING, TEXT, INTEGER } = Sequelize; const db = require('../db'); const Review = db.define('review', { title: {
type: STRING }, content: { type: TEXT }, rating: { type: INTEGER, allowNull: false, validate: { isInt: true, min: 1, max: 5 } } }) module.exports = Review;
insight-strip.ts
import Component from '@glimmer/component'; import { noop } from 'lodash'; import { IDynamicLinkParams } from 'dynamic-link/components/dynamic-link'; import { TopConsumer } from '@datahub/metadata-types/constants/metadata/top-consumers'; interface ITopConsumersInsightStripArgs { /** * Title of the top consumer insight */ title: string; /** * Tooltip text for this component to provide additional information */ tooltipText: string; /** * A list of top consumer entity link params * Redirects the user to the entity page either on DataHub (if modeled) or externally */ topConsumers: Array<IDynamicLinkParams | string>; /** * The type of consumer used by this component for rendering avatars or links */ consumerType: TopConsumer; /** * Action triggered when the user clicks on the show more button */ onClickShowMore?: () => void; } /** * The preview limit on the number of top consumers shown on this insight without viewing all */ export const PREVIEW_LIMIT = 2; export const baseClass = 'top-consumers-insight-strip'; /** * This component is an insight strip that displays a preview of the list of top consumers * (of the same consumer entity type) based on some preview limit. * The number of consumers not shown in the insight strip is displayed to the user, * and the user can access the full list of top consumers by clicking show more. */ export default class
extends Component<ITopConsumersInsightStripArgs> { /** * Declared for convenient access in the template */ baseClass = baseClass; /** * Top consumer enum declared in the component for convenient access */ topConsumer = TopConsumer; /** * The preview top consumers computed from the preview limit */ get topConsumersPreview(): Array<IDynamicLinkParams | string> { return this.args.topConsumers.slice(0, PREVIEW_LIMIT); } /** * The number of top consumers hidden from the preview */ get numberOfTopConsumersHidden(): number { const listLength = this.args.topConsumers.length; return Math.max(listLength - PREVIEW_LIMIT, 0); } /** * Action triggered when the user clicks on the show more button */ onClickShowMore: () => void = this.args.onClickShowMore || noop; }
TopConsumersInsightStrip
index.js
import React from "react"
const Option = Select.Option const Cascader = props => { return ( <div> <label>{props.label}</label> <Select showArrow allowClear style={props.style} placeholder={props.placeholder} optionFilterProp="children" onChange={props.handleChange} onFocus={props.handleFocus} onBlur={props.handleBlur} filterOption={props.filterOptions} defaultValue={props.defaultValue} > {props.optionArray ? ( props.optionArray.map((data, index) => { return <Option key={index} value={data.value}>{data.name}</Option> }) ) : ( <Option value="noData" disabled> No Data Found </Option> )} </Select> </div> ) } Cascader.defaultProps = { optionArray: [], placeholder: "Select One", style: { width: "100%" } } export default Cascader
import PropTypes from "prop-types" import { Select } from "antd"
main.go
package main import ( "flag" "fmt" "io/ioutil" "os" "path/filepath" "sort" "strconv" "strings" "time" "github.com/GeoNet/delta/meta" ) const ( DateFormat = "2006-01-02" DateTimeFormat = "2006-01-02T15:04:05" ) var HeaderComments map[string]string = map[string]string{ "linz": ` Data supplied by the GeoNet project. GeoNet is core funded by EQC, with support from LINZ, and is operated by GNS on behalf of EQC and all New Zealanders. Contact: www.geonet.org.nz email: [email protected]. `, "geonet": ` Data supplied by the GeoNet project. GeoNet is core funded by EQC and is operated by GNS on behalf of EQC and all New Zealanders. Contact: www.geonet.org.nz email: [email protected]. `, "gsi": ` Data supplied by the GeoNet project and GSI (Tsukuba, Japan). GeoNet is core funded by EQC and is operated by GNS on behalf of EQC and all New Zealanders. Contact: www.geonet.org.nz email: [email protected]. `, "sagenz": ` Data supplied by the GeoNet project as part of the SAGENZ project. GeoNet is core funded by EQC and is operated by GNS on behalf of EQC and all New Zealanders. Contact: www.geonet.org.nz email: [email protected]. `, "swpacific": ` Data supplied by the GeoNet project as part of a joint project involving GNS, Ohio State Univ., Pacific GPS Facility at Univ. Hawaii, and the governments of a number of SW Pacific states. GeoNet is core funded by EQC and is operated by GNS on behalf of EQC and all New Zealanders. Contact: www.geonet.org.nz email: [email protected]. `, } var DownloadNameFormats map[string]DownloadNameFormatXML = map[string]DownloadNameFormatXML{ "x4": { Type: "long", Year: "x4 A4 x*", Month: "x8 A2 x*", Day: "x10 A2 x*", Hour: "x12 A2 x*", Minute: "x14 A2 x*", Second: "x16 A2 x*", }, "x5": { Type: "long", Year: "x5 A4 x*", Month: "x9 A2 x*", Day: "x11 A2 x*", Hour: "x14 A2 x*", Minute: "x16 A2 x*", Second: "x18 A2 x*", }, "x6": { Type: "short", Year: "x6 A2", YearDay: "x9 A3", }, "x15": { Type: "long", Year: "x15 A4 x*", Month: "x20 A2 x*", Day: "x23 A2 x*", Hour: "x26 A2 x*", Minute: "x28 A2 x*", Second: "x30 A2 x*", }, "x17": { Type: "long", Year: "x17 A4 x*", Month: "x22 A2 x*", Day: "x25 A2 x*", Hour: "x28 A2 x*", Minute: "x30 A2 x*", Second: "x32 A2 x*", }, "x18": { Type: "long", Year: "x18 A4 x*", Month: "x23 A2 x*", Day: "x26 A2 x*", Hour: "x29 A2 x*", Minute: "x31 A2 x*", Second: "x33 A2 x*", }, "x19": { Type: "long", Year: "x19 A4 x*", Month: "x24 A2 x*", Day: "x27 A2 x*", Hour: "x30 A2 x*", Minute: "x32 A2 x*", Second: "x34 A2 x*", }, "x21": { Type: "long", Year: "x21 A4 x*", Month: "x26 A2 x*", Day: "x29 A2 x*", Hour: "x32 A2 x*", Minute: "x34 A2 x*", Second: "x36 A2 x*", }, } type SessionList []meta.Session func (s SessionList) Len() int { return len(s) } func (s SessionList) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s SessionList) Less(i, j int) bool { return s[i].Start.After(s[j].Start) } func main() { var verbose bool flag.BoolVar(&verbose, "verbose", false, "make noise") var output string flag.StringVar(&output, "output", "output", "output directory")
var stopped string flag.StringVar(&stopped, "stopped", "stopped.xml", "stopped status file name") var network string flag.StringVar(&network, "network", "../../network", "base network directory") var install string flag.StringVar(&install, "install", "../../install", "base install directory") flag.Usage = func() { fmt.Fprintf(os.Stderr, "\n") fmt.Fprintf(os.Stderr, "Build a RINEX configuration site XML file from delta meta information\n") fmt.Fprintf(os.Stderr, "\n") fmt.Fprintf(os.Stderr, "Usage:\n") fmt.Fprintf(os.Stderr, "\n") fmt.Fprintf(os.Stderr, " %s [options]\n", os.Args[0]) fmt.Fprintf(os.Stderr, "\n") fmt.Fprintf(os.Stderr, "Options:\n") fmt.Fprintf(os.Stderr, "\n") flag.PrintDefaults() fmt.Fprintf(os.Stderr, "\n") } flag.Parse() var firmwareHistoryList meta.FirmwareHistoryList if err := meta.LoadList(filepath.Join(install, "firmware.csv"), &firmwareHistoryList); err != nil { fmt.Fprintf(os.Stderr, "error: unable to load firmware history: %v\n", err) os.Exit(-1) } firmwareHistory := make(map[string]map[string][]meta.FirmwareHistory) for _, i := range firmwareHistoryList { if _, ok := firmwareHistory[i.Model]; !ok { firmwareHistory[i.Model] = make(map[string][]meta.FirmwareHistory) } firmwareHistory[i.Model][i.Serial] = append(firmwareHistory[i.Model][i.Serial], i) } for j := range firmwareHistory { for k := range firmwareHistory[j] { sort.Sort(meta.FirmwareHistoryList(firmwareHistory[j][k])) } } var installedAntennaList meta.InstalledAntennaList if err := meta.LoadList(filepath.Join(install, "antennas.csv"), &installedAntennaList); err != nil { fmt.Fprintf(os.Stderr, "error: unable to load antenna installs: %v\n", err) os.Exit(-1) } installedAntenna := make(map[string][]meta.InstalledAntenna) for _, i := range installedAntennaList { installedAntenna[i.Mark] = append(installedAntenna[i.Mark], i) } for i := range installedAntenna { sort.Sort(sort.Reverse(meta.InstalledAntennaList(installedAntenna[i]))) } var deployedReceiverList meta.DeployedReceiverList if err := meta.LoadList(filepath.Join(install, "receivers.csv"), &deployedReceiverList); err != nil { fmt.Fprintf(os.Stderr, "error: unable to load receiver installs: %v\n", err) os.Exit(-1) } deployedReceivers := make(map[string][]meta.DeployedReceiver) for _, i := range deployedReceiverList { deployedReceivers[i.Mark] = append(deployedReceivers[i.Mark], i) } for i := range deployedReceivers { sort.Sort(sort.Reverse(meta.DeployedReceiverList(deployedReceivers[i]))) } var installedRadomeList meta.InstalledRadomeList if err := meta.LoadList(filepath.Join(install, "radomes.csv"), &installedRadomeList); err != nil { fmt.Fprintf(os.Stderr, "error: unable to load radome installs: %v\n", err) os.Exit(-1) } installedRadomes := make(map[string][]meta.InstalledRadome) for _, i := range installedRadomeList { installedRadomes[i.Mark] = append(installedRadomes[i.Mark], i) } for i := range installedRadomes { sort.Sort(meta.InstalledRadomeList(installedRadomes[i])) } var installedMetSensorList meta.InstalledMetSensorList if err := meta.LoadList(filepath.Join(install, "metsensors.csv"), &installedMetSensorList); err != nil { fmt.Fprintf(os.Stderr, "error: unable to load metsensors installs: %v\n", err) os.Exit(-1) } installedMetSensors := make(map[string][]meta.InstalledMetSensor) for _, i := range installedMetSensorList { installedMetSensors[i.Mark] = append(installedMetSensors[i.Mark], i) } for i := range installedMetSensors { sort.Sort(meta.InstalledMetSensorList(installedMetSensors[i])) } var markList meta.MarkList if err := 
meta.LoadList(filepath.Join(network, "marks.csv"), &markList); err != nil { fmt.Fprintf(os.Stderr, "error: unable to load mark list: %v\n", err) os.Exit(-1) } var monumentList meta.MonumentList if err := meta.LoadList(filepath.Join(network, "monuments.csv"), &monumentList); err != nil { fmt.Fprintf(os.Stderr, "error: unable to load monuments list: %v\n", err) os.Exit(-1) } monuments := make(map[string]meta.Monument) for _, m := range monumentList { monuments[m.Mark] = m } var sessionList meta.SessionList if err := meta.LoadList(filepath.Join(install, "sessions.csv"), &sessionList); err != nil { fmt.Fprintf(os.Stderr, "error: unable to load session list: %v\n", err) os.Exit(-1) } sort.Sort(SessionList(sessionList)) var on, off []Mark sessions := make(map[string][]meta.Session) for _, s := range sessionList { sessions[s.Mark] = append(sessions[s.Mark], s) } for _, m := range markList { if _, ok := sessions[m.Code]; !ok { continue } if _, ok := installedAntenna[m.Code]; !ok { continue } if _, ok := deployedReceivers[m.Code]; !ok { continue } var list []CGPSSessionXML for _, s := range sessions[m.Code] { for _, a := range installedAntenna[m.Code] { if a.Start.After(s.End) || a.End.Before(s.Start) { continue } for _, r := range deployedReceivers[m.Code] { if (!r.Start.Before(s.End)) || (!r.End.After(s.Start)) { continue } if r.Start.After(a.End) || r.End.Before(a.Start) { continue } radome := "NONE" if _, ok := installedRadomes[m.Code]; ok { for _, v := range installedRadomes[m.Code] { if v.Start.After(a.End) || v.Start.After(r.End) { continue } if v.End.Before(a.Start) || v.End.Before(r.Start) { continue } radome = v.Model } } var metsensor *MetSensor if _, ok := installedMetSensors[m.Code]; ok { for _, v := range installedMetSensors[m.Code] { if (!v.Start.Before(s.End)) || (!v.End.After(s.Start)) { continue } metsensor = &MetSensor{ Model: v.Make, Type: v.Model + " S/N " + v.Serial, HrAccuracy: strconv.FormatFloat(v.Accuracy.Humidity, 'g', -1, 64), PrAccuracy: strconv.FormatFloat(v.Accuracy.Pressure, 'g', -1, 64), TdAccuracy: strconv.FormatFloat(v.Accuracy.Temperature, 'g', -1, 64), } } } var firmware []FirmwareHistoryXML if _, ok := firmwareHistory[r.Model]; ok { if _, ok := firmwareHistory[r.Model][r.Serial]; ok { for i := range firmwareHistory[r.Model][r.Serial] { v := firmwareHistory[r.Model][r.Serial][len(firmwareHistory[r.Model][r.Serial])-i-1] /* if v.End.Before(r.Start) || v.Start.After(r.End) { continue } */ firmware = append(firmware, FirmwareHistoryXML{ StartTime: v.Start.Format(DateTimeFormat), StopTime: func() string { if time.Now().After(v.End) { return v.End.Format(DateTimeFormat) } else { return "open" } }(), Version: v.Version, }) } } } list = append(list, CGPSSessionXML{ StartTime: func() string { if r.Start.After(s.Start) && r.Start.After(a.Start) { return r.Start.Format(DateTimeFormat) } else if a.Start.After(s.Start) { return a.Start.Format(DateTimeFormat) } else { return s.Start.Format(DateTimeFormat) } }(), StopTime: func() string { if r.End.Before(s.End) && r.End.Before(a.End) { if time.Now().After(r.End) { return r.End.Format(DateTimeFormat) } else { return "open" } } else if a.End.Before(s.End) { if time.Now().After(a.End) { return a.End.Format(DateTimeFormat) } else { return "open" } } else { if time.Now().After(s.End) { return s.End.Format(DateTimeFormat) } else { return "open" } } }(), Receiver: ReceiverXML{ SerialNumber: r.Serial, IGSDesignation: r.Model, FirmwareHistories: firmware, }, InstalledCGPSAntenna: InstalledCGPSAntennaXML{ Height: Number{Units: "m", 
Value: fmt.Sprintf("%.4f", a.Vertical)}, OffsetNorth: Number{Units: "m", Value: fmt.Sprintf("%.4f", a.North)}, OffsetEast: Number{Units: "m", Value: fmt.Sprintf("%.4f", a.East)}, Radome: radome, CGPSAntenna: CGPSAntennaXML{ SerialNumber: a.Serial, IGSDesignation: a.Model, }, }, MetSensor: metsensor, ObservationInterval: Number{ Units: "s", Value: fmt.Sprintf("%.0f", s.Interval.Seconds()), }, Operator: OperatorXML{ Name: s.Operator, Agency: s.Agency, }, Rinex: RinexXML{ HeaderCommentName: s.HeaderComment, HeaderCommentText: func() string { if t, ok := HeaderComments[s.HeaderComment]; ok { return strings.Replace(strings.Join(strings.Fields(t), " "), "email", " email", -1) } return "" }(), }, DataFormat: func() string { parts := strings.Fields(s.Format) if len(parts) > 0 { return parts[0] } return "unknown" }(), DownloadNameFormat: func() DownloadNameFormatXML { parts := strings.Fields(s.Format) if len(parts) > 1 { if f, ok := DownloadNameFormats[parts[1]]; ok { return f } } return DownloadNameFormatXML{} }(), }) } } } sort.Sort(CGPSSessionXMLs(list)) if time.Now().Before(m.End) { on = append(on, Mark{ Name: m.Name, Code: m.Code, Lat: strconv.FormatFloat(m.Latitude, 'f', 14, 64), Lon: strconv.FormatFloat(m.Longitude, 'f', 14, 64), Network: m.Network, Opened: m.Start.Format(DateFormat), }) } else { off = append(off, Mark{ Name: m.Name, Code: m.Code, Lat: strconv.FormatFloat(m.Latitude, 'f', 14, 64), Lon: strconv.FormatFloat(m.Longitude, 'f', 14, 64), Network: m.Network, Opened: m.Start.Format(DateFormat), Closed: m.End.Format(DateFormat), }) } x := NewSiteXML( MarkXML{ GeodeticCode: m.Code, DomesNumber: func() string { if c, ok := monuments[m.Code]; ok { return c.DomesNumber } return "" }(), }, LocationXML{ Latitude: m.Latitude, Longitude: m.Longitude, Height: m.Elevation, Datum: m.Datum, }, list, ) s, err := x.Marshal() if err != nil { fmt.Fprintf(os.Stderr, "error: unable to marshal xml: %v\n", err) os.Exit(-1) } s = []byte(strings.Replace(string(s), "<domes-number></domes-number>", "<domes-number />", -1)) xmlfile := filepath.Join(output, strings.ToLower(m.Code)+".xml") if err := os.MkdirAll(filepath.Dir(xmlfile), 0755); err != nil { fmt.Fprintf(os.Stderr, "error: unable to create dir: %v\n", err) os.Exit(-1) } if err := ioutil.WriteFile(xmlfile, s, 0644); err != nil { fmt.Fprintf(os.Stderr, "error: unable to write file: %v\n", err) os.Exit(-1) } } s, err := (Marks{Name: "CGPS Marks. Status: Operational. ", Marks: on}).Marshal() if err != nil { fmt.Fprintf(os.Stderr, "error: unable to marshal xml: %v\n", err) os.Exit(-1) } s = []byte(strings.Replace(string(s), "></mark>", "/>", -1)) xmlfile := filepath.Join(output, operational) if err := os.MkdirAll(filepath.Dir(xmlfile), 0755); err != nil { fmt.Fprintf(os.Stderr, "error: unable to create dir: %v\n", err) os.Exit(-1) } if err := ioutil.WriteFile(xmlfile, s, 0644); err != nil { fmt.Fprintf(os.Stderr, "error: unable to write file: %v\n", err) os.Exit(-1) } s, err = (Marks{Name: "CGPS Marks. Status: Stopped. ", Marks: off}).Marshal() if err != nil { fmt.Fprintf(os.Stderr, "error: unable to marshal xml: %v\n", err) os.Exit(-1) } s = []byte(strings.Replace(string(s), "></mark>", "/>", -1)) xmlfile = filepath.Join(output, stopped) if err := os.MkdirAll(filepath.Dir(xmlfile), 0755); err != nil { fmt.Fprintf(os.Stderr, "error: unable to create dir: %v\n", err) os.Exit(-1) } if err := ioutil.WriteFile(xmlfile, s, 0644); err != nil { fmt.Fprintf(os.Stderr, "error: unable to write file: %v\n", err) os.Exit(-1) } }
var operational string flag.StringVar(&operational, "operational", "operational.xml", "operational status file name")
public_api.go
package cmd // TODO: this file should probably move out of `./cli/cmd` into something like // `./cli/publicapi` or `./cli/pkg`: // https://github.com/linkerd/linkerd2/issues/2735 import ( "context" "fmt" "os" "time" "github.com/linkerd/linkerd2/controller/api/public" pb "github.com/linkerd/linkerd2/controller/gen/public" "github.com/linkerd/linkerd2/pkg/healthcheck" "github.com/linkerd/linkerd2/pkg/k8s" ) // rawPublicAPIClient creates a raw public API client with no validation. func rawPublicAPIClient(ctx context.Context) (pb.ApiClient, error) { if apiAddr != "" { return public.NewInternalClient(controlPlaneNamespace, apiAddr) } kubeAPI, err := k8s.NewAPI(kubeconfigPath, kubeContext, impersonate, impersonateGroup, 0) if err != nil { return nil, err } return public.NewExternalClient(ctx, controlPlaneNamespace, kubeAPI) } // checkPublicAPIClientOrExit builds a new public API client and executes default status // checks to determine if the client can successfully perform cli commands. If the // checks fail, then the CLI will print an error and exit. func checkPublicAPIClientOrExit() public.APIClient { return checkPublicAPIClientOrRetryOrExit(time.Time{}, false) } // checkPublicAPIClientOrRetryOrExit builds a new public API client and executes status // checks to determine if the client can successfully connect to the API. If the // checks fail, then the CLI will print an error and exit. If the retryDeadline // param is specified, then the CLI will print a message to stderr and retry. func checkPublicAPIClientOrRetryOrExit(retryDeadline time.Time, apiChecks bool) public.APIClient { checks := []healthcheck.CategoryID{ healthcheck.KubernetesAPIChecks, healthcheck.LinkerdControlPlaneExistenceChecks, } if apiChecks { checks = append(checks, healthcheck.LinkerdAPIChecks) } hc := newHealthChecker(checks, retryDeadline) hc.RunChecks(exitOnError) return hc.PublicAPIClient() } func newHealthChecker(checks []healthcheck.CategoryID, retryDeadline time.Time) *healthcheck.HealthChecker { return healthcheck.NewHealthChecker(checks, &healthcheck.Options{ ControlPlaneNamespace: controlPlaneNamespace, KubeConfig: kubeconfigPath, KubeContext: kubeContext, Impersonate: impersonate, ImpersonateGroup: impersonateGroup, APIAddr: apiAddr, RetryDeadline: retryDeadline, }) } func
(result *healthcheck.CheckResult) { if result.Retry { fmt.Fprintln(os.Stderr, "Waiting for control plane to become available") return } if result.Err != nil && !result.Warning { var msg string switch result.Category { case healthcheck.KubernetesAPIChecks: msg = "Cannot connect to Kubernetes" case healthcheck.LinkerdControlPlaneExistenceChecks: msg = "Cannot find Linkerd" case healthcheck.LinkerdAPIChecks: msg = "Cannot connect to Linkerd" } fmt.Fprintf(os.Stderr, "%s: %s\n", msg, result.Err) checkCmd := "linkerd check" if controlPlaneNamespace != defaultLinkerdNamespace { checkCmd += fmt.Sprintf(" --linkerd-namespace %s", controlPlaneNamespace) } fmt.Fprintf(os.Stderr, "Validate the install with: %s\n", checkCmd) os.Exit(1) } }
exitOnError
group.go
// Copyright 2018 syncd Author. All Rights Reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package user import ( "errors" "strings" "fmt" "github.com/tinystack/goutil/gostring" "github.com/tinystack/goutil/gois" baseModel "github.com/dreamans/syncd/model" userGroupModel "github.com/dreamans/syncd/model/user_group" ) type Group struct { ID int `json:"id"` Name string `json:"name"` Priv []int `json:"priv"` Ctime int `json:"ctime"` } type GroupItem struct { ID int `json:"id"` Name string `json:"name"` } func GroupUserListFillGroupName(list []UserItem) ([]UserItem, error)
func (g *Group) Detail() error { if g.ID == 0 { return errors.New("id can not be empty") } detail, ok := userGroupModel.Get(g.ID) if !ok { return errors.New("get user group detail data failed") } if detail.ID == 0 { return errors.New("user group not exists") } privList := []int{} if detail.Priv != "" { strPrivList := gostring.StrFilterSliceEmpty(strings.Split(detail.Priv, ",")) privList = gostring.StrSlice2IntSlice(strPrivList) } g.ID = detail.ID g.Name = detail.Name g.Priv = privList g.Ctime = detail.Ctime return nil } func (g *Group) CreateOrUpdate() error { var ok bool group := userGroupModel.UserGroup{ Name: g.Name, Priv: strings.Join(gostring.IntSlice2StrSlice(g.Priv), ","), } if g.ID > 0 { ok = userGroupModel.Update(g.ID, map[string]interface{}{ "name": group.Name, "priv": group.Priv, }) } else { ok = userGroupModel.Create(&group) } if !ok { return errors.New("user group data update failed") } return nil } func (g *Group) List(keyword string, offset, limit int) ([]GroupItem, int, error) { var ( ok bool groupId, total int where []baseModel.WhereParam groupList []GroupItem ) if keyword != "" { if gois.IsInteger(keyword) { groupId = gostring.Str2Int(keyword) if groupId > 0 { where = append(where, baseModel.WhereParam{ Field: "id", Prepare: groupId, }) } } else { where = append(where, baseModel.WhereParam{ Field: "name", Tag: "LIKE", Prepare: fmt.Sprintf("%%%s%%", keyword), }) } } list, ok := userGroupModel.List(baseModel.QueryParam{ Fields: "id, name", Offset: offset, Limit: limit, Order: "id DESC", Where: where, }) if !ok { return nil, 0, errors.New("get user group list data failed") } total, ok = userGroupModel.Total(baseModel.QueryParam{ Where: where, }) if !ok { return nil, 0, errors.New("get user group total count failed") } for _, g := range list { groupList = append(groupList, GroupItem{ ID: g.ID, Name: g.Name, }) } return groupList, total, nil } func (g *Group) Delete() error { if g.ID == 0 { return errors.New("id can not be empty") } if err := g.Detail(); err != nil { return errors.New("user group not exists") } ok := userGroupModel.Delete(g.ID) if !ok { return errors.New("user group delete failed") } return nil } func (g *Group) GetNameByIds(ids []int) (map[int]string, error){ list, ok := userGroupModel.List(baseModel.QueryParam{ Fields: "id, name", Where: []baseModel.WhereParam{ baseModel.WhereParam{ Field: "id", Tag: "IN", Prepare: ids, }, }, }) if !ok { return nil, errors.New("get user group list failed") } groupNameList := make(map[int]string) for _, g := range list { groupNameList[g.ID] = g.Name } return groupNameList, nil } func (g *Group) CheckGroupExists() (bool, error){ var where []baseModel.WhereParam if g.Name != "" { where = append(where, baseModel.WhereParam{ Field: "name", Prepare: g.Name, }) } if g.ID > 0 { where = append(where, baseModel.WhereParam{ Field: "id", Tag: "!=", Prepare: g.ID, }) } detail, ok := userGroupModel.GetOne(baseModel.QueryParam{ Where: where, }) if !ok { return false, errors.New("get group one data failed") } return detail.ID > 0, nil }
{ var groupIdList []int for _, l := range list { groupIdList = append(groupIdList, l.GroupId) } if len(groupIdList) > 0 { group := &Group{} groupNameList, err := group.GetNameByIds(groupIdList) if err != nil { return nil, err } for k, v := range list { if groupName, exists := groupNameList[v.GroupId]; exists { list[k].GroupName = groupName } } } return list, nil }
response_parser.go
package skyspark import ( "encoding/json" "fmt" "regexp" "strconv" "strings" "github.com/grafana/grafana/pkg/components/null" "github.com/grafana/grafana/pkg/tsdb" ) type ResponseParser struct{} var ( legendFormat *regexp.Regexp ) func
() { legendFormat = regexp.MustCompile(`\[\[(\w+)(\.\w+)*\]\]*|\$\s*(\w+?)*`) } func (rp *ResponseParser) Parse(response *Response, query *Query) *tsdb.QueryResult { queryRes := tsdb.NewQueryResult() for _, result := range response.Results { queryRes.Series = append(queryRes.Series, rp.transformRows(result.Series, queryRes, query)...) } return queryRes } func (rp *ResponseParser) transformRows(rows []Row, queryResult *tsdb.QueryResult, query *Query) tsdb.TimeSeriesSlice { var result tsdb.TimeSeriesSlice for _, row := range rows { for columnIndex, column := range row.Columns { if column == "time" { continue } var points tsdb.TimeSeriesPoints for _, valuePair := range row.Values { point, err := rp.parseTimepoint(valuePair, columnIndex) if err == nil { points = append(points, point) } } result = append(result, &tsdb.TimeSeries{ Name: rp.formatSerieName(row, column, query), Points: points, Tags: row.Tags, }) } } return result } func (rp *ResponseParser) formatSerieName(row Row, column string, query *Query) string { if query.Alias == "" { return rp.buildSerieNameFromQuery(row, column) } nameSegment := strings.Split(row.Name, ".") result := legendFormat.ReplaceAllFunc([]byte(query.Alias), func(in []byte) []byte { aliasFormat := string(in) aliasFormat = strings.Replace(aliasFormat, "[[", "", 1) aliasFormat = strings.Replace(aliasFormat, "]]", "", 1) aliasFormat = strings.Replace(aliasFormat, "$", "", 1) if aliasFormat == "m" || aliasFormat == "measurement" { return []byte(query.Measurement) } if aliasFormat == "col" { return []byte(column) } pos, err := strconv.Atoi(aliasFormat) if err == nil && len(nameSegment) >= pos { return []byte(nameSegment[pos]) } if !strings.HasPrefix(aliasFormat, "tag_") { return in } tagKey := strings.Replace(aliasFormat, "tag_", "", 1) tagValue, exist := row.Tags[tagKey] if exist { return []byte(tagValue) } return in }) return string(result) } func (rp *ResponseParser) buildSerieNameFromQuery(row Row, column string) string { var tags []string for k, v := range row.Tags { tags = append(tags, fmt.Sprintf("%s: %s", k, v)) } tagText := "" if len(tags) > 0 { tagText = fmt.Sprintf(" { %s }", strings.Join(tags, " ")) } return fmt.Sprintf("%s.%s%s", row.Name, column, tagText) } func (rp *ResponseParser) parseTimepoint(valuePair []interface{}, valuePosition int) (tsdb.TimePoint, error) { var value null.Float = rp.parseValue(valuePair[valuePosition]) timestampNumber, _ := valuePair[0].(json.Number) timestamp, err := timestampNumber.Float64() if err != nil { return tsdb.TimePoint{}, err } return tsdb.NewTimePoint(value, timestamp), nil } func (rp *ResponseParser) parseValue(value interface{}) null.Float { number, ok := value.(json.Number) if !ok { return null.FloatFromPtr(nil) } fvalue, err := number.Float64() if err == nil { return null.FloatFrom(fvalue) } ivalue, err := number.Int64() if err == nil { return null.FloatFrom(float64(ivalue)) } return null.FloatFromPtr(nil) }
init
scroll.service.ts
import { Injectable, Inject } from '@angular/core'; import { PlatformLocation } from '@angular/common'; import { DOCUMENT } from '@angular/platform-browser'; import {fromEvent} from 'rxjs/observable/fromEvent'; export const topMargin = 16; /** * A service that scrolls document elements into view */ @Injectable() export class
{ private _topOffset: number | null; private _topOfPageElement: Element; // Offset from the top of the document to bottom of any static elements // at the top (e.g. toolbar) + some margin get topOffset() { if (!this._topOffset) { const toolbar = this.document.querySelector('.app-toolbar'); this._topOffset = (toolbar && toolbar.clientHeight || 0) + topMargin; } return this._topOffset!; } get topOfPageElement() { if (!this._topOfPageElement) { this._topOfPageElement = this.document.getElementById('top-of-page') || this.document.body; } return this._topOfPageElement; } constructor( @Inject(DOCUMENT) private document: any, private location: PlatformLocation) { // On resize, the toolbar might change height, so "invalidate" the top offset. fromEvent(window, 'resize').subscribe(() => this._topOffset = null); } /** * Scroll to the element with id extracted from the current location hash fragment. * Scroll to top if no hash. * Don't scroll if hash not found. */ scroll() { const hash = this.getCurrentHash(); const element: HTMLElement = hash ? this.document.getElementById(hash) : this.topOfPageElement; this.scrollToElement(element); } /** * Scroll to the element. * Don't scroll if no element. */ scrollToElement(element: Element|null) { if (element) { element.scrollIntoView(); if (window && window.scrollBy) { // Scroll as much as necessary to align the top of `element` at `topOffset`. // (Usually, `.top` will be 0, except for cases where the element cannot be scrolled all the // way to the top, because the viewport is larger than the height of the content after the // element.) window.scrollBy(0, element.getBoundingClientRect().top - this.topOffset); // If we are very close to the top (<20px), then scroll all the way up. // (This can happen if `element` is at the top of the page, but has a small top-margin.) if (window.pageYOffset < 20) { window.scrollBy(0, -window.pageYOffset); } } } } /** Scroll to the top of the document. */ scrollToTop() { this.scrollToElement(this.topOfPageElement); } /** * Return the hash fragment from the `PlatformLocation`, minus the leading `#`. */ private getCurrentHash() { return decodeURIComponent(this.location.hash.replace(/^#/, '')); } }
ScrollService
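The scrollToElement arithmetic above is easiest to check with concrete numbers; a minimal sketch, with every value made up for illustration:

# After scrollIntoView() puts the element's top at viewport y = 0,
# scrolling by (rect.top - topOffset) leaves it topOffset px below the top.
toolbar_height = 64                       # assumed .app-toolbar height
top_margin = 16                           # the exported topMargin constant
top_offset = toolbar_height + top_margin  # mirrors the topOffset getter: 80

rect_top = 0                              # element top after scrollIntoView()
scroll_delta = rect_top - top_offset
assert scroll_delta == -80                # scrollBy(0, -80): scroll up 80px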
level-one.js
var Player = require('./player'); var Listener = require('./listener'); var LevelOneLayout = require('./level-one-layout'); var Detector = require('./detector'); var UserInfoDraw = require('./user-info-draw'); function LevelOne (canvas, score) { this.blocks = [];
this.listener = new Listener(canvas, this.firstPlayer, this.projectiles, score); this.level = new LevelOneLayout(this.blocks); this.detector = new Detector(this.firstPlayer, this.blocks, this.projectiles); this.userInfoDraw = new UserInfoDraw(); this.listener.setListeners(); this.level.buildLevel(); } module.exports = LevelOne;
this.projectiles = {blue: null, orange: null}; this.firstPlayer = new Player({x: 300, y: 500});
minesweeper.py
import math import random import numpy as np MINE_BIT = 0b01 FLAG_BIT = 0b10 EMPTY_SLOT = 0xFF
SURROUNDING = [ (1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1), (0, -1), (1, -1), ] class Minesweeper: def __init__(self, *shape, seed=None): if len(shape) < 1: shape = (10, 10) bomb_count = 7 else: shape, bomb_count = shape[:-1], shape[-1] if math.prod(shape) < bomb_count: raise ValueError('cannot be more bombs than spaces on the board') self.board_matrix = np.zeros(shape, 'uint16') self.render_matrix = np.full(shape, EMPTY_SLOT, 'uint8') randomizer = random.Random(seed) bombs = [] while bomb_count: bomb = [] for size in shape: bomb.append(randomizer.randrange(size)) bomb = tuple(bomb) if bomb not in bombs: bombs.append(bomb) self.board_matrix[bomb] |= MINE_BIT bomb_count -= 1 def add_flag(self, *pos): self.board_matrix[pos] |= FLAG_BIT self.render_matrix[pos] = FLAG_SLOT def remove_flag(self, *pos): self.board_matrix[pos] ^= FLAG_BIT self.render_matrix[pos] = EMPTY_SLOT def is_flagged(self, *pos): return FLAG_BIT & self.board_matrix[pos] def toggle_flag(self, *pos): if self.is_flagged(*pos): self.remove_flag(*pos) else: self.add_flag(*pos) def _reveal(self, pos): cell = self.board_matrix[pos] if cell & FLAG_BIT: return -2 elif cell & MINE_BIT: return -1 else: count = 0 shape = self.board_matrix.shape for direction in SURROUNDING: # newpos = (pos[0] + direction[0], pos[1] + direction[1]) newpos = tuple(map(sum, ((pos[x], direction[x]) for x in range(len(direction))))) if all(map((lambda x: x[1] >= 0 and x[1] < shape[x[0]]), enumerate(newpos))): count += self.board_matrix[newpos] & MINE_BIT return count def reveal(self, *pos): count = self._reveal(pos) if count >= 0: self.render_matrix[pos] = count return count def recursive_reveal(self, *pos, reached=None): if reached is None: reached = set() if pos in reached: return None count = self.reveal(*pos) reached.add(pos) if count == 0: shape = self.board_matrix.shape for direction in SURROUNDING: # newpos = (pos[0] + direction[0], pos[1] + direction[1]) newpos = tuple(map(sum, ((pos[x], direction[x]) for x in range(len(direction))))) if all(map((lambda x: x[1] >= 0 and x[1] < shape[x[0]]), enumerate(newpos))): if newpos not in reached: self.recursive_reveal(*newpos, reached=reached) return count def has_won(self): return all((bool(cell & FLAG_BIT) == bool(cell & MINE_BIT)) for cell in np.nditer(self.board_matrix)) def reveal_all(self): for (pos, cell) in np.ndenumerate(self.board_matrix): if not cell & FLAG_BIT and not cell & MINE_BIT: self.reveal(*pos)
FLAG_SLOT = 0xFE
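A minimal sketch of the cell encoding used by the Minesweeper class above: bit 0 carries the mine and bit 1 the flag, so the two facts can be set, tested, and toggled independently in one integer.

MINE_BIT = 0b01
FLAG_BIT = 0b10

cell = 0
cell |= MINE_BIT                 # plant a mine (board setup)
cell |= FLAG_BIT                 # player flags the square (add_flag)
assert cell & MINE_BIT and cell & FLAG_BIT
cell ^= FLAG_BIT                 # remove_flag() toggles the flag bit off
assert cell & MINE_BIT and not cell & FLAG_BIT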
no_macrons.rs
use crate::iter::char::Iterators; use crate::litterae; /// Strip macrons from the `char` sequence.
iter: I, } impl<I: Iterator<Item = char>> NoMacrons<I> { pub fn new(iter: I) -> NoMacrons<I> { NoMacrons { iter } } } impl<I: Iterator<Item = char>> Iterators for NoMacrons<I> {} impl<I: Iterator<Item = char>> Iterator for NoMacrons<I> { type Item = char; fn next(&mut self) -> Option<Self::Item> { self.iter.next().map(litterae::remove_macron) } }
pub struct NoMacrons<I> {
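The adapter above delegates the per-character mapping to litterae::remove_macron. For illustration, a rough Python equivalent of lazily stripping macrons from a character stream; the vowel table here is an assumption, not the litterae implementation:

# Hypothetical macron table; the real mapping lives in `litterae`.
MACRONS = str.maketrans('āēīōūĀĒĪŌŪ', 'aeiouAEIOU')

def no_macrons(chars):
    # Lazy, like the Rust iterator adapter: one char in, one char out.
    for c in chars:
        yield c.translate(MACRONS)

assert ''.join(no_macrons('Rōma')) == 'Roma'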
mod.rs
//! Base elements required to build views. //! //! Views are the main building blocks of your UI. //! //! A view can delegate part or all of its responsibilities to child views, //! forming a view tree. The root of this tree is a `StackView` handled //! directly by the `Cursive` element. //! //! # Layout //! //! The layout phase is when the size and location of each view are computed. //! //! Each view is given an area of the screen by the `View::layout()` method. //! With this, the view is free to plan its content, including calling //! `View::layout()` on its own children. //! //! In order to determine how much space should be given to each child, parents //! can use `View::required_size()` on them. //! //! //! ### Contracts //! //! When building new Views, you should respect these contracts: //! //! * By default, `View::layout()` should be called before any call to //! `View::draw()` with the same available size. The only exception is //! when both of the following conditions are met: //! * The available size has not changed since the last call to //! `View::layout()` //! * `View::needs_relayout()` returns `false` //! //! In this case, it is safe to omit the call to `View::layout()`. //! //! * The value returned by `required_size` should be an actually viable size, //! no matter what the request is. This means calling `View::layout()` with //! a size returned by `required_size` is **never** an error. #[macro_use] mod view_wrapper; // Essential components mod any; mod finder; mod margins; mod position; mod size_cache; mod size_constraint; mod view_path; mod view_trait; // Helper bases mod boxable; mod identifiable;
pub mod scroll; #[cfg(not(feature = "unstable_scroll"))] #[allow(dead_code)] pub(crate) mod scroll; mod scroll_base; mod scrollable; mod into_boxed_view; pub use self::any::AnyView; pub use self::boxable::Boxable; pub use self::finder::{Finder, Selector}; pub use self::identifiable::Identifiable; pub use self::into_boxed_view::IntoBoxedView; pub use self::margins::Margins; pub use self::position::{Offset, Position}; pub use self::scroll::ScrollStrategy; pub use self::scroll_base::ScrollBase; pub use self::scrollable::Scrollable; pub use self::size_cache::SizeCache; pub use self::size_constraint::SizeConstraint; pub use self::view_path::ViewPath; pub use self::view_trait::View; pub use self::view_wrapper::ViewWrapper;
#[cfg(feature = "unstable_scroll")]
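A sketch of the layout/draw contract spelled out in the module docs above: a caller may skip layout() only when the available size is unchanged and needs_relayout() reports false. This is a hypothetical shim for illustration, not the cursive API:

class ViewShim:
    def __init__(self):
        self.last_size = None
        self.dirty = True

    def needs_relayout(self):
        return self.dirty

    def layout(self, size):
        self.last_size = size
        self.dirty = False

    def draw(self, size):
        # Enforce the documented contract before drawing.
        if size != self.last_size or self.needs_relayout():
            self.layout(size)
        print(f'drawing at {size}')

v = ViewShim()
v.draw((80, 24))   # first draw forces a layout
v.draw((80, 24))   # same size, not dirty: layout is safely skipped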
semanal_typeddict.py
"""Semantic analysis of TypedDict definitions.""" from mypy.backports import OrderedDict from typing import Optional, List, Set, Tuple from typing_extensions import Final from mypy.types import Type, AnyType, TypeOfAny, TypedDictType, TPDICT_NAMES from mypy.nodes import ( CallExpr, TypedDictExpr, Expression, NameExpr, Context, StrExpr, BytesExpr, UnicodeExpr, ClassDef, RefExpr, TypeInfo, AssignmentStmt, PassStmt, ExpressionStmt, EllipsisExpr, TempNode, DictExpr, ARG_POS, ARG_NAMED ) from mypy.semanal_shared import SemanticAnalyzerInterface from mypy.exprtotype import expr_to_unanalyzed_type, TypeTranslationError from mypy.options import Options from mypy.typeanal import check_for_explicit_any, has_any_from_unimported_type from mypy.messages import MessageBuilder from mypy.errorcodes import ErrorCode from mypy import errorcodes as codes TPDICT_CLASS_ERROR: Final = ( "Invalid statement in TypedDict definition; " 'expected "field_name: field_type"' ) class TypedDictAnalyzer: def __init__(self, options: Options, api: SemanticAnalyzerInterface, msg: MessageBuilder) -> None: self.options = options self.api = api self.msg = msg def analyze_typeddict_classdef(self, defn: ClassDef) -> Tuple[bool, Optional[TypeInfo]]: """Analyze a class that may define a TypedDict. Assume that base classes have been analyzed already. Note: Unlike normal classes, we won't create a TypeInfo until the whole definition of the TypeDict (including the body and all key names and types) is complete. This is mostly because we store the corresponding TypedDictType in the TypeInfo. Return (is this a TypedDict, new TypeInfo). Specifics: * If we couldn't finish due to incomplete reference anywhere in the definition, return (True, None). * If this is not a TypedDict, return (False, None). 
""" possible = False for base_expr in defn.base_type_exprs: if isinstance(base_expr, RefExpr): self.api.accept(base_expr) if base_expr.fullname in TPDICT_NAMES or self.is_typeddict(base_expr): possible = True if possible: if (len(defn.base_type_exprs) == 1 and isinstance(defn.base_type_exprs[0], RefExpr) and defn.base_type_exprs[0].fullname in TPDICT_NAMES): # Building a new TypedDict fields, types, required_keys = self.analyze_typeddict_classdef_fields(defn) if fields is None: return True, None # Defer info = self.build_typeddict_typeinfo(defn.name, fields, types, required_keys, defn.line) defn.analyzed = TypedDictExpr(info) defn.analyzed.line = defn.line defn.analyzed.column = defn.column return True, info # Extending/merging existing TypedDicts if any(not isinstance(expr, RefExpr) or expr.fullname not in TPDICT_NAMES and not self.is_typeddict(expr) for expr in defn.base_type_exprs): self.fail("All bases of a new TypedDict must be TypedDict types", defn) typeddict_bases = list(filter(self.is_typeddict, defn.base_type_exprs)) keys: List[str] = [] types = [] required_keys = set() # Iterate over bases in reverse order so that leftmost base class' keys take precedence for base in reversed(typeddict_bases): assert isinstance(base, RefExpr) assert isinstance(base.node, TypeInfo) assert isinstance(base.node.typeddict_type, TypedDictType) base_typed_dict = base.node.typeddict_type base_items = base_typed_dict.items valid_items = base_items.copy() for key in base_items: if key in keys: self.fail('Overwriting TypedDict field "{}" while merging' .format(key), defn) keys.extend(valid_items.keys()) types.extend(valid_items.values()) required_keys.update(base_typed_dict.required_keys) new_keys, new_types, new_required_keys = self.analyze_typeddict_classdef_fields(defn, keys) if new_keys is None: return True, None # Defer keys.extend(new_keys) types.extend(new_types) required_keys.update(new_required_keys) info = self.build_typeddict_typeinfo(defn.name, keys, types, required_keys, defn.line) defn.analyzed = TypedDictExpr(info) defn.analyzed.line = defn.line defn.analyzed.column = defn.column return True, info return False, None def analyze_typeddict_classdef_fields( self, defn: ClassDef, oldfields: Optional[List[str]] = None) -> Tuple[Optional[List[str]], List[Type], Set[str]]: """Analyze fields defined in a TypedDict class definition. This doesn't consider inherited fields (if any). Also consider totality, if given. Return tuple with these items: * List of keys (or None if found an incomplete reference --> deferral) * List of types for each key * Set of required keys """ fields: List[str] = [] types: List[Type] = [] for stmt in defn.defs.body: if not isinstance(stmt, AssignmentStmt): # Still allow pass or ... (for empty TypedDict's). if (not isinstance(stmt, PassStmt) and not (isinstance(stmt, ExpressionStmt) and isinstance(stmt.expr, (EllipsisExpr, StrExpr)))): self.fail(TPDICT_CLASS_ERROR, stmt) elif len(stmt.lvalues) > 1 or not isinstance(stmt.lvalues[0], NameExpr): # An assignment, but an invalid one. self.fail(TPDICT_CLASS_ERROR, stmt) else: name = stmt.lvalues[0].name if name in (oldfields or []): self.fail('Overwriting TypedDict field "{}" while extending' .format(name), stmt) if name in fields: self.fail('Duplicate TypedDict key "{}"'.format(name), stmt) continue # Append name and type in this case... 
fields.append(name) if stmt.type is None: types.append(AnyType(TypeOfAny.unannotated)) else: analyzed = self.api.anal_type(stmt.type) if analyzed is None: return None, [], set() # Need to defer types.append(analyzed) # ...despite possible minor failures that allow further analysis. if stmt.type is None or hasattr(stmt, 'new_syntax') and not stmt.new_syntax: self.fail(TPDICT_CLASS_ERROR, stmt) elif not isinstance(stmt.rvalue, TempNode): # x: int assigns rvalue to TempNode(AnyType()) self.fail('Right hand side values are not supported in TypedDict', stmt) total: Optional[bool] = True if 'total' in defn.keywords: total = self.api.parse_bool(defn.keywords['total']) if total is None: self.fail('Value of "total" must be True or False', defn) total = True required_keys = set(fields) if total else set() return fields, types, required_keys def check_typeddict(self, node: Expression, var_name: Optional[str], is_func_scope: bool) -> Tuple[bool, Optional[TypeInfo]]:
def parse_typeddict_args( self, call: CallExpr) -> Optional[Tuple[str, List[str], List[Type], bool, bool]]: """Parse typed dict call expression. Return names, types, totality, was there an error during parsing. If some type is not ready, return None. """ # TODO: Share code with check_argument_count in checkexpr.py? args = call.args if len(args) < 2: return self.fail_typeddict_arg("Too few arguments for TypedDict()", call) if len(args) > 3: return self.fail_typeddict_arg("Too many arguments for TypedDict()", call) # TODO: Support keyword arguments if call.arg_kinds not in ([ARG_POS, ARG_POS], [ARG_POS, ARG_POS, ARG_NAMED]): return self.fail_typeddict_arg("Unexpected arguments to TypedDict()", call) if len(args) == 3 and call.arg_names[2] != 'total': return self.fail_typeddict_arg( 'Unexpected keyword argument "{}" for "TypedDict"'.format(call.arg_names[2]), call) if not isinstance(args[0], (StrExpr, BytesExpr, UnicodeExpr)): return self.fail_typeddict_arg( "TypedDict() expects a string literal as the first argument", call) if not isinstance(args[1], DictExpr): return self.fail_typeddict_arg( "TypedDict() expects a dictionary literal as the second argument", call) total: Optional[bool] = True if len(args) == 3: total = self.api.parse_bool(call.args[2]) if total is None: return self.fail_typeddict_arg( 'TypedDict() "total" argument must be True or False', call) dictexpr = args[1] res = self.parse_typeddict_fields_with_types(dictexpr.items, call) if res is None: # One of the types is not ready, defer. return None items, types, ok = res for t in types: check_for_explicit_any(t, self.options, self.api.is_typeshed_stub_file, self.msg, context=call) if self.options.disallow_any_unimported: for t in types: if has_any_from_unimported_type(t): self.msg.unimported_type_becomes_any("Type of a TypedDict key", t, dictexpr) assert total is not None return args[0].value, items, types, total, ok def parse_typeddict_fields_with_types( self, dict_items: List[Tuple[Optional[Expression], Expression]], context: Context) -> Optional[Tuple[List[str], List[Type], bool]]: """Parse typed dict items passed as pairs (name expression, type expression). Return names, types, was there an error. If some type is not ready, return None. """ seen_keys = set() items: List[str] = [] types: List[Type] = [] for (field_name_expr, field_type_expr) in dict_items: if isinstance(field_name_expr, (StrExpr, BytesExpr, UnicodeExpr)): key = field_name_expr.value items.append(key) if key in seen_keys: self.fail('Duplicate TypedDict key "{}"'.format(key), field_name_expr) seen_keys.add(key) else: name_context = field_name_expr or field_type_expr self.fail_typeddict_arg("Invalid TypedDict() field name", name_context) return [], [], False try: type = expr_to_unanalyzed_type(field_type_expr, self.options, self.api.is_stub_file) except TypeTranslationError: self.fail_typeddict_arg('Invalid field type', field_type_expr) return [], [], False analyzed = self.api.anal_type(type) if analyzed is None: return None types.append(analyzed) return items, types, True def fail_typeddict_arg(self, message: str, context: Context) -> Tuple[str, List[str], List[Type], bool, bool]: self.fail(message, context) return '', [], [], True, False def build_typeddict_typeinfo(self, name: str, items: List[str], types: List[Type], required_keys: Set[str], line: int) -> TypeInfo: # Prefer typing then typing_extensions if available. 
fallback = (self.api.named_type_or_none('typing._TypedDict', []) or self.api.named_type_or_none('typing_extensions._TypedDict', []) or self.api.named_type_or_none('mypy_extensions._TypedDict', [])) assert fallback is not None info = self.api.basic_new_typeinfo(name, fallback, line) info.typeddict_type = TypedDictType(OrderedDict(zip(items, types)), required_keys, fallback) return info # Helpers def is_typeddict(self, expr: Expression) -> bool: return (isinstance(expr, RefExpr) and isinstance(expr.node, TypeInfo) and expr.node.typeddict_type is not None) def fail(self, msg: str, ctx: Context, *, code: Optional[ErrorCode] = None) -> None: self.api.fail(msg, ctx, code=code)
"""Check if a call defines a TypedDict. The optional var_name argument is the name of the variable to which this is assigned, if any. Return a pair (is it a typed dict, corresponding TypeInfo). If the definition is invalid but looks like a TypedDict, report errors but return (some) TypeInfo. If some type is not ready, return (True, None). """ if not isinstance(node, CallExpr): return False, None call = node callee = call.callee if not isinstance(callee, RefExpr): return False, None fullname = callee.fullname if fullname not in TPDICT_NAMES: return False, None res = self.parse_typeddict_args(call) if res is None: # This is a valid typed dict, but some type is not ready. # The caller should defer this until next iteration. return True, None name, items, types, total, ok = res if not ok: # Error. Construct dummy return value. info = self.build_typeddict_typeinfo('TypedDict', [], [], set(), call.line) else: if var_name is not None and name != var_name: self.fail( 'First argument "{}" to TypedDict() does not match variable name "{}"'.format( name, var_name), node, code=codes.NAME_MATCH) if name != var_name or is_func_scope: # Give it a unique name derived from the line number. name += '@' + str(call.line) required_keys = set(items) if total else set() info = self.build_typeddict_typeinfo(name, items, types, required_keys, call.line) info.line = node.line # Store generated TypeInfo under both names, see semanal_namedtuple for more details. if name != var_name or is_func_scope: self.api.add_symbol_skip_local(name, info) if var_name: self.api.add_symbol(var_name, info, node) call.analyzed = TypedDictExpr(info) call.analyzed.set_line(call.line, call.column) return True, info
generator.rs
use dice::Dice; use rand::Rng; pub trait Generator { type Output: IntoIterator; fn generate(&mut self, dice: &Dice) -> Self::Output; } impl<T: Rng> Generator for T { type Output = Vec<u32>; fn generate(&mut self, dice: &Dice) -> Self::Output { (0..dice.count).map(|_| self.gen_range(0, dice.range) + 1).collect() }
}
trait-object-nested-in-impl-trait.rs
trait Foo {} impl<'a, T: Foo> Foo for &'a T {} impl<T: Foo + ?Sized> Foo for Box<T> {} struct Iter<'a, T> { current: Option<Box<dyn Foo + 'a>>, remaining: T, } impl<'a, T> Iterator for Iter<'a, T> where T: Iterator, T::Item: Foo + 'a, { type Item = Box<dyn Foo + 'a>; fn next(&mut self) -> Option<Self::Item> { let result = self.current.take(); self.current = self.remaining.next().map(|f| Box::new(f) as _); result } } struct Bar(Vec<Box<dyn Foo>>); impl Bar { fn
(&self) -> impl Iterator<Item = Box<dyn Foo>> { Iter { current: None, remaining: self.0.iter(), //~ ERROR cannot infer an appropriate lifetime } } } struct Baz(Vec<Box<dyn Foo>>); impl Baz { fn iter(&self) -> impl Iterator<Item = Box<dyn Foo>> + '_ { Iter { current: None, remaining: self.0.iter(), //~ ERROR cannot infer an appropriate lifetime } } } struct Bat(Vec<Box<dyn Foo>>); impl Bat { fn iter<'a>(&'a self) -> impl Iterator<Item = Box<dyn Foo>> + 'a { Iter { current: None, remaining: self.0.iter(), //~ ERROR cannot infer an appropriate lifetime } } } struct Ban(Vec<Box<dyn Foo>>); impl Ban { fn iter<'a>(&'a self) -> impl Iterator<Item = Box<dyn Foo>> { Iter { current: None, remaining: self.0.iter(), //~ ERROR cannot infer an appropriate lifetime } } } fn main() {}
iter
main.js
import './polyfills'; import Node from './Node'; import {insert} from './utils'; export const delimiter = ' \\ '; /** * Checks whether namespace exists and matches the custom field namespace * @param {Object} field * @param {string} namespace - user provided namespace * @returns {Boolean} */ const belongsToNamespace = (field, namespace) => namespace && field.name.trim().startsWith(namespace); /** * Removes namespace from all of the custom fields * @param {Array} customFields */ const removeNamespaces = (customFields) => customFields.map(field => field.substring(field.indexOf(delimiter)+delimiter.length)); /** * Get node formatted path * @param {Object} node */ const getPath = (node) => node.path.trim(); /** * Checks if a node list contains a certain node * @param {Array} nodeList * @param {Object} childNode */ const nodeExists = (nodeList, childNode) => nodeList.some(rootNode => getPath(rootNode) === getPath(childNode)) /** * Get the unique custom fields sorted based on the initial order and filtered based on the provided namespace * @param {Array} products * @param {string} namespace */ function formatCustomFields(products, namespace) { return products.reduce((fields, product) => { product.custom_fields.forEach((field, index) => { if (!fields.includes(field.name) && belongsToNamespace(field, namespace)) { fields = insert(fields, index, field.name); }
}); return fields; }, []); } class CustomFieldsHierarchy { constructor(customFields) { this.customFields = customFields; this.hierarchy = []; this.constructHierarchy(); } getHierarchy() { return this.hierarchy; } constructHierarchy() { this.customFields.forEach((field) => this.populateHierarchy(field)); } /** * Recursively traverses the hierarchy until it finds the desired parent node * @param {string} parentPath * @param {Array} nodeList * @returns {Node} */ getParent(parentPath, nodeList) { for (const currentNode of nodeList) { if (parentPath.trim() === getPath(currentNode)) { return currentNode; } const parent = this.getParent(parentPath, currentNode.children); if (parent) { return parent; } } } /** * Adds all the nodes to the hierarchy * @param {string} fullPath */ populateHierarchy(fullPath) { const fields = fullPath.split(delimiter); fields.forEach((field, index) => { const currentPath = fields.slice(0, index + 1); const node = new Node(field, currentPath.join(delimiter), []); if (index === 0 && !nodeExists(this.hierarchy, node)) { this.hierarchy.push(node); } else { this.populateChildren(fields, index, node); } }); } /** * Adds all the child nodes to the hierarchy * @param {Array} fields * @param {number} index * @param {Node} node */ populateChildren(fields, index, node) { const parentPath = fields.slice(0, index).join(delimiter); const parentNode = this.getParent(parentPath, this.hierarchy); if (parentNode && !nodeExists(parentNode.children, node)) { parentNode.children.push(node); } } } /** * Transforms products custom fields paths into Javascript object * @param {Array} products - products that are coming from BigCommerce context * @param {string} namespace - only transform custom fields which have this namespace * @returns {Array} */ export default function transformPathToHierarchy(products, namespace) { const customFields = removeNamespaces(formatCustomFields(products, namespace)); const hierarchy = new CustomFieldsHierarchy(customFields); return hierarchy.getHierarchy(); }
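A minimal sketch of the path-to-tree transform that CustomFieldsHierarchy above performs, with plain dicts standing in for the Node objects and their children arrays:

# Each ' \ '-delimited field becomes a chain of nodes; shared prefixes merge.
DELIMITER = ' \\ '

def build_hierarchy(paths):
    root = {}
    for path in paths:
        node = root
        for part in path.split(DELIMITER):
            node = node.setdefault(part, {})
    return root

tree = build_hierarchy(['Specs \\ CPU \\ Cores', 'Specs \\ RAM'])
assert tree == {'Specs': {'CPU': {'Cores': {}}, 'RAM': {}}}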
indexv2.js
//cell size (px) var cell_size = 40; let maze = [ 0x0E, 0x0A, 0x09, 0x0C, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x08, 0x0A, 0x0A, 0x0A, 0x08, 0x09, 0x0C, 0x09, 0x05, 0x06, 0x08, 0x0A, 0x0A, 0x0A, 0x0A, 0x0B, 0x06, 0x0A, 0x0A, 0x0A, 0x03, 0x05, 0x05, 0x05, 0x05, 0x0C, 0x02, 0x0B, 0x0E, 0x08, 0x0A, 0x0A, 0x08, 0x0A, 0x08, 0x08, 0x09, 0x05, 0x05, 0x04, 0x01, 0x06, 0x08, 0x0A, 0x09, 0x04, 0x0A, 0x0A, 0x00, 0x0A, 0x03, 0x05, 0x05, 0x05, 0x05, 0x05, 0x04, 0x09, 0x06, 0x09, 0x05, 0x04, 0x0A, 0x0A, 0x02, 0x0A, 0x0B, 0x05, 0x05, 0x05, 0x05, 0x04, 0x03, 0x06, 0x0A, 0x02, 0x03, 0x06, 0x0A, 0x0A, 0x0A, 0x0A, 0x09, 0x05, 0x05, 0x05, 0x05, 0x05, 0x0D, 0x0D, 0x0D, 0x0C, 0x08, 0x0A, 0x0A, 0x0A, 0x0A, 0x09, 0x05, 0x05, 0x05, 0x05, 0x06, 0x03, 0x04, 0x01, 0x04, 0x01, 0x05, 0x0C, 0x09, 0x0C, 0x08, 0x01, 0x05, 0x05, 0x05, 0x05, 0x0C, 0x08, 0x01, 0x06, 0x01, 0x05, 0x04, 0x02, 0x03, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x0D, 0x06, 0x01, 0x05, 0x0C, 0x0A, 0x01, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x04, 0x09, 0x06, 0x03, 0x06, 0x0A, 0x02, 0x00, 0x03, 0x05, 0x04, 0x03, 0x05, 0x05, 0x04, 0x03, 0x05, 0x05, 0x0C, 0x0A, 0x0A, 0x08, 0x09, 0x04, 0x0A, 0x01, 0x05, 0x0D, 0x05, 0x05, 0x05, 0x0D, 0x05, 0x05, 0x04, 0x0A, 0x08, 0x03, 0x05, 0x06, 0x0A, 0x03, 0x05, 0x04, 0x01, 0x05, 0x05, 0x04, 0x01, 0x04, 0x03, 0x0C, 0x02, 0x0B, 0x06, 0x08, 0x0A, 0x0A, 0x03, 0x05, 0x05, 0x05, 0x06, 0x01, 0x07, 0x06, 0x08, 0x02, 0x0A, 0x0A, 0x0B, 0x06, 0x08, 0x0A, 0x0A, 0x00, 0x01, 0x06, 0x0A, 0x02, 0x0A, 0x0A, 0x02, 0x0B, 0x0E, 0x0A, 0x0A, 0x0A, 0x02, 0x0A, 0x0A, 0x03, 0x07 ]; let see = [ 0x0e, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x09, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x06, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03 ]; //16x16 bool array for visited let visited = Array(16).fill().map(() => Array(16).fill(false)); visited[0][0] = true; //16x16 array for weights let array = Array(16).fill().map(() => Array(16).fill(0)); let stack = []; //init floodfill array for (let i=0; i<16; i++) {
} String.prototype.lpad = function(padString, length) { var str = this; while (str.length < length) str = padString + str; return str; } let mouse = { pos: [0, 0], //direction 0=up, 1=right, 2=down, 3=left dir: 2, stepsTaken: 0, moveTo: function(x, y) { this.pos = [x, y]; }, moveForward: function() { switch (this.dir) { case 0: this.pos[1]--; break; case 1: this.pos[0]++; break; case 2: this.pos[1]++; break; case 3: this.pos[0]--; break; } this.stepsTaken++; }, turnRight: function() { this.dir = (this.dir + 1) % 4; }, turnLeft: function() { //+3 instead of -1 to avoid negative number for modulo this.dir = (this.dir + 3) % 4; }, draw: function(ctx) { let x = this.pos[0] * cell_size + cell_size/2; let y = this.pos[1] * cell_size + cell_size/2; ctx.beginPath(); ctx.arc(x, y, cell_size * 0.35, 0, 2 * Math.PI); ctx.closePath(); switch (this.dir) { case 0: ctx.moveTo(x, y - cell_size * 0.1); ctx.lineTo(x, y - cell_size * 0.45); break; case 1: ctx.moveTo(x + cell_size * 0.1, y); ctx.lineTo(x + cell_size * 0.45, y); break; case 2: ctx.moveTo(x, y + cell_size * 0.1); ctx.lineTo(x, y + cell_size * 0.45); break; case 3: ctx.moveTo(x - cell_size * 0.1, y); ctx.lineTo(x - cell_size * 0.45, y); break; } ctx.stroke(); } }; function updateCanvas() { //first canvas let c = document.getElementById("canvas"); let ctx = c.getContext('2d'); ctx.canvas.height = cell_size * 16; ctx.canvas.width = cell_size * 16; ctx.font = `${cell_size/2}px Arial`; //current position mouse.draw(ctx); //maze walls and weights ctx.fillStyle = "black"; for (let i=0; i<16; i++) { for (let j=0; j<16; j++) { //binary representation let bin = maze[16*i+j].toString(2); bin = bin.lpad('0', 4); //North if (bin.charAt(bin.length - 3) == "1") { ctx.moveTo(i*cell_size, j*cell_size); ctx.lineTo((i+1)*cell_size, j*cell_size); ctx.stroke(); } //East if (bin.charAt(bin.length - 2) == "1") { ctx.moveTo((i+1)*cell_size, j*cell_size); ctx.lineTo((i+1)*cell_size, (j+1)*cell_size); ctx.stroke(); } //South if (bin.charAt(bin.length - 1) == "1") { ctx.moveTo((i)*cell_size, (j+1)*cell_size); ctx.lineTo((i+1)*cell_size, (j+1)*cell_size); ctx.stroke(); } //West if (bin.charAt(bin.length - 4) == "1") { ctx.moveTo((i)*cell_size, (j)*cell_size); ctx.lineTo((i)*cell_size, (j+1)*cell_size); ctx.stroke(); } } } //second canvas c = document.getElementById("canvas1"); ctx = c.getContext('2d'); ctx.canvas.height = cell_size * 16; ctx.canvas.width = cell_size * 16; ctx.font = `${cell_size/2}px Arial`; //maze walls and weights ctx.fillStyle = "black"; for (let i=0; i<16; i++) { for (let j=0; j<16; j++) { //binary representation let bin = see[16*i+j].toString(2); //North if (bin.charAt(bin.length - 3) == "1") { ctx.moveTo(i*cell_size, j*cell_size); ctx.lineTo((i+1)*cell_size, j*cell_size); ctx.stroke(); } //East if (bin.charAt(bin.length - 2) == "1") { ctx.moveTo((i+1)*cell_size, j*cell_size); ctx.lineTo((i+1)*cell_size, (j+1)*cell_size); ctx.stroke(); } //South if (bin.charAt(bin.length - 1) == "1") { ctx.moveTo((i)*cell_size, (j+1)*cell_size); ctx.lineTo((i+1)*cell_size, (j+1)*cell_size); ctx.stroke(); } //West if (bin.charAt(bin.length - 4) == "1") { ctx.moveTo((i)*cell_size, (j)*cell_size); ctx.lineTo((i)*cell_size, (j+1)*cell_size); ctx.stroke(); } if (visited[i][j]) { ctx.fillStyle = "YellowGreen"; } else { ctx.fillStyle = "Orange"; } ctx.fillRect(cell_size*i + 1, cell_size*j + 1, cell_size - 2, cell_size - 2); ctx.fillStyle = "Black"; ctx.fillText(array[i][j], cell_size*i + cell_size/4, cell_size*j + cell_size*3/4); } } } function Step() { while (stack.length 
> 0) { let cell = stack.pop(); let val = array[cell[0]][cell[1]]; let bin = see[16*cell[0]+cell[1]].toString(2).lpad('0', 4); let min_val = 256; //North if (bin.charAt(bin.length - 3) == "0") { if (array[cell[0]][cell[1] - 1] < min_val) { min_val = array[cell[0]][cell[1] - 1]; } } //East if (bin.charAt(bin.length - 2) == "0") { if (array[cell[0] + 1][cell[1]] < min_val) { min_val = array[cell[0] + 1][cell[1]]; } } //South if (bin.charAt(bin.length - 1) == "0") { if (array[cell[0]][cell[1] + 1] < min_val) { min_val = array[cell[0]][cell[1] + 1]; } } //West if (bin.charAt(bin.length - 4) == "0") { if (array[cell[0] - 1][cell[1]] < min_val) { min_val = array[cell[0] - 1][cell[1]]; } } if (val != min_val + 1) { array[cell[0]][cell[1]] = min_val + 1; //push to stack //north if (bin.charAt(bin.length - 3) == "0") { stack.push([cell[0], cell[1] - 1]); } //East if (bin.charAt(bin.length - 2) == "0") { stack.push([cell[0] + 1, cell[1]]); } //South if (bin.charAt(bin.length - 1) == "0") { stack.push([cell[0], cell[1] + 1]); } //West if (bin.charAt(bin.length - 4) == "0") { stack.push([cell[0] - 1, cell[1]]); } } } } function Move() { let cell = mouse.pos; stack.push([cell[0], cell[1]]); Step(); let bin = see[16*cell[0]+cell[1]].toString(2).lpad('0', 4); let min_val = 256; let min_direction = -1; let go_straight = false; //North if (bin.charAt(1) == "0") { if (array[cell[0]][cell[1] - 1] < min_val) { min_val = array[cell[0]][cell[1] - 1]; min_direction = 0; } } //East if (bin.charAt(2) == "0") { if (array[cell[0] + 1][cell[1]] < min_val) { min_val = array[cell[0] + 1][cell[1]]; min_direction = 1; go_straight = false; } else if (array[cell[0] + 1][cell[1]] == min_val && mouse.dir == 1) { go_straight = true; } } //South if (bin.charAt(3) == "0") { if (array[cell[0]][cell[1] + 1] < min_val) { min_val = array[cell[0]][cell[1] + 1]; min_direction = 2; go_straight = false; } else if (array[cell[0]][cell[1] + 1] == min_val && mouse.dir == 2) { go_straight = true; } } //West if (bin.charAt(0) == "0") { if (array[cell[0] - 1][cell[1]] < min_val) { min_val = array[cell[0] - 1][cell[1]]; min_direction = 3; go_straight = false; } else if (array[cell[0] - 1][cell[1]] == min_val && mouse.dir == 3) { go_straight = true; } } if (min_direction == mouse.dir || go_straight) { mouse.moveForward(); } else { while (min_direction != mouse.dir) { mouse.turnRight(); } mouse.moveForward(); } //new mouse position cell = mouse.pos; visited[cell[0]][cell[1]] = true; bin = maze[16*cell[0]+cell[1]].toString(2).lpad('0', 4); see_bin = see[16*cell[0]+cell[1]].toString(2).lpad('0', 4); //if wall exists but not seen, add wall on both sides //North if (bin.charAt(1) == "1" && see_bin.charAt(1) == "0") { see[(cell[0])*16+cell[1]-1] = see[(cell[0])*16+cell[1]-1] | 0b0001; } //East if (bin.charAt(2) == "1" && see_bin.charAt(2) == "0") { see[(cell[0]+1)*16+(cell[1])] = see[(cell[0]+1)*16+(cell[1])] | 0b1000; } //South if (bin.charAt(3) == "1" && see_bin.charAt(3) == "0") { see[(cell[0])*16+cell[1]+1] = see[(cell[0])*16+cell[1]+1] | 0b0100; } //West if (bin.charAt(0) == "1" && see_bin.charAt(0) == "0") { see[(cell[0]-1)*16+(cell[1])] = see[(cell[0]-1)*16+(cell[1])] | 0b0010; } see[cell[0]*16+cell[1]] = maze[cell[0]*16+cell[1]]; Step(); updateCanvas(); if (array[cell[0]][cell[1]] == 0) { mouse.moveTo(0, 0); console.log("Steps taken: " + mouse.stepsTaken); mouse.stepsTaken = 0; visited = Array(16).fill().map(() => Array(16).fill(false)); visited[0][0] = true; updateCanvas(); } } function ZoomIn() { cell_size += 5; updateCanvas(); } function 
ZoomOut() { cell_size -= 5; updateCanvas(); } window.onload = updateCanvas;
for (let j=0; j<16; j++) { array[i][j] = Math.round(Math.abs(i-7.5) + Math.abs(j-7.5) - 1); }
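The Step() function above is a flood-fill relaxation: a cell's weight must equal one plus the smallest weight among the neighbours it can reach, and any corrected cell re-queues its neighbours. A Python sketch that converges to the same fixed point, with the walls and the four-cell centre goal of the JS version omitted for brevity:

def flood_fill(n, goals):
    # 255 plays the role of the "unknown" weight; goals start at 0.
    dist = {(x, y): 0 if (x, y) in goals else 255
            for x in range(n) for y in range(n)}
    stack = list(goals)
    while stack:
        cell = stack.pop()
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nxt = (cell[0] + dx, cell[1] + dy)
            if nxt in dist and dist[nxt] > dist[cell] + 1:
                dist[nxt] = dist[cell] + 1   # correct the weight...
                stack.append(nxt)            # ...and re-queue the neighbour
    return dist

d = flood_fill(4, {(0, 0)})
assert d[(3, 3)] == 6   # Manhattan distance on an open 4x4 grid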
database.rs
use std::collections::hash_map::{Entry, HashMap}; use std::fs::File; use std::path::Path; use std::sync::{Arc, RwLock}; use std::{fs, thread}; use chrono::{DateTime, Utc}; use crossbeam_channel::{Receiver, Sender}; use heed::types::{Str, Unit, SerdeBincode}; use heed::CompactionOption; use log::{debug, error}; use meilisearch_schema::Schema; use crate::{store, update, Index, MResult, Error}; pub type BoxUpdateFn = Box<dyn Fn(&str, update::ProcessedUpdateResult) + Send + Sync + 'static>; type ArcSwapFn = arc_swap::ArcSwapOption<BoxUpdateFn>; type SerdeDatetime = SerdeBincode<DateTime<Utc>>; pub type MainWriter<'a> = heed::RwTxn<'a, MainT>; pub type MainReader = heed::RoTxn<MainT>; pub type UpdateWriter<'a> = heed::RwTxn<'a, UpdateT>; pub type UpdateReader = heed::RoTxn<UpdateT>; const UNHEALTHY_KEY: &str = "_is_unhealthy"; const LAST_UPDATE_KEY: &str = "last-update"; pub struct MainT; pub struct UpdateT; pub struct Database { env: heed::Env, update_env: heed::Env, common_store: heed::PolyDatabase, indexes_store: heed::Database<Str, Unit>, indexes: RwLock<HashMap<String, (Index, thread::JoinHandle<MResult<()>>)>>, update_fn: Arc<ArcSwapFn>, } pub struct DatabaseOptions { pub main_map_size: usize, pub update_map_size: usize, } impl Default for DatabaseOptions { fn default() -> DatabaseOptions { DatabaseOptions { main_map_size: 100 * 1024 * 1024 * 1024, //100Gb update_map_size: 100 * 1024 * 1024 * 1024, //100Gb } } } macro_rules! r#break_try { ($expr:expr, $msg:tt) => { match $expr { core::result::Result::Ok(val) => val, core::result::Result::Err(err) => { log::error!(concat!($msg, ": {}"), err); break; } } }; } pub enum UpdateEvent { NewUpdate, MustClear, } pub type UpdateEvents = Receiver<UpdateEvent>; pub type UpdateEventsEmitter = Sender<UpdateEvent>; fn update_awaiter( receiver: UpdateEvents, env: heed::Env, update_env: heed::Env, index_uid: &str, update_fn: Arc<ArcSwapFn>, index: Index, ) -> MResult<()> { let mut receiver = receiver.into_iter(); while let Some(event) = receiver.next() { // if we receive a *MustClear* event, clear the index and break the loop if let UpdateEvent::MustClear = event { let mut writer = env.typed_write_txn::<MainT>()?; let mut update_writer = update_env.typed_write_txn::<UpdateT>()?; store::clear(&mut writer, &mut update_writer, &index)?; writer.commit()?; update_writer.commit()?; debug!("store {} cleared", index_uid); break } loop { // We instantiate a *write* transaction to *block* the thread // until the *other*, notifiying, thread commits let result = update_env.typed_write_txn::<UpdateT>(); let update_reader = break_try!(result, "LMDB read transaction (update) begin failed"); // retrieve the update that needs to be processed let result = index.updates.first_update(&update_reader); let (update_id, update) = match break_try!(result, "pop front update failed") { Some(value) => value, None => { debug!("no more updates"); break; } }; // do not keep the reader for too long break_try!(update_reader.abort(), "aborting update transaction failed"); // instantiate a transaction to touch to the main env let result = env.typed_write_txn::<MainT>(); let mut main_writer = break_try!(result, "LMDB nested write transaction failed"); // try to apply the update to the database using the main transaction let result = update::update_task(&mut main_writer, &index, update_id, update); let status = break_try!(result, "update task failed"); // commit the main transaction if the update was successful, abort it otherwise if status.error.is_none() { break_try!(main_writer.commit(), 
"commit nested transaction failed"); } else { break_try!(main_writer.abort(), "abborting nested transaction failed"); } // now that the update has been processed we can instantiate // a transaction to move the result to the updates-results store let result = update_env.typed_write_txn::<UpdateT>(); let mut update_writer = break_try!(result, "LMDB write transaction begin failed"); // definitely remove the update from the updates store index.updates.del_update(&mut update_writer, update_id)?; // write the result of the updates-results store let updates_results = index.updates_results; let result = updates_results.put_update_result(&mut update_writer, update_id, &status); // always commit the main transaction, even if the update was unsuccessful break_try!(result, "update result store commit failed"); break_try!(update_writer.commit(), "update transaction commit failed"); // call the user callback when the update and the result are written consistently if let Some(ref callback) = *update_fn.load() { (callback)(index_uid, status); } } } debug!("update loop system stopped"); Ok(()) } impl Database { pub fn open_or_create(path: impl AsRef<Path>, options: DatabaseOptions) -> MResult<Database> { let main_path = path.as_ref().join("main"); let update_path = path.as_ref().join("update"); fs::create_dir_all(&main_path)?; let env = heed::EnvOpenOptions::new() .map_size(options.main_map_size) .max_dbs(3000) .open(main_path)?; fs::create_dir_all(&update_path)?; let update_env = heed::EnvOpenOptions::new() .map_size(options.update_map_size) .max_dbs(3000) .open(update_path)?; let common_store = env.create_poly_database(Some("common"))?; let indexes_store = env.create_database::<Str, Unit>(Some("indexes"))?; let update_fn = Arc::new(ArcSwapFn::empty()); // list all indexes that needs to be opened let mut must_open = Vec::new(); let reader = env.read_txn()?; for result in indexes_store.iter(&reader)? { let (index_uid, _) = result?; must_open.push(index_uid.to_owned()); } reader.abort()?; // open the previously aggregated indexes let mut indexes = HashMap::new(); for index_uid in must_open { let (sender, receiver) = crossbeam_channel::unbounded(); let index = match store::open(&env, &update_env, &index_uid, sender.clone())? 
{ Some(index) => index, None => { log::warn!( "the index {} doesn't exist or has not all the databases", index_uid ); continue; } }; let env_clone = env.clone(); let update_env_clone = update_env.clone(); let index_clone = index.clone(); let name_clone = index_uid.clone(); let update_fn_clone = update_fn.clone(); let handle = thread::spawn(move || { update_awaiter( receiver, env_clone, update_env_clone, &name_clone, update_fn_clone, index_clone, ) }); // send an update notification to make sure that // possible pre-boot updates are consumed sender.send(UpdateEvent::NewUpdate).unwrap(); let result = indexes.insert(index_uid, (index, handle)); assert!( result.is_none(), "The index should not have been already open" ); } Ok(Database { env, update_env, common_store, indexes_store, indexes: RwLock::new(indexes), update_fn, }) } pub fn open_index(&self, name: impl AsRef<str>) -> Option<Index> { let indexes_lock = self.indexes.read().unwrap(); match indexes_lock.get(name.as_ref()) { Some((index, ..)) => Some(index.clone()), None => None, } } pub fn is_indexing(&self, reader: &UpdateReader, index: &str) -> MResult<Option<bool>> { match self.open_index(&index) { Some(index) => index.current_update_id(&reader).map(|u| Some(u.is_some())), None => Ok(None), } } pub fn create_index(&self, name: impl AsRef<str>) -> MResult<Index> { let name = name.as_ref(); let mut indexes_lock = self.indexes.write().unwrap(); match indexes_lock.entry(name.to_owned()) { Entry::Occupied(_) => Err(crate::Error::IndexAlreadyExists),
Entry::Vacant(entry) => { let (sender, receiver) = crossbeam_channel::unbounded(); let index = store::create(&self.env, &self.update_env, name, sender)?; let mut writer = self.env.typed_write_txn::<MainT>()?; self.indexes_store.put(&mut writer, name, &())?; index.main.put_name(&mut writer, name)?; index.main.put_created_at(&mut writer)?; index.main.put_updated_at(&mut writer)?; index.main.put_schema(&mut writer, &Schema::new())?; let env_clone = self.env.clone(); let update_env_clone = self.update_env.clone(); let index_clone = index.clone(); let name_clone = name.to_owned(); let update_fn_clone = self.update_fn.clone(); let handle = thread::spawn(move || { update_awaiter( receiver, env_clone, update_env_clone, &name_clone, update_fn_clone, index_clone, ) }); writer.commit()?; entry.insert((index.clone(), handle)); Ok(index) } } } pub fn delete_index(&self, name: impl AsRef<str>) -> MResult<bool> { let name = name.as_ref(); let mut indexes_lock = self.indexes.write().unwrap(); match indexes_lock.remove_entry(name) { Some((name, (index, handle))) => { // remove the index name from the list of indexes // and clear all the LMDB dbi let mut writer = self.env.write_txn()?; self.indexes_store.delete(&mut writer, &name)?; writer.commit()?; // send a stop event to the update loop of the index index.updates_notifier.send(UpdateEvent::MustClear).unwrap(); drop(indexes_lock); // join the update loop thread to ensure it is stopped handle.join().unwrap()?; Ok(true) } None => Ok(false), } } pub fn set_update_callback(&self, update_fn: BoxUpdateFn) { let update_fn = Some(Arc::new(update_fn)); self.update_fn.swap(update_fn); } pub fn unset_update_callback(&self) { self.update_fn.swap(None); } pub fn main_read_txn(&self) -> MResult<MainReader> { Ok(self.env.typed_read_txn::<MainT>()?) } pub(crate) fn main_write_txn(&self) -> MResult<MainWriter> { Ok(self.env.typed_write_txn::<MainT>()?) } /// Calls f providing it with a writer to the main database. After f is called, makes sure the /// transaction is commited. Returns whatever result f returns. pub fn main_write<F, R, E>(&self, f: F) -> Result<R, E> where F: FnOnce(&mut MainWriter) -> Result<R, E>, E: From<Error>, { let mut writer = self.main_write_txn()?; let result = f(&mut writer)?; writer.commit().map_err(Error::Heed)?; Ok(result) } /// provides a context with a reader to the main database. experimental. pub fn main_read<F, R, E>(&self, f: F) -> Result<R, E> where F: FnOnce(&MainReader) -> Result<R, E>, E: From<Error>, { let reader = self.main_read_txn()?; let result = f(&reader)?; reader.abort().map_err(Error::Heed)?; Ok(result) } pub fn update_read_txn(&self) -> MResult<UpdateReader> { Ok(self.update_env.typed_read_txn::<UpdateT>()?) } pub(crate) fn update_write_txn(&self) -> MResult<heed::RwTxn<UpdateT>> { Ok(self.update_env.typed_write_txn::<UpdateT>()?) } /// Calls f providing it with a writer to the main database. After f is called, makes sure the /// transaction is commited. Returns whatever result f returns. pub fn update_write<F, R, E>(&self, f: F) -> Result<R, E> where F: FnOnce(&mut UpdateWriter) -> Result<R, E>, E: From<Error>, { let mut writer = self.update_write_txn()?; let result = f(&mut writer)?; writer.commit().map_err(Error::Heed)?; Ok(result) } /// provides a context with a reader to the update database. experimental. 
pub fn update_read<F, R, E>(&self, f: F) -> Result<R, E> where F: FnOnce(&UpdateReader) -> Result<R, E>, E: From<Error>, { let reader = self.update_read_txn()?; let result = f(&reader)?; reader.abort().map_err(Error::Heed)?; Ok(result) } pub fn copy_and_compact_to_path<P: AsRef<Path>>(&self, path: P) -> MResult<(File, File)> { let path = path.as_ref(); let env_path = path.join("main"); let env_update_path = path.join("update"); fs::create_dir(&env_path)?; fs::create_dir(&env_update_path)?; let env_path = env_path.join("data.mdb"); let env_file = self.env.copy_to_path(&env_path, CompactionOption::Enabled)?; let env_update_path = env_update_path.join("data.mdb"); match self.update_env.copy_to_path(env_update_path, CompactionOption::Enabled) { Ok(update_env_file) => Ok((env_file, update_env_file)), Err(e) => { fs::remove_file(env_path)?; Err(e.into()) }, } } pub fn indexes_uids(&self) -> Vec<String> { let indexes = self.indexes.read().unwrap(); indexes.keys().cloned().collect() } pub(crate) fn common_store(&self) -> heed::PolyDatabase { self.common_store } pub fn last_update(&self, reader: &heed::RoTxn<MainT>) -> MResult<Option<DateTime<Utc>>> { match self.common_store() .get::<_, Str, SerdeDatetime>(reader, LAST_UPDATE_KEY)? { Some(datetime) => Ok(Some(datetime)), None => Ok(None), } } pub fn set_last_update(&self, writer: &mut heed::RwTxn<MainT>, time: &DateTime<Utc>) -> MResult<()> { self.common_store() .put::<_, Str, SerdeDatetime>(writer, LAST_UPDATE_KEY, time)?; Ok(()) } pub fn set_healthy(&self, writer: &mut heed::RwTxn<MainT>) -> MResult<()> { let common_store = self.common_store(); common_store.delete::<_, Str>(writer, UNHEALTHY_KEY)?; Ok(()) } pub fn set_unhealthy(&self, writer: &mut heed::RwTxn<MainT>) -> MResult<()> { let common_store = self.common_store(); common_store.put::<_, Str, Unit>(writer, UNHEALTHY_KEY, &())?; Ok(()) } pub fn get_health(&self, reader: &heed::RoTxn<MainT>) -> MResult<Option<()>> { let common_store = self.common_store(); Ok(common_store.get::<_, Str, Unit>(&reader, UNHEALTHY_KEY)?) } pub fn compute_stats(&self, writer: &mut MainWriter, index_uid: &str) -> MResult<()> { let index = match self.open_index(&index_uid) { Some(index) => index, None => { error!("Impossible to retrieve index {}", index_uid); return Ok(()); } }; let schema = match index.main.schema(&writer)? 
{ Some(schema) => schema, None => return Ok(()), }; let all_documents_fields = index .documents_fields_counts .all_documents_fields_counts(&writer)?; // count fields frequencies let mut fields_frequency = HashMap::<_, usize>::new(); for result in all_documents_fields { let (_, attr, _) = result?; if let Some(field_id) = schema.indexed_pos_to_field_id(attr) { *fields_frequency.entry(field_id).or_default() += 1; } } // convert attributes to their names let frequency: HashMap<_, _> = fields_frequency .into_iter() .filter_map(|(a, c)| schema.name(a).map(|name| (name.to_string(), c))) .collect(); index .main .put_fields_distribution(writer, &frequency) } } #[cfg(test)] mod tests { use super::*; use crate::bucket_sort::SortResult; use crate::criterion::{self, CriteriaBuilder}; use crate::update::{ProcessedUpdateResult, UpdateStatus}; use crate::settings::Settings; use crate::{Document, DocumentId}; use serde::de::IgnoredAny; use std::sync::mpsc; #[test] fn valid_updates() { let dir = tempfile::tempdir().unwrap(); let database = Database::open_or_create(dir.path(), DatabaseOptions::default()).unwrap(); let db = &database; let (sender, receiver) = mpsc::sync_channel(100); let update_fn = move |_name: &str, update: ProcessedUpdateResult| { sender.send(update.update_id).unwrap() }; let index = database.create_index("test").unwrap(); database.set_update_callback(Box::new(update_fn)); let mut writer = db.main_write_txn().unwrap(); index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap(); writer.commit().unwrap(); // block until the transaction is processed let settings = { let data = r#" { "searchableAttributes": ["name", "description"], "displayedAttributes": ["name", "description"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.into_update().unwrap() }; let mut update_writer = db.update_write_txn().unwrap(); let _update_id = index.settings_update(&mut update_writer, settings).unwrap(); update_writer.commit().unwrap(); let mut additions = index.documents_addition(); let doc1 = serde_json::json!({ "id": 123, "name": "Marvin", "description": "My name is Marvin", }); let doc2 = serde_json::json!({ "id": 234, "name": "Kevin", "description": "My name is Kevin", }); additions.update_document(doc1); additions.update_document(doc2); let mut update_writer = db.update_write_txn().unwrap(); let update_id = additions.finalize(&mut update_writer).unwrap(); update_writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.into_iter().find(|id| *id == update_id); let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Processed { content }) if content.error.is_none()); } #[test] fn invalid_updates() { let dir = tempfile::tempdir().unwrap(); let database = Database::open_or_create(dir.path(), DatabaseOptions::default()).unwrap(); let db = &database; let (sender, receiver) = mpsc::sync_channel(100); let update_fn = move |_name: &str, update: ProcessedUpdateResult| { sender.send(update.update_id).unwrap() }; let index = database.create_index("test").unwrap(); database.set_update_callback(Box::new(update_fn)); let mut writer = db.main_write_txn().unwrap(); index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap(); writer.commit().unwrap(); let settings = { let data = r#" { "searchableAttributes": ["name", "description"], "displayedAttributes": ["name", "description"] } "#; let settings: Settings = 
serde_json::from_str(data).unwrap(); settings.into_update().unwrap() }; let mut update_writer = db.update_write_txn().unwrap(); let _update_id = index.settings_update(&mut update_writer, settings).unwrap(); update_writer.commit().unwrap(); let mut additions = index.documents_addition(); let doc1 = serde_json::json!({ "id": 123, "name": "Marvin", "description": "My name is Marvin", }); let doc2 = serde_json::json!({ "name": "Kevin", "description": "My name is Kevin", }); additions.update_document(doc1); additions.update_document(doc2); let mut update_writer = db.update_write_txn().unwrap(); let update_id = additions.finalize(&mut update_writer).unwrap(); update_writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.into_iter().find(|id| *id == update_id); let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Failed { content }) if content.error.is_some()); } #[test] fn ignored_words_too_long() { let dir = tempfile::tempdir().unwrap(); let database = Database::open_or_create(dir.path(), DatabaseOptions::default()).unwrap(); let db = &database; let (sender, receiver) = mpsc::sync_channel(100); let update_fn = move |_name: &str, update: ProcessedUpdateResult| { sender.send(update.update_id).unwrap() }; let index = database.create_index("test").unwrap(); database.set_update_callback(Box::new(update_fn)); let mut writer = db.main_write_txn().unwrap(); index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap(); writer.commit().unwrap(); let settings = { let data = r#" { "searchableAttributes": ["name"], "displayedAttributes": ["name"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.into_update().unwrap() }; let mut update_writer = db.update_write_txn().unwrap(); let _update_id = index.settings_update(&mut update_writer, settings).unwrap(); update_writer.commit().unwrap(); let mut additions = index.documents_addition(); let doc1 = serde_json::json!({ "id": 123, "name": "s̷̡̢̡̧̺̜̞͕͉͉͕̜͔̟̼̥̝͍̟̖͔͔̪͉̲̹̝̣̖͎̞̤̥͓͎̭̩͕̙̩̿̀̋̅̈́̌́̏̍̄̽͂̆̾̀̿̕̚̚͜͠͠ͅͅļ̵̨̨̨̰̦̻̳̖̳͚̬̫͚̦͖͈̲̫̣̩̥̻̙̦̱̼̠̖̻̼̘̖͉̪̜̠̙͖̙̩͔̖̯̩̲̿̽͋̔̿̍̓͂̍̿͊͆̃͗̔̎͐͌̾̆͗́̆̒̔̾̅̚̚͜͜ͅͅī̵̛̦̅̔̓͂͌̾́͂͛̎̋͐͆̽̂̋̋́̾̀̉̓̏̽́̑̀͒̇͋͛̈́̃̉̏͊̌̄̽̿̏̇͘̕̚̕p̶̧̛̛̖̯̗͕̝̗̭̱͙̖̗̟̟̐͆̊̂͐̋̓̂̈́̓͊̆͌̾̾͐͋͗͌̆̿̅͆̈́̈́̉͋̍͊͗̌̓̅̈̎̇̃̎̈́̉̐̋͑̃͘̕͘d̴̢̨̛͕̘̯͖̭̮̝̝̐̊̈̅̐̀͒̀́̈́̀͌̽͛͆͑̀̽̿͛̃̋̇̎̀́̂́͘͠͝ǫ̵̨̛̮̩̘͚̬̯̖̱͍̼͑͑̓̐́̑̿̈́̔͌̂̄͐͝ģ̶̧̜͇̣̭̺̪̺̖̻͖̮̭̣̙̻͒͊͗̓̓͒̀̀ͅ", }); additions.update_document(doc1); let mut update_writer = db.update_write_txn().unwrap(); let update_id = additions.finalize(&mut update_writer).unwrap(); update_writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.into_iter().find(|id| *id == update_id); let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Processed { content }) if content.error.is_none()); } #[test] fn add_schema_attributes_at_end() { let dir = tempfile::tempdir().unwrap(); let database = Database::open_or_create(dir.path(), DatabaseOptions::default()).unwrap(); let db = &database; let (sender, receiver) = mpsc::sync_channel(100); let update_fn = move |_name: &str, update: ProcessedUpdateResult| { sender.send(update.update_id).unwrap() }; let index = database.create_index("test").unwrap(); database.set_update_callback(Box::new(update_fn)); let mut writer = db.main_write_txn().unwrap(); index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap(); 
writer.commit().unwrap(); let settings = { let data = r#" { "searchableAttributes": ["name", "description"], "displayedAttributes": ["name", "description"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.into_update().unwrap() }; let mut update_writer = db.update_write_txn().unwrap(); let _update_id = index.settings_update(&mut update_writer, settings).unwrap(); update_writer.commit().unwrap(); let mut additions = index.documents_addition(); let doc1 = serde_json::json!({ "id": 123, "name": "Marvin", "description": "My name is Marvin", }); let doc2 = serde_json::json!({ "id": 234, "name": "Kevin", "description": "My name is Kevin", }); additions.update_document(doc1); additions.update_document(doc2); let mut update_writer = db.update_write_txn().unwrap(); let _update_id = additions.finalize(&mut update_writer).unwrap(); update_writer.commit().unwrap(); let settings = { let data = r#" { "searchableAttributes": ["name", "description", "age", "sex"], "displayedAttributes": ["name", "description", "age", "sex"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.into_update().unwrap() }; let mut writer = db.update_write_txn().unwrap(); let update_id = index.settings_update(&mut writer, settings).unwrap(); writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.iter().find(|id| *id == update_id); // check if it has been accepted let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Processed { content }) if content.error.is_none()); update_reader.abort().unwrap(); let mut additions = index.documents_addition(); let doc1 = serde_json::json!({ "id": 123, "name": "Marvin", "description": "My name is Marvin", "age": 21, "sex": "Male", }); let doc2 = serde_json::json!({ "id": 234, "name": "Kevin", "description": "My name is Kevin", "age": 23, "sex": "Male", }); additions.update_document(doc1); additions.update_document(doc2); let mut writer = db.update_write_txn().unwrap(); let update_id = additions.finalize(&mut writer).unwrap(); writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.iter().find(|id| *id == update_id); // check if it has been accepted let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Processed { content }) if content.error.is_none()); update_reader.abort().unwrap(); // even try to search for a document let reader = db.main_read_txn().unwrap(); let SortResult {documents, .. 
} = index.query_builder().query(&reader, "21 ", 0..20).unwrap(); assert_matches!(documents.len(), 1); reader.abort().unwrap(); // try to introduce attributes in the middle of the schema let settings = { let data = r#" { "searchableAttributes": ["name", "description", "city", "age", "sex"], "displayedAttributes": ["name", "description", "city", "age", "sex"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.into_update().unwrap() }; let mut writer = db.update_write_txn().unwrap(); let update_id = index.settings_update(&mut writer, settings).unwrap(); writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.iter().find(|id| *id == update_id); // check if it has been accepted let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Processed { content }) if content.error.is_none()); } #[test] fn deserialize_documents() { let dir = tempfile::tempdir().unwrap(); let database = Database::open_or_create(dir.path(), DatabaseOptions::default()).unwrap(); let db = &database; let (sender, receiver) = mpsc::sync_channel(100); let update_fn = move |_name: &str, update: ProcessedUpdateResult| { sender.send(update.update_id).unwrap() }; let index = database.create_index("test").unwrap(); database.set_update_callback(Box::new(update_fn)); let mut writer = db.main_write_txn().unwrap(); index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap(); writer.commit().unwrap(); let settings = { let data = r#" { "searchableAttributes": ["name", "description"], "displayedAttributes": ["name", "description"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.into_update().unwrap() }; let mut writer = db.update_write_txn().unwrap(); let _update_id = index.settings_update(&mut writer, settings).unwrap(); writer.commit().unwrap(); let mut additions = index.documents_addition(); // DocumentId(7900334843754999545) let doc1 = serde_json::json!({ "id": 123, "name": "Marvin", "description": "My name is Marvin", }); // DocumentId(8367468610878465872) let doc2 = serde_json::json!({ "id": 234, "name": "Kevin", "description": "My name is Kevin", }); additions.update_document(doc1); additions.update_document(doc2); let mut writer = db.update_write_txn().unwrap(); let update_id = additions.finalize(&mut writer).unwrap(); writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.into_iter().find(|id| *id == update_id); let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Processed { content }) if content.error.is_none()); update_reader.abort().unwrap(); let reader = db.main_read_txn().unwrap(); let document: Option<IgnoredAny> = index.document(&reader, None, DocumentId(25)).unwrap(); assert!(document.is_none()); let document: Option<IgnoredAny> = index .document(&reader, None, DocumentId(0)) .unwrap(); assert!(document.is_some()); let document: Option<IgnoredAny> = index .document(&reader, None, DocumentId(1)) .unwrap(); assert!(document.is_some()); } #[test] fn partial_document_update() { let dir = tempfile::tempdir().unwrap(); let database = Database::open_or_create(dir.path(), DatabaseOptions::default()).unwrap(); let db = &database; let (sender, receiver) = mpsc::sync_channel(100); let update_fn = move |_name: &str, update: ProcessedUpdateResult| { sender.send(update.update_id).unwrap() 
}; let index = database.create_index("test").unwrap(); database.set_update_callback(Box::new(update_fn)); let mut writer = db.main_write_txn().unwrap(); index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap(); writer.commit().unwrap(); let settings = { let data = r#" { "searchableAttributes": ["name", "description"], "displayedAttributes": ["name", "description", "id"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.into_update().unwrap() }; let mut writer = db.update_write_txn().unwrap(); let _update_id = index.settings_update(&mut writer, settings).unwrap(); writer.commit().unwrap(); let mut additions = index.documents_addition(); // DocumentId(7900334843754999545) let doc1 = serde_json::json!({ "id": 123, "name": "Marvin", "description": "My name is Marvin", }); // DocumentId(8367468610878465872) let doc2 = serde_json::json!({ "id": 234, "name": "Kevin", "description": "My name is Kevin", }); additions.update_document(doc1); additions.update_document(doc2); let mut writer = db.update_write_txn().unwrap(); let update_id = additions.finalize(&mut writer).unwrap(); writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.iter().find(|id| *id == update_id); let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Processed { content }) if content.error.is_none()); update_reader.abort().unwrap(); let reader = db.main_read_txn().unwrap(); let document: Option<IgnoredAny> = index.document(&reader, None, DocumentId(25)).unwrap(); assert!(document.is_none()); let document: Option<IgnoredAny> = index .document(&reader, None, DocumentId(0)) .unwrap(); assert!(document.is_some()); let document: Option<IgnoredAny> = index .document(&reader, None, DocumentId(1)) .unwrap(); assert!(document.is_some()); reader.abort().unwrap(); let mut partial_additions = index.documents_partial_addition(); // DocumentId(7900334843754999545) let partial_doc1 = serde_json::json!({ "id": 123, "description": "I am the new Marvin", }); // DocumentId(8367468610878465872) let partial_doc2 = serde_json::json!({ "id": 234, "description": "I am the new Kevin", }); partial_additions.update_document(partial_doc1); partial_additions.update_document(partial_doc2); let mut writer = db.update_write_txn().unwrap(); let update_id = partial_additions.finalize(&mut writer).unwrap(); writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.iter().find(|id| *id == update_id); let update_reader = db.update_read_txn().unwrap(); let result = index.update_status(&update_reader, update_id).unwrap(); assert_matches!(result, Some(UpdateStatus::Processed { content }) if content.error.is_none()); update_reader.abort().unwrap(); let reader = db.main_read_txn().unwrap(); let document: Option<serde_json::Value> = index .document(&reader, None, DocumentId(0)) .unwrap(); let new_doc1 = serde_json::json!({ "id": 123, "name": "Marvin", "description": "I am the new Marvin", }); assert_eq!(document, Some(new_doc1)); let document: Option<serde_json::Value> = index .document(&reader, None, DocumentId(1)) .unwrap(); let new_doc2 = serde_json::json!({ "id": 234, "name": "Kevin", "description": "I am the new Kevin", }); assert_eq!(document, Some(new_doc2)); } #[test] fn delete_index() { let dir = tempfile::tempdir().unwrap(); let database = Arc::new(Database::open_or_create(dir.path(), DatabaseOptions::default()).unwrap()); let db = &database; let 
(sender, receiver) = mpsc::sync_channel(100); let db_cloned = database.clone(); let update_fn = move |name: &str, update: ProcessedUpdateResult| { // try to open index to trigger a lock let _ = db_cloned.open_index(name); sender.send(update.update_id).unwrap() }; // create the index let index = database.create_index("test").unwrap(); database.set_update_callback(Box::new(update_fn)); let mut writer = db.main_write_txn().unwrap(); index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap(); writer.commit().unwrap(); let settings = { let data = r#" { "searchableAttributes": ["name", "description"], "displayedAttributes": ["name", "description"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.into_update().unwrap() }; let mut writer = db.update_write_txn().unwrap(); let _update_id = index.settings_update(&mut writer, settings).unwrap(); writer.commit().unwrap(); // add documents to the index let mut additions = index.documents_addition(); let doc1 = serde_json::json!({ "id": 123, "name": "Marvin", "description": "My name is Marvin", }); let doc2 = serde_json::json!({ "id": 234, "name": "Kevin", "description": "My name is Kevin", }); additions.update_document(doc1); additions.update_document(doc2); let mut writer = db.update_write_txn().unwrap(); let update_id = additions.finalize(&mut writer).unwrap(); writer.commit().unwrap(); // delete the index let deleted = database.delete_index("test").unwrap(); assert!(deleted); // block until the transaction is processed let _ = receiver.into_iter().find(|id| *id == update_id); let result = database.open_index("test"); assert!(result.is_none()); } #[test] fn check_number_ordering() { let dir = tempfile::tempdir().unwrap(); let database = Database::open_or_create(dir.path(), DatabaseOptions::default()).unwrap(); let db = &database; let (sender, receiver) = mpsc::sync_channel(100); let update_fn = move |_name: &str, update: ProcessedUpdateResult| { sender.send(update.update_id).unwrap() }; let index = database.create_index("test").unwrap(); database.set_update_callback(Box::new(update_fn)); let mut writer = db.main_write_txn().unwrap(); index.main.put_schema(&mut writer, &Schema::with_primary_key("id")).unwrap(); writer.commit().unwrap(); let settings = { let data = r#" { "rankingRules": [ "typo", "words", "proximity", "attribute", "wordsPosition", "exactness", "desc(release_date)" ], "searchableAttributes": ["name", "release_date"], "displayedAttributes": ["name", "release_date"] } "#; let settings: Settings = serde_json::from_str(data).unwrap(); settings.into_update().unwrap() }; let mut writer = db.update_write_txn().unwrap(); let _update_id = index.settings_update(&mut writer, settings).unwrap(); writer.commit().unwrap(); let mut additions = index.documents_addition(); // DocumentId(7900334843754999545) let doc1 = serde_json::json!({ "id": 123, "name": "Kevin the first", "release_date": -10000, }); // DocumentId(8367468610878465872) let doc2 = serde_json::json!({ "id": 234, "name": "Kevin the second", "release_date": 10000, }); additions.update_document(doc1); additions.update_document(doc2); let mut writer = db.update_write_txn().unwrap(); let update_id = additions.finalize(&mut writer).unwrap(); writer.commit().unwrap(); // block until the transaction is processed let _ = receiver.into_iter().find(|id| *id == update_id); let reader = db.main_read_txn().unwrap(); let schema = index.main.schema(&reader).unwrap().unwrap(); let ranked_map = index.main.ranked_map(&reader).unwrap().unwrap(); let criteria = 
CriteriaBuilder::new() .add( criterion::SortByAttr::lower_is_better(&ranked_map, &schema, "release_date") .unwrap(), ) .add(criterion::DocumentId) .build(); let builder = index.query_builder_with_criteria(criteria); let SortResult {documents, .. } = builder.query(&reader, "Kevin", 0..20).unwrap(); let mut iter = documents.into_iter(); assert_matches!( iter.next(), Some(Document { id: DocumentId(0), .. }) ); assert_matches!( iter.next(), Some(Document { id: DocumentId(1), .. }) ); assert_matches!(iter.next(), None); } }
bak_ssl_converter.py
import os, sys, re
import shutil
import optparse
import pandas
import numpy
import subprocess
import fnmatch
from joblib import Parallel, delayed
import multiprocessing
from Bio import SeqIO
import glob

#####################################
#This is the script to produce SSL file outputs for skyline spectral library construction
#
#Fraction parsing is taken from after the final "-" in the file name. For example, "2015-10-05-wb-HEK293-BioRep1-F1.mzML"
#would belong to fraction "F1" (a tiny illustrative helper for this rule follows the filter functions below)
#
#VERSION 1.7.5
version="1.7.5"
#DATE: 3/03/2016
date="3/03/2016"
#####################################
print "-----------------------------------------------------------------------"
print "Welcome to the SSL file converter for Galaxy, Wohlschlegel Lab UCLA"
print "Written by William Barshop"
print "Version: ",version
print "Date: ",date


def applyParallel(dfGrouped, func):
    retLst = Parallel(n_jobs=multiprocessing.cpu_count())(delayed(func)(group) for name, group in dfGrouped)
    return pandas.concat(retLst)

def applyParallelQuarter(dfGrouped, func):
    retLst = Parallel(n_jobs=multiprocessing.cpu_count()/4)(delayed(func)(group) for name, group in dfGrouped)
    return pandas.concat(retLst)

def applyParallelHalf(dfGrouped, func):
    retLst = Parallel(n_jobs=multiprocessing.cpu_count()/2)(delayed(func)(group) for name, group in dfGrouped)
    return pandas.concat(retLst)

def fido_filter(unfiltered):#dataframe,float arguments...
    #filtered=[]
    #print unfiltered,"This is it GOING IN...."
    #unfiltered.apply(fido_filter_row,axis=1)
    for index,eachrow in unfiltered.iterrows():
        prot_q_vals = eachrow['protein q-values'].split(',')
        protein_ids = eachrow['protein id'].split(',')
        indicies = eachrow['peptide prot-indicies'].split(',')
        new_q_vals=[]
        new_prot_ids=[]
        new_indicies=[]
        for each_tuple in zip(protein_ids,prot_q_vals,indicies):
            if float(each_tuple[1])<=gfido_q_threshold:
                #KEEP IT
                new_q_vals.append(each_tuple[1])
                new_prot_ids.append(each_tuple[0])
                new_indicies.append(each_tuple[2])
            else:
                pass # THROW IT OUT
        if len(new_prot_ids) >= 1:
            unfiltered.loc[index,'protein q-values']=",".join(new_q_vals)
            unfiltered.loc[index,'protein id']=",".join(new_prot_ids)
            unfiltered.loc[index,'indicies']=",".join(new_indicies)
            unfiltered.loc[index,'fido_filter']=1 #This means we'll keep it when we do our df filter... We'll end up dropping this column later.
        else:
            unfiltered.loc[index,'fido_filter']=0
    return unfiltered

def fido_filter_row(eachrow):
    #print eachrow,"this is my row!",type(eachrow)
    #print str(gfido_q_threshold),"this is the threshold..."
    prot_q_vals = eachrow['protein q-values'].split(',')
    protein_ids = eachrow['protein id'].split(',')
    indicies = eachrow['peptide prot-indicies'].split(',')
    new_q_vals=[]
    new_prot_ids=[]
    new_indicies=[]
    for each_tuple in zip(protein_ids,prot_q_vals,indicies):
        if float(each_tuple[1])<=gfido_q_threshold:
            #KEEP IT
            new_q_vals.append(each_tuple[1])
            new_prot_ids.append(each_tuple[0])
            new_indicies.append(each_tuple[2])
        else:
            pass # THROW IT OUT
    if len(new_prot_ids) >= 1:
        eachrow['protein q-values']=",".join(new_q_vals)
        eachrow['protein id']=",".join(new_prot_ids)
        eachrow['indicies']=",".join(new_indicies)
        eachrow['fido_filter']=1 #This means we'll keep it when we do our df filter... We'll end up dropping this column later.
    else:
        eachrow['fido_filter']=0
    return eachrow #DataFrame.apply expects the (possibly modified) row back
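# A minimal illustrative helper (hypothetical; it is not called anywhere in
# this script) showing the fraction-tag rule from the header comment: the tag
# is simply whatever follows the final "-" of the extension-less run name,
# i.e. the same rsplit("-",1)[1] idiom applied to 'Original File Name' below.
def _example_fraction_tag(run_name):
    # _example_fraction_tag("2015-10-05-wb-HEK293-BioRep1-F1") returns "F1"
    return run_name.rsplit("-",1)[1]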
####################################
#Argument parsing! So much fun!
#We'll use OptParse even though some
#people really rave about argparse...
#
#
# NB: With Optparse, if an option is
# not specified, it will take a
# value of None
####################################

fractions=False
#print sys.argv,"THESE ARE THE ARGS"
parser = optparse.OptionParser()
parser.add_option("--pout",action="store",type="string",dest="operation_folder")
parser.add_option("--qthresh",action="store", type="float", dest="q_threshold")
parser.add_option("--fidoq",action="store", type="float", dest="fido_q_threshold")
parser.add_option("--fido_q_raw",action="store", type="string", dest="fido_q_raw")
parser.add_option("--pRS_prob_thresh",action="store", type="float", dest="prs_prob_threshold")
parser.add_option("--ptmRS_prob_thresh",action="store", type="float", dest="ptmrs_prob_threshold")
parser.add_option("--FLRthresh",action="store", type="float", dest="flr_threshold")
parser.add_option("--LFLR",action="store_true", dest="local_flr")
parser.add_option("--mzml",action="store",type="string",dest="mzml_files")
parser.add_option("--mass_corrected_mzml",action="store",type="string",dest="mc_mzml_files")
parser.add_option("--blib",action="store_true",dest="blib")
parser.add_option("--fasta",action="store",type="string",dest="fasta")
parser.add_option("--ffasta",action="store",type="string",dest="ffasta") # THIS IS THE FILTERED FIDO OUTPUT
parser.add_option("--expgroups",action="store",type="string",dest="exp_group_file")
parser.add_option("--ssl",action="store",type="string",dest="ssl_output_folder")
parser.add_option("--fractions",action="store_true",dest="fractions")
parser.add_option("--OnePPS",action="store_true",dest="one_pps")
parser.add_option("--only_mod",action="store_true",dest="only_mod")
parser.add_option("--diaumpire",action="store_true",dest="diaumpire")
parser.add_option("--no_mzml",action="store_true",dest="no_mzml")
#parser.add_option("--saint",action="store",type="string",dest="saint_outputs")
(options,args) = parser.parse_args()

if options.fractions is True:
    fractions=True
else:
    print "We'll give outputs by acquisition (runs), not by experimental set."

#saint=False
#if options.saint_outputs is not None:
#    saint=True
#    saint_interact,saint_bait,saint_prey=options.saint_outputs.split(",")

#### Check for FIDO q-filter
fido_q=False
if options.fido_q_threshold is not None:
    fido_q=True
    global gfido_q_threshold #For parallel access via JobLib/Multiprocessing...
    gfido_q_threshold = options.fido_q_threshold
    print "We're going to filter by Fido Q-Value of ",options.fido_q_threshold

#### Check for LuciPHOr
luciphor=False
if options.flr_threshold is not None:
    luciphor=True
    print "We will filter by LuciPHOr FLR of ",options.flr_threshold

#### Check for PhosphoRS
phosphoRS=False
if options.prs_prob_threshold is not None:
    phosphoRS=True
    print "We will filter by a PhosphoRS probability of ",options.prs_prob_threshold

#### Check for ptmRS
ptmRS=False
if options.ptmrs_prob_threshold is not None:
    ptmRS=True
    print "We will filter by a ptmRS probability of ",options.ptmrs_prob_threshold

psms_files=[]
for root, subFolders, files in os.walk(options.operation_folder):
    for eachfile in files:
        if 'target.psms.txt' in eachfile and 'uncorrected' not in eachfile:
            psms_files.append(str(os.path.join(root,eachfile)))

dataframe_vector=[]
for eachfile in psms_files:
    newdf=pandas.DataFrame.from_csv(eachfile,sep='\t',index_col=False)
    dataframe_vector.append(newdf)
combined_results=pandas.concat(dataframe_vector)
del dataframe_vector

group_information = pandas.read_csv(options.exp_group_file,sep='\t',dtype={'Fractionation Group ID String': object,'Fractionation Group Name':object,'Biological Condition':object})

run_dict={} # Key is file_idx, value is file_name.mzML
rev_run_dict={} #Key is file_name.mzML, value is file_idx
group_to_run_dict={} # Key is group, value is [1, 2, 3, 4] list of file_idx belonging to runs in the group...
run_to_group_dict={} # Key is file_idx, value is group...
group_to_file_name={} # key is group, value is ["xxxx.mzML", "xxxx.mzML"]

if fractions:
    fractions_to_run_dict={}

for index,row in group_information.iterrows():
    print row
    run_dict[str(row['Crux File Integer'])]=row['Original File Name']+".mzML"
    rev_run_dict[row['Original File Name']+".mzML"]=str(row['Crux File Integer'])
    if row['Fractionation Group ID String'] in group_to_run_dict:
        group_to_run_dict[row['Fractionation Group ID String']].append(str(row['Crux File Integer']))
    else:
        group_to_run_dict[row['Fractionation Group ID String']] = [str(row['Crux File Integer'])]
    if str(row['Crux File Integer']) in run_to_group_dict:
        #run_to_group_dict[str(row['Crux File Integer'])].append(row['Fractionation Group ID String'])
        print "YOU HAVE MULTIPLE COPIES OF A RUN IN THE EXPERIMENTAL INFORMATION FILE... WARNING!"
    else:
        run_to_group_dict[str(row['Crux File Integer'])]=row['Fractionation Group ID String']
    if row['Fractionation Group ID String'] in group_to_file_name:
        group_to_file_name[row['Fractionation Group ID String']].append(str(row['Original File Name'])+".mzML")
    else:
        group_to_file_name[row['Fractionation Group ID String']] = [str(row['Original File Name'])+".mzML"]
    if fractions:
        fraction_tag=str(row['Original File Name'].rsplit("-",1)[1])
        if fraction_tag in fractions_to_run_dict:
            fractions_to_run_dict[fraction_tag].append(str(row['Crux File Integer']))#str(row['Original File Name'])+".mzML")
        else:
            fractions_to_run_dict[fraction_tag]=[str(row['Crux File Integer'])]

combined_results['file_idx']=combined_results['file_idx'].astype(str)
combined_results['file']=combined_results['file'].astype(str)

####################### We'll handle putting in the file names just to be sure this has been handled!
fix_combined=[]
print run_dict,"This is run dict"
print rev_run_dict,"this is rev dict"
for each_idx in run_dict:
    mask = combined_results[(combined_results.file.str.contains(run_dict[each_idx]))] # MODIFIED....
    mask['file']=run_dict[each_idx]
    fix_combined.append(mask)
combined_results=pandas.concat(fix_combined)

if options.diaumpire:
    print "DIAUmpire inputs: Decrementing scans to match mzML file index."
    combined_results['scan']=combined_results['scan']-1

if luciphor:
    #print combined_results
    combined_results['luci_numPPS']=combined_results['luci_numPPS'].fillna(0)
    combined_results['luci_numPPS']=combined_results['luci_numPPS'].astype(int).fillna(0)
    combined_results['luci_globalFLR']=combined_results['luci_globalFLR'].astype(float).fillna(0.0)
    combined_results['luci_localFLR']=combined_results['luci_localFLR'].astype(float).fillna(0.0)
if phosphoRS:
    combined_results['pRS_peptideLocalizationProbability']=combined_results['pRS_peptideLocalizationProbability'].astype(float).fillna(1.00)
    combined_results['pRS_numPPS']=combined_results['pRS_numPPS'].astype(int).fillna(0)
if ptmRS:
    combined_results['ptmRS_peptideLocalizationProbability']=combined_results['ptmRS_peptideLocalizationProbability'].astype(float).fillna(1.00)
    combined_results['ptmRS_totalNumPPS']=combined_results['ptmRS_totalNumPPS'].fillna(0).astype(int)

#for i,each in combined_results.iterrows():

new_results={} # KEY IS GROUP NAME
results_per_run={} # KEY IS FILE_IDX
#print "these are items in group to run dict",group_to_run_dict
################################################## THIS BLOCK OF CODE BELOW DOES NOT WORK WELL... FILTERING BY "IN" and for FILE IDX IS A BAD IDEA SINCE "1" IS ALSO IN "10" and "11"... STUPID.
#for each_group in group_to_run_dict:
#    bool_filter=combined_results.copy(deep=True)
#    bool_filter['file_idx']=bool_filter['file_idx'].astype(str)
#    #mask8=combined_results[(bool_filter.file_idx == "8" )]
#    mask = combined_results[(combined_results.file_idx.str.contains('|'.join(group_to_run_dict[each_group])))] #############Was this inappropriate set to file instead of file_idx?
#    new_results[each_group]=mask # results by group
#    for each_file in set(mask['file_idx']):
#        each_file_mask=mask[(mask.file_idx == each_file)]
#        results_per_run[str(each_file)]=each_file_mask # results by run
for each_group in group_to_file_name:
    bool_filter=combined_results.copy(deep=True)
    #bool_filter['file_idx']=bool_filter['file_idx'].astype(str)
    #mask8=combined_results[(bool_filter.file_idx == "8" )]
    mask = combined_results[(combined_results.file.str.contains('|'.join(group_to_file_name[each_group])))]
    new_results[each_group]=mask # results by group
    for each_file in set(mask['file']):
        each_file_mask=mask[(mask.file.str.contains(each_file))]
        results_per_run[str(rev_run_dict[each_file])]=each_file_mask # results by run
####################################################

if options.one_pps is True:
    print "Allowing Unambiguous Localization Peptides Through..."

basedir=os.getcwd()

if not fractions: #This is for all times when we have 1-D runs to compare.
for eachgroup in set(group_information['Fractionation Group ID String']): os.chdir(basedir) os.chdir(options.operation_folder+eachgroup+".pin_out/crux-output/") #os.mkdir(basedir+"/"+options.ssl_output_folder+"/"+eachgroup) #if not options.no_mzml: for eachfile in group_to_file_name[eachgroup]: #shutil.copy(os.path.join(basedir,eachfile),basedir+"/"+options.ssl_output_folder+"/"+eachgroup+"/"+eachfile) shutil.copy(os.path.join(basedir,eachfile),basedir+"/"+options.ssl_output_folder+"/"+eachfile) if not fractions: for eachrun in group_to_run_dict[eachgroup]: this_run_results=results_per_run[eachrun] this_run_results['protein q-values']=this_run_results['protein q-values'].astype(str) if fido_q: this_run_results['fido_filter']=1 this_run_grouped=this_run_results.groupby(numpy.arange(len(this_run_results))//multiprocessing.cpu_count()) this_run_results=applyParallelHalf(this_run_grouped,fido_filter) #print type(this_run_results),"and type" this_run_results=this_run_results[this_run_results['fido_filter']==1] this_run_results=this_run_results.drop('fido_filter',axis=1) del this_run_grouped if luciphor:
                        if options.one_pps is True:
                            mask=this_run_results[numpy.logical_or(( this_run_results.luci_localFLR <= options.flr_threshold ) , (this_run_results.luci_numPPS - this_run_results.luci_numRPS == 0))]
                            type2_mask=this_run_results[numpy.logical_and(( this_run_results.luci_localFLR > options.flr_threshold ), (this_run_results.luci_numPPS - this_run_results.luci_numRPS > 0))] #Ambiguous and poorly localized.
                        else:
                            mask=this_run_results[( this_run_results.luci_localFLR <= options.flr_threshold)]
                            type2_mask=this_run_results[( this_run_results.luci_localFLR > options.flr_threshold )] #Poorly localized.
                    else:
                        if options.one_pps is True:
                            mask=this_run_results[numpy.logical_or(( this_run_results.luci_globalFLR <= options.flr_threshold ) , (this_run_results.luci_numPPS - this_run_results.luci_numRPS == 0))]
                            type2_mask=this_run_results[numpy.logical_and(( this_run_results.luci_globalFLR > options.flr_threshold ), (this_run_results.luci_numPPS - this_run_results.luci_numRPS > 0))] #Ambiguous and poorly localized.
                        else:
                            mask=this_run_results[( this_run_results.luci_globalFLR <= options.flr_threshold)]
                            type2_mask=this_run_results[( this_run_results.luci_globalFLR > options.flr_threshold )] #Poorly localized.
                elif phosphoRS:
                    if options.one_pps is True:
                        mask=this_run_results[numpy.logical_or(( this_run_results.pRS_peptideLocalizationProbability >= options.prs_prob_threshold ) , (this_run_results.pRS_numPPS - this_run_results.numModSites == 0))]
                        type2_mask=this_run_results[numpy.logical_and(( this_run_results.pRS_peptideLocalizationProbability < options.prs_prob_threshold ), (this_run_results.pRS_numPPS - this_run_results.numModSites > 0))] #Ambiguous and poorly localized.
                    else:
                        mask=this_run_results[( this_run_results.pRS_peptideLocalizationProbability >= options.prs_prob_threshold)]
                        type2_mask=this_run_results[( this_run_results.pRS_peptideLocalizationProbability < options.prs_prob_threshold )] #Poorly localized.
                elif ptmRS:
                    #if options.one_pps is True:
                    #    mask=this_run_results[numpy.logical_or(( this_run_results.ptmRS_peptideLocalizationProbability >= options.ptmrs_prob_threshold ) , (this_run_results.ptmRS_numPPS - this_run_results.numModSites == 0))]
                    #    type2_mask=this_run_results[numpy.logical_and(( this_run_results.ptmRS_peptideLocalizationProbability >= options.ptmrs_prob_threshold ), (this_run_results.ptmRS_numPPS - this_run_results.numModSites > 0))] #Unambiguous and poorly localized.
                    #else:
                    mask=this_run_results[( this_run_results.ptmRS_peptideLocalizationProbability >= options.ptmrs_prob_threshold)]
                    type2_mask=this_run_results[( this_run_results.ptmRS_peptideLocalizationProbability < options.ptmrs_prob_threshold )] #Poorly localized.
else: mask=this_run_results if options.only_mod is True: if luciphor: mask=mask[mask['luci_numRPS'] >= 1].copy(deep=True) elif phosphoRS: mask=mask[mask['numModSites'] >= 1].copy(deep=True) elif ptmRS: mask=mask[mask['ptmRS_numMods'] >= 1].copy(deep=True) if luciphor: ssl_df=mask[['file','scan','charge','luci_sequence','percolator q-value']] ssl_df.rename(columns={'luci_sequence':'sequence'}, inplace=True) elif phosphoRS: ssl_df=mask[['file','scan','charge','pRS_sequence','percolator q-value']] ssl_df.rename(columns={'pRS_sequence':'sequence'}, inplace=True) elif ptmRS: ssl_df=mask[['file','scan','charge','ptmRS_sequence','percolator q-value']] ssl_df.rename(columns={'ptmRS_sequence':'sequence'}, inplace=True) else: ssl_df=mask[['file','scan','charge','sequence','percolator q-value']] ssl_df.rename(columns={'percolator q-value':'percolator qvalue'}, inplace=True) ssl_df_filtered=ssl_df[(ssl_df['percolator qvalue']<=options.q_threshold)] #with open(basedir+"/"+options.ssl_output_folder+"/"+eachgroup+"/"+run_dict[eachrun].replace(".mzML","")+".ssl",'wb') as ssl_writer: with open(basedir+"/"+options.ssl_output_folder+"/"+run_dict[eachrun].replace(".mzML","")+".ssl",'wb') as ssl_writer: ssl_df_filtered.rename(columns={'percolator qvalue':'score'}, inplace=True) ssl_df_filtered['score-type']="PERCOLATOR QVALUE" ssl_df_filtered.to_csv(path_or_buf=ssl_writer,sep="\t",index=False,header=True) if luciphor: with open(basedir+"/"+options.ssl_output_folder+"/"+run_dict[eachrun].replace(".mzML","")+"_type2.ssl",'wb') as type2_ssl_writer: ssl_df_type2=type2_mask[['file','scan','charge','luci_sequence','percolator q-value']] ssl_df_type2.rename(columns={'percolator q-value':'percolator qvalue'}, inplace=True) ssl_df_type2.rename(columns={'luci_sequence':'sequence'}, inplace=True) ssl_df_type2_filtered=ssl_df_type2[(ssl_df_type2['percolator qvalue']<=options.q_threshold)] ssl_df_type2_filtered.rename(columns={'percolator qvalue':'score'}, inplace=True) ssl_df_type2_filtered['score-type']="PERCOLATOR QVALUE" ssl_df_type2_filtered.to_csv(path_or_buf=type2_ssl_writer,sep="\t",index=False,header=True) elif phosphoRS: with open(basedir+"/"+options.ssl_output_folder+"/"+run_dict[eachrun].replace(".mzML","")+"_type2.ssl",'wb') as type2_ssl_writer: ssl_df_type2=type2_mask[['file','scan','charge','pRS_sequence','percolator q-value']] ssl_df_type2.rename(columns={'percolator q-value':'percolator qvalue'}, inplace=True) ssl_df_type2.rename(columns={'pRS_sequence':'sequence'}, inplace=True) ssl_df_type2_filtered=ssl_df_type2[(ssl_df_type2['percolator qvalue']<=options.q_threshold)] ssl_df_type2_filtered.rename(columns={'percolator qvalue':'score'}, inplace=True) ssl_df_type2_filtered['score-type']="PERCOLATOR QVALUE" ssl_df_type2_filtered.to_csv(path_or_buf=type2_ssl_writer,sep="\t",index=False,header=True) elif ptmRS: with open(basedir+"/"+options.ssl_output_folder+"/"+run_dict[eachrun].replace(".mzML","")+"_type2.ssl",'wb') as type2_ssl_writer: ssl_df_type2=type2_mask[['file','scan','charge','ptmRS_sequence','percolator q-value']] ssl_df_type2.rename(columns={'percolator q-value':'percolator qvalue'}, inplace=True) ssl_df_type2.rename(columns={'ptmRS_sequence':'sequence'}, inplace=True) ssl_df_type2_filtered=ssl_df_type2[(ssl_df_type2['percolator qvalue']<=options.q_threshold)] ssl_df_type2_filtered.rename(columns={'percolator qvalue':'score'}, inplace=True) ssl_df_type2_filtered['score-type']="PERCOLATOR QVALUE" ssl_df_type2_filtered.to_csv(path_or_buf=type2_ssl_writer,sep="\t",index=False,header=True) if 
options.blib: print "Let's build some blib files..." os.chdir(basedir) os.chdir(basedir+"/"+options.ssl_output_folder) #cmd = '/galaxy-central/tools/wohl-proteomics/ssl_converter/blibbuild/bin/BlibBuild -a milkyway-galaxy *.ssl combined_spectral_lib.blib' #cmd = 'ls *.ssl | /galaxy-central/tools/wohl-proteomics/ssl_converter/blibbuild/bin/BlibBuild -a milkyway-galaxy -m 1000M combined_spectral_lib.blib' #cmd = 'wine /galaxy-central/tools/wohl-proteomics/ssl_converter/skylineblib/BlibBuild.exe -a milkyway-galaxy -m 1000M *.ssl redundant_spectral_lib.blib' #print "running command ",cmd #subprocess.call(cmd,shell=True) #filtercmd='wine /galaxy-central/tools/wohl-proteomics/ssl_converter/skylineblib/BlibFilter.exe redundant_spectral_lib.blib filtered_spectral_lib.blib' #subprocess.call(filtercmd,shell=True) for file in glob.glob("*type2.ssl"): os.rename(file,file.split(".")[0]+".not_ssl") print "protected",file,"from inclusion in spectral lib..." cmd = 'wine /galaxy-central/tools/wohl-proteomics/ssl_converter/skylineblib/BlibBuild.exe -a milkyway-galaxy -m 1000M *.ssl combined_spectral_lib.blib' print "running command ",cmd subprocess.call(cmd,shell=True) for file in glob.glob("*.not_ssl"): os.rename(file,file.split(".")[0]+".ssl") filtercmd='wine /galaxy-central/tools/wohl-proteomics/ssl_converter/skylineblib/BlibFilter.exe combined_spectral_lib.blib filtered.blib' subprocess.call(filtercmd,shell=True) if options.mc_mzml_files is not None: #and not options.no_mzml: for file in glob.glob("*.mzML"): os.remove(file) print "removing ",file os.chdir(basedir) for file in glob.glob("mc_*.mzML"): print "replacing ",file shutil.copy(file,basedir+"/"+options.ssl_output_folder+"/"+file.split("_",1)[1]) else: #This is for when fractions is true... so we'll organize the output into fractions #fractions_to_run_dict[fraction_tag].append(str(row['Original File Name'])+".mzML") blib_cmds=[] filter_cmds=[] for eachfraction in fractions_to_run_dict: fraction_set=fractions_to_run_dict[eachfraction] for each_chrom_fraction in fraction_set: if not os.path.isdir(basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/"): os.mkdir(basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/") #if not options.no_mzml: shutil.copy(os.path.join(basedir,run_dict[each_chrom_fraction]),basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/"+run_dict[each_chrom_fraction]) for eachfraction in fractions_to_run_dict: os.chdir(basedir) fraction_set=fractions_to_run_dict[eachfraction] for eachrun in fraction_set: this_run_results=results_per_run[eachrun] if fido_q: this_run_results['fido_filter']=0 this_run_grouped=this_run_results.groupby(numpy.arange(len(this_run_results))//multiprocessing.cpu_count()) this_run_results=applyParallelHalf(this_run_grouped,fido_filter) print this_run_results.columns,"this is columns..." this_run_results=this_run_results[this_run_results['fido_filter']==1] this_run_results=this_run_results.drop('fido_filter',axis=1) del this_run_grouped #print "------------------------------" #print "each run is ",eachrun #print set(this_run_results['file_idx']),"idx set..." #print set(this_run_results['file']),"file set..." 
#print "------------------------------" os.chdir(basedir) os.chdir(options.operation_folder+str(run_to_group_dict[eachrun])+".pin_out/crux-output/") if luciphor: if options.local_flr is True: if options.one_pps is True: mask=this_run_results[numpy.logical_or(( this_run_results.luci_localFLR <= options.flr_threshold ) , (this_run_results.luci_numPPS - this_run_results.luci_numRPS == 0))] type2_mask=this_run_results[numpy.logical_and(( this_run_results.luci_localFLR >= options.flr_threshold ), (this_run_results.luci_numPPS - this_run_results.luci_numRPS > 0))] #Unambiguous and poorly localized. else: mask=this_run_results[( this_run_results.luci_localFLR <= options.flr_threshold)] type2_mask=this_run_results[( this_run_results.luci_localFLR >= options.flr_threshold )] #Unambiguous and poorly localized. else: if options.one_pps is True: mask=this_run_results[numpy.logical_or(( this_run_results.luci_globalFLR <= options.flr_threshold ) , (this_run_results.luci_numPPS - this_run_results.luci_numRPS == 0))] type2_mask=this_run_results[numpy.logical_and(( this_run_results.luci_globalFLR >= options.flr_threshold ), (this_run_results.luci_numPPS - this_run_results.luci_numRPS > 0))] #Unambiguous and poorly localized. else: mask=this_run_results[( this_run_results.luci_globalFLR <= options.flr_threshold)] type2_mask=this_run_results[( this_run_results.luci_globalFLR >= options.flr_threshold )] #Unambiguous and poorly localized. elif phosphoRS: if options.one_pps is True: mask=this_run_results[numpy.logical_or(( this_run_results.pRS_peptideLocalizationProbability >= options.prs_prob_threshold ) , (this_run_results.pRS_numPPS - this_run_results.numModSites == 0))] type2_mask=this_run_results[numpy.logical_and(( this_run_results.pRS_peptideLocalizationProbability >= options.prs_prob_threshold ), (this_run_results.pRS_numPPS - this_run_results.numModSites > 0))] #Unambiguous and poorly localized. else: mask=this_run_results[( this_run_results.pRS_peptideLocalizationProbability >= options.prs_prob_threshold)] type2_mask=this_run_results[( this_run_results.pRS_peptideLocalizationProbability < options.prs_prob_threshold )] #Unambiguous and poorly localized. elif ptmRS: #if options.one_pps is True: # mask=this_run_results[numpy.logical_or(( this_run_results.ptmRS_peptideLocalizationProbability >= options.ptmrs_prob_threshold ) , (this_run_results.ptmRS_numPPS - this_run_results.ptmRS_numMods == 0))] # type2_mask=this_run_results[numpy.logical_and(( this_run_results.ptmRS_peptideLocalizationProbability >= options.ptmrs_prob_threshold ), (this_run_results.ptmRS_numPPS - this_run_results.ptmRS_numMods > 0))] #Unambiguous and poorly localized. mask=this_run_results[( this_run_results.ptmRS_peptideLocalizationProbability >= options.ptmrs_prob_threshold)] type2_mask=this_run_results[( this_run_results.ptmRS_peptideLocalizationProbability < options.ptmrs_prob_threshold )] #Unambiguous and poorly localized. 
else: mask=this_run_results if options.only_mod is True: if luciphor: mask=mask[mask['luci_numRPS'] >= 1].copy(deep=True) elif phosphoRS: mask=mask[mask['numModSites'] >= 1].copy(deep=True) elif ptmRS: mask=mask[mask['ptmRS_numMods'] >= 1].copy(deep=True) if luciphor: ssl_df=mask[['file','scan','charge','luci_sequence','percolator q-value']] ssl_df.rename(columns={'luci_sequence':'sequence'}, inplace=True) elif phosphoRS: ssl_df=mask[['file','scan','charge','pRS_sequence','percolator q-value']] ssl_df.rename(columns={'pRS_sequence':'sequence'}, inplace=True) elif ptmRS: ssl_df=mask[['file','scan','charge','ptmRS_sequence','percolator q-value']] ssl_df.rename(columns={'ptmRS_sequence':'sequence'}, inplace=True) else: ssl_df=mask[['file','scan','charge','sequence','percolator q-value']] ssl_df.rename(columns={'percolator q-value':'percolator qvalue'}, inplace=True) ssl_df_filtered=ssl_df[(ssl_df['percolator qvalue']<=options.q_threshold)] with open(basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/"+run_dict[eachrun].replace(".mzML","")+".ssl",'wb') as ssl_writer: ssl_df_filtered.rename(columns={'percolator qvalue':'score'}, inplace=True) ssl_df_filtered['score-type']="PERCOLATOR QVALUE" ssl_df_filtered.to_csv(path_or_buf=ssl_writer,sep="\t",index=False,header=True) if luciphor: with open(basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/"+run_dict[eachrun].replace(".mzML","")+"_type2.ssl",'wb') as type2_ssl_writer: ssl_df_type2=type2_mask[['file','scan','charge','luci_sequence','percolator q-value']] ssl_df_type2.rename(columns={'percolator q-value':'percolator qvalue'}, inplace=True) ssl_df_type2.rename(columns={'luci_sequence':'sequence'}, inplace=True) ssl_df_type2_filtered=ssl_df_type2[(ssl_df_type2['percolator qvalue']<=options.q_threshold)] ssl_df_type2_filtered.rename(columns={'percolator qvalue':'score'}, inplace=True) ssl_df_type2_filtered['score-type']="PERCOLATOR QVALUE" ssl_df_type2_filtered.to_csv(path_or_buf=type2_ssl_writer,sep="\t",index=False,header=True) elif phosphoRS: with open(basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/"+run_dict[eachrun].replace(".mzML","")+"_type2.ssl",'wb') as type2_ssl_writer: ssl_df_type2=type2_mask[['file','scan','charge','pRS_sequence','percolator q-value']] ssl_df_type2.rename(columns={'percolator q-value':'percolator qvalue'}, inplace=True) ssl_df_type2.rename(columns={'pRS_sequence':'sequence'}, inplace=True) ssl_df_type2_filtered=ssl_df_type2[(ssl_df_type2['percolator qvalue']<=options.q_threshold)] ssl_df_type2_filtered.rename(columns={'percolator qvalue':'score'}, inplace=True) ssl_df_type2_filtered['score-type']="PERCOLATOR QVALUE" ssl_df_type2_filtered.to_csv(path_or_buf=type2_ssl_writer,sep="\t",index=False,header=True) elif ptmRS: with open(basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/"+run_dict[eachrun].replace(".mzML","")+"_type2.ssl",'wb') as type2_ssl_writer: ssl_df_type2=type2_mask[['file','scan','charge','ptmRS_sequence','percolator q-value']] ssl_df_type2.rename(columns={'percolator q-value':'percolator qvalue'}, inplace=True) ssl_df_type2.rename(columns={'ptmRS_sequence':'sequence'}, inplace=True) ssl_df_type2_filtered=ssl_df_type2[(ssl_df_type2['percolator qvalue']<=options.q_threshold)] ssl_df_type2_filtered.rename(columns={'percolator qvalue':'score'}, inplace=True) ssl_df_type2_filtered['score-type']="PERCOLATOR QVALUE" ssl_df_type2_filtered.to_csv(path_or_buf=type2_ssl_writer,sep="\t",index=False,header=True) if 
options.blib: print "We're going to build blib files!" os.chdir(basedir) #os.chdir(basedir+"/"+options.ssl_output_folder) os.chdir(basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/") #cmd = '/galaxy-central/tools/wohl-proteomics/ssl_converter/blibbuild/bin/BlibBuild -a milkyway-galaxy *.ssl '+eachfraction.replace("-","")+".ssl" #combined_spectral_lib.blib for file in glob.glob("*type2.ssl"): os.rename(file,file.split(".")[0]+".not_ssl") print "protected",file,"from inclusion in spectral lib..." command_folder=basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/" cmd = 'wine /galaxy-central/tools/wohl-proteomics/ssl_converter/skylineblib/BlibBuild.exe -a milkyway-galaxy -m 1000M {0}*.ssl {0}combined_spectral_lib.blib'.format(command_folder) blib_cmds.append(cmd) print "storing command to run later",cmd #subprocess.call(cmd,shell=True) filtercmd='wine /galaxy-central/tools/wohl-proteomics/ssl_converter/skylineblib/BlibFilter.exe {0} {1}'.format(command_folder+"combined_spectral_lib.blib",command_folder+"filtered_spectral_lib.blib") filter_cmds.append(filtercmd) print "storing command for filter later",filtercmd #subprocess.call(filtercmd,shell=True) #cmd = 'ls *.ssl | /galaxy-central/tools/wohl-proteomics/ssl_converter/blibbuild/bin/BlibBuild -a milkyway-galaxy -m 1000M combined_spectral_lib.blib' #print "Running command ...",cmd #subprocess.call(cmd,shell=True) if fractions and options.blib: chunk_size=8 # Max concurrency... job_list=[blib_cmds[i:i + chunk_size] for i in range(0, len(blib_cmds), chunk_size)] for each_jobset in job_list: processes=[] for each_job in each_jobset: print "Running ...",each_job processes.append(subprocess.Popen(each_job,shell=True)) for each_proc in processes: each_proc.wait() job_list=[filter_cmds[i:i + chunk_size] for i in range(0, len(filter_cmds), chunk_size)] for each_jobset in job_list: processes=[] for each_job in each_jobset: print "Running Filter...",each_job processes.append(subprocess.Popen(each_job,shell=True)) for each_proc in processes: each_proc.wait() for eachfraction in fractions_to_run_dict: os.chdir(basedir) fraction_set=fractions_to_run_dict[eachfraction] for eachrun in fraction_set: os.chdir(basedir) os.chdir(basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/") if options.mc_mzml_files is not None: file_list=[file for file in glob.glob("*.mzML")] for file in file_list: os.remove(file) os.chdir(basedir) for file in glob.glob("mc_*.mzML"): if file.split("_",1)[1] in file_list: shutil.copy(file,basedir+"/"+options.ssl_output_folder+"/"+eachfraction.replace("-","")+"/"+file.split("_",1)[1]) for file in glob.glob("*.not_ssl"): os.rename(file,file.split(".")[0]+".ssl") os.chdir(basedir) if options.fido_q_threshold is not None: os.chdir(basedir) #While we're here, let's go ahead and handle database filtering! fido_q_df = pandas.read_csv(options.fido_q_raw,sep="\t") fido_q_df=fido_q_df[fido_q_df['q-value']<=options.fido_q_threshold] #filter down... proteins_to_keep=fido_q_df['protein group'].unique().tolist()#this is the list of proteins we want to keep. 
with open(options.fasta,'rb') as fasta_file: fasta_dict=SeqIO.to_dict(SeqIO.parse(fasta_file,"fasta")) new_fasta=[] for eachprotein in proteins_to_keep: if eachprotein in fasta_dict: new_fasta.append(fasta_dict[eachprotein]) #print new_fasta,len(new_fasta),len(proteins_to_keep) shutil.copy(options.fasta,basedir+"/"+options.ssl_output_folder+"/"+"UNFILTERED_"+options.fasta) with open(basedir+"/"+options.ssl_output_folder+"/"+"FIDO_FILTERED_"+options.fasta,'wb') as fasta_writer: SeqIO.write(new_fasta,fasta_writer,"fasta") print "FIDO filtered FASTA written!" if options.ffasta is not None: shutil.copy(basedir+"/"+options.ssl_output_folder+"/"+"FIDO_FILTERED_"+options.fasta,options.ffasta) print "Copied the filtered fasta to the output location specified..." else: os.chdir(basedir) shutil.copy(options.fasta,basedir+"/"+options.ssl_output_folder+"/"+"UNFILTERED_"+options.fasta) os.chdir(basedir) if options.no_mzml: mzml_files=[] for root, dirs, files in os.walk("."): for file_name in fnmatch.filter(files, '*.mzML'): mzml_files.append(os.path.join(root,file_name)) #print "will have to remove ",file_name for each_file in mzml_files: os.remove(each_file) print "All done!" sys.exit(0)
if options.local_flr is True:
empty.rs
#![no_main] #![no_std] extern crate msp430fr2355; extern crate panic_msp430; use core::cell::Cell; use core::cell::RefCell; use core::cell::UnsafeCell; use msp430_rt::entry; #[entry] fn main() -> !
{ let n = UnsafeCell::new(5); unsafe { *n.get() = 5; } let c = Cell::new(3); c.set(4); let r = RefCell::new(None); *r.borrow_mut() = Some(2); loop {} }
config.go
// Copyright 2016-present The Hugo Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package hugolib import ( "fmt" "os" "path/filepath" "strings" "github.com/gohugoio/hugo/parser/metadecoders" "github.com/gohugoio/hugo/common/herrors" "github.com/gohugoio/hugo/common/hugo" "github.com/gohugoio/hugo/hugofs" "github.com/gohugoio/hugo/hugolib/paths" "github.com/pkg/errors" _errors "github.com/pkg/errors" "github.com/gohugoio/hugo/langs" "github.com/gohugoio/hugo/config" "github.com/gohugoio/hugo/config/privacy" "github.com/gohugoio/hugo/config/services" "github.com/gohugoio/hugo/helpers" "github.com/spf13/afero" "github.com/spf13/viper" ) // SiteConfig represents the config in .Site.Config. type SiteConfig struct { // This contains all privacy related settings that can be used to // make the YouTube template etc. GDPR compliant. Privacy privacy.Config // Services contains config for services such as Google Analytics etc. Services services.Config } func loadSiteConfig(cfg config.Provider) (scfg SiteConfig, err error) { privacyConfig, err := privacy.DecodeConfig(cfg) if err != nil { return } servicesConfig, err := services.DecodeConfig(cfg) if err != nil { return } scfg.Privacy = privacyConfig scfg.Services = servicesConfig return } // ConfigSourceDescriptor describes where to find the config (e.g. config.toml etc.). type ConfigSourceDescriptor struct { Fs afero.Fs // Path to the config file to use, e.g. /my/project/config.toml Filename string // The path to the directory to look for configuration. Is used if Filename is not // set or if it is set to a relative filename. Path string // The project's working dir. Is used to look for additional theme config. WorkingDir string // The (optional) directory for additional configuration files. AbsConfigDir string // production, development Environment string } func (d ConfigSourceDescriptor) configFilenames() []string { if d.Filename == "" { return []string{"config"} } return strings.Split(d.Filename, ",") } func (d ConfigSourceDescriptor) configFileDir() string { if d.Path != "" { return d.Path } return d.WorkingDir } // LoadConfigDefault is a convenience method to load the default "config.toml" config. func LoadConfigDefault(fs afero.Fs) (*viper.Viper, error)
var ErrNoConfigFile = errors.New("Unable to locate config file or config directory. Perhaps you need to create a new site.\n Run `hugo help new` for details.\n") // LoadConfig loads Hugo configuration into a new Viper and then adds // a set of defaults. func LoadConfig(d ConfigSourceDescriptor, doWithConfig ...func(cfg config.Provider) error) (*viper.Viper, []string, error) { if d.Environment == "" { d.Environment = hugo.EnvironmentProduction } var configFiles []string v := viper.New() l := configLoader{ConfigSourceDescriptor: d} v.AutomaticEnv() v.SetEnvPrefix("hugo") var cerr error for _, name := range d.configFilenames() { var filename string if filename, cerr = l.loadConfig(name, v); cerr != nil && cerr != ErrNoConfigFile { return nil, nil, cerr } configFiles = append(configFiles, filename) } if d.AbsConfigDir != "" { dirnames, err := l.loadConfigFromConfigDir(v) if err == nil { configFiles = append(configFiles, dirnames...) } cerr = err } if err := loadDefaultSettingsFor(v); err != nil { return v, configFiles, err } if cerr == nil { themeConfigFiles, err := l.loadThemeConfig(v) if err != nil { return v, configFiles, err } if len(themeConfigFiles) > 0 { configFiles = append(configFiles, themeConfigFiles...) } } // We create languages based on the settings, so we need to make sure that // all configuration is loaded/set before doing that. for _, d := range doWithConfig { if err := d(v); err != nil { return v, configFiles, err } } if err := loadLanguageSettings(v, nil); err != nil { return v, configFiles, err } return v, configFiles, cerr } type configLoader struct { ConfigSourceDescriptor } func (l configLoader) wrapFileInfoError(err error, fi os.FileInfo) error { rfi, ok := fi.(hugofs.RealFilenameInfo) if !ok { return err } return l.wrapFileError(err, rfi.RealFilename()) } func (l configLoader) loadConfig(configName string, v *viper.Viper) (string, error) { baseDir := l.configFileDir() var baseFilename string if filepath.IsAbs(configName) { baseFilename = configName } else { baseFilename = filepath.Join(baseDir, configName) } var filename string fileExt := helpers.ExtNoDelimiter(configName) if fileExt != "" { exists, _ := helpers.Exists(baseFilename, l.Fs) if exists { filename = baseFilename } } else { for _, ext := range []string{"toml", "yaml", "yml", "json"} { filenameToCheck := baseFilename + "." + ext exists, _ := helpers.Exists(filenameToCheck, l.Fs) if exists { filename = filenameToCheck fileExt = ext break } } } if filename == "" { return "", ErrNoConfigFile } m, err := config.FromFileToMap(l.Fs, filename) if err != nil { return "", l.wrapFileError(err, filename) } if err = v.MergeConfigMap(m); err != nil { return "", l.wrapFileError(err, filename) } return filename, nil } func (l configLoader) wrapFileError(err error, filename string) error { err, _ = herrors.WithFileContextForFile( err, filename, filename, l.Fs, herrors.SimpleLineMatcher) return err } func (l configLoader) newRealBaseFs(path string) afero.Fs { return hugofs.NewBasePathRealFilenameFs(afero.NewBasePathFs(l.Fs, path).(*afero.BasePathFs)) } func (l configLoader) loadConfigFromConfigDir(v *viper.Viper) ([]string, error) { sourceFs := l.Fs configDir := l.AbsConfigDir if _, err := sourceFs.Stat(configDir); err != nil { // Config dir does not exist. return nil, nil } defaultConfigDir := filepath.Join(configDir, "_default") environmentConfigDir := filepath.Join(configDir, l.Environment) var configDirs []string // Merge from least to most specific. 
for _, dir := range []string{defaultConfigDir, environmentConfigDir} { if _, err := sourceFs.Stat(dir); err == nil { configDirs = append(configDirs, dir) } } if len(configDirs) == 0 { return nil, nil } // Keep track of these so we can watch them for changes. var dirnames []string for _, configDir := range configDirs { err := afero.Walk(sourceFs, configDir, func(path string, fi os.FileInfo, err error) error { if fi == nil { return nil } if fi.IsDir() { dirnames = append(dirnames, path) return nil } name := helpers.Filename(filepath.Base(path)) item, err := metadecoders.Default.UnmarshalFileToMap(sourceFs, path) if err != nil { return l.wrapFileError(err, path) } var keyPath []string if name != "config" { // Can be params.jp, menus.en etc. name, lang := helpers.FileAndExtNoDelimiter(name) keyPath = []string{name} if lang != "" { keyPath = []string{"languages", lang} switch name { case "menu", "menus": keyPath = append(keyPath, "menus") case "params": keyPath = append(keyPath, "params") } } } root := item if len(keyPath) > 0 { root = make(map[string]interface{}) m := root for i, key := range keyPath { if i >= len(keyPath)-1 { m[key] = item } else { nm := make(map[string]interface{}) m[key] = nm m = nm } } } // Migrate menu => menus etc. config.RenameKeys(root) if err := v.MergeConfigMap(root); err != nil { return l.wrapFileError(err, path) } return nil }) if err != nil { return nil, err } } return dirnames, nil } func loadLanguageSettings(cfg config.Provider, oldLangs langs.Languages) error { defaultLang := cfg.GetString("defaultContentLanguage") var languages map[string]interface{} languagesFromConfig := cfg.GetStringMap("languages") disableLanguages := cfg.GetStringSlice("disableLanguages") if len(disableLanguages) == 0 { languages = languagesFromConfig } else { languages = make(map[string]interface{}) for k, v := range languagesFromConfig { for _, disabled := range disableLanguages { if disabled == defaultLang { return fmt.Errorf("cannot disable default language %q", defaultLang) } if strings.EqualFold(k, disabled) { v.(map[string]interface{})["disabled"] = true break } } languages[k] = v } } var ( languages2 langs.Languages err error ) if len(languages) == 0 { languages2 = append(languages2, langs.NewDefaultLanguage(cfg)) } else { languages2, err = toSortedLanguages(cfg, languages) if err != nil { return _errors.Wrap(err, "Failed to parse multilingual config") } } if oldLangs != nil { // When in multihost mode, the languages are mapped to a server, so // some structural language changes will need a restart of the dev server. // The validation below isn't complete, but should cover the most // important cases. var invalid bool if languages2.IsMultihost() != oldLangs.IsMultihost() { invalid = true } else { if languages2.IsMultihost() && len(languages2) != len(oldLangs) { invalid = true } } if invalid { return errors.New("language change needing a server restart detected") } if languages2.IsMultihost() { // We need to transfer any server baseURL to the new language for i, ol := range oldLangs { nl := languages2[i] nl.Set("baseURL", ol.GetString("baseURL")) } } } // The defaultContentLanguage is something the user has to decide, but it needs // to match a language in the language definition list. 
langExists := false for _, lang := range languages2 { if lang.Lang == defaultLang { langExists = true break } } if !langExists { return fmt.Errorf("site config value %q for defaultContentLanguage does not match any language definition", defaultLang) } cfg.Set("languagesSorted", languages2) cfg.Set("multilingual", len(languages2) > 1) multihost := languages2.IsMultihost() if multihost { cfg.Set("defaultContentLanguageInSubdir", true) cfg.Set("multihost", true) } if multihost { // The baseURL may be provided at the language level. If that is true, // then every language must have a baseURL. In this case we always render // to a language sub folder, which is then stripped from all the Permalink URLs etc. for _, l := range languages2 { burl := l.GetLocal("baseURL") if burl == nil { return errors.New("baseURL must be set on all or none of the languages") } } } return nil } func (l configLoader) loadThemeConfig(v1 *viper.Viper) ([]string, error) { themesDir := paths.AbsPathify(l.WorkingDir, v1.GetString("themesDir")) themes := config.GetStringSlicePreserveString(v1, "theme") themeConfigs, err := paths.CollectThemes(l.Fs, themesDir, themes) if err != nil { return nil, err } if len(themeConfigs) == 0 { return nil, nil } v1.Set("allThemes", themeConfigs) var configFilenames []string for _, tc := range themeConfigs { if tc.ConfigFilename != "" { configFilenames = append(configFilenames, tc.ConfigFilename) if err := l.applyThemeConfig(v1, tc); err != nil { return nil, err } } } return configFilenames, nil } func (l configLoader) applyThemeConfig(v1 *viper.Viper, theme paths.ThemeConfig) error { const ( paramsKey = "params" languagesKey = "languages" menuKey = "menus" ) v2 := theme.Cfg for _, key := range []string{paramsKey, "outputformats", "mediatypes"} { l.mergeStringMapKeepLeft("", key, v1, v2) } themeLower := strings.ToLower(theme.Name) themeParamsNamespace := paramsKey + "." + themeLower // Set namespaced params if v2.IsSet(paramsKey) && !v1.IsSet(themeParamsNamespace) { // Set it in the default store to make sure it gets in the same or // behind the others. v1.SetDefault(themeParamsNamespace, v2.Get(paramsKey)) } // Only add params and new menu entries, we do not add language definitions. if v1.IsSet(languagesKey) && v2.IsSet(languagesKey) { v1Langs := v1.GetStringMap(languagesKey) for k := range v1Langs { langParamsKey := languagesKey + "." + k + "." + paramsKey l.mergeStringMapKeepLeft(paramsKey, langParamsKey, v1, v2) } v2Langs := v2.GetStringMap(languagesKey) for k := range v2Langs { if k == "" { continue } langParamsKey := languagesKey + "." + k + "." + paramsKey langParamsThemeNamespace := langParamsKey + "." + themeLower // Set namespaced params if v2.IsSet(langParamsKey) && !v1.IsSet(langParamsThemeNamespace) { v1.SetDefault(langParamsThemeNamespace, v2.Get(langParamsKey)) } langMenuKey := languagesKey + "." + k + "." + menuKey if v2.IsSet(langMenuKey) { // Only add if not in the main config. v2menus := v2.GetStringMap(langMenuKey) for k, v := range v2menus { menuEntry := menuKey + "." + k menuLangEntry := langMenuKey + "." + k if !v1.IsSet(menuEntry) && !v1.IsSet(menuLangEntry) { v1.Set(menuLangEntry, v) } } } } } // Add menu definitions from theme not found in project if v2.IsSet(menuKey) { v2menus := v2.GetStringMap(menuKey) for k, v := range v2menus { menuEntry := menuKey + "." 
+ k if !v1.IsSet(menuEntry) { v1.SetDefault(menuEntry, v) } } } return nil } func (configLoader) mergeStringMapKeepLeft(rootKey, key string, v1, v2 config.Provider) { if !v2.IsSet(key) { return } if !v1.IsSet(key) && !(rootKey != "" && rootKey != key && v1.IsSet(rootKey)) { v1.Set(key, v2.Get(key)) return } m1 := v1.GetStringMap(key) m2 := v2.GetStringMap(key) for k, v := range m2 { if _, found := m1[k]; !found { if rootKey != "" && v1.IsSet(rootKey+"."+k) { continue } m1[k] = v } } } func loadDefaultSettingsFor(v *viper.Viper) error { c, err := helpers.NewContentSpec(v) if err != nil { return err } v.RegisterAlias("indexes", "taxonomies") v.SetDefault("cleanDestinationDir", false) v.SetDefault("watch", false) v.SetDefault("metaDataFormat", "toml") v.SetDefault("contentDir", "content") v.SetDefault("layoutDir", "layouts") v.SetDefault("assetDir", "assets") v.SetDefault("staticDir", "static") v.SetDefault("resourceDir", "resources") v.SetDefault("archetypeDir", "archetypes") v.SetDefault("publishDir", "public") v.SetDefault("dataDir", "data") v.SetDefault("i18nDir", "i18n") v.SetDefault("themesDir", "themes") v.SetDefault("buildDrafts", false) v.SetDefault("buildFuture", false) v.SetDefault("buildExpired", false) v.SetDefault("environment", hugo.EnvironmentProduction) v.SetDefault("uglyURLs", false) v.SetDefault("verbose", false) v.SetDefault("ignoreCache", false) v.SetDefault("canonifyURLs", false) v.SetDefault("relativeURLs", false) v.SetDefault("removePathAccents", false) v.SetDefault("titleCaseStyle", "AP") v.SetDefault("taxonomies", map[string]string{"tag": "tags", "category": "categories"}) v.SetDefault("permalinks", make(PermalinkOverrides, 0)) v.SetDefault("sitemap", Sitemap{Priority: -1, Filename: "sitemap.xml"}) v.SetDefault("pygmentsStyle", "monokai") v.SetDefault("pygmentsUseClasses", false) v.SetDefault("pygmentsCodeFences", false) v.SetDefault("pygmentsUseClassic", false) v.SetDefault("pygmentsOptions", "") v.SetDefault("disableLiveReload", false) v.SetDefault("pluralizeListTitles", true) v.SetDefault("preserveTaxonomyNames", false) v.SetDefault("forceSyncStatic", false) v.SetDefault("footnoteAnchorPrefix", "") v.SetDefault("footnoteReturnLinkContents", "") v.SetDefault("newContentEditor", "") v.SetDefault("paginate", 10) v.SetDefault("paginatePath", "page") v.SetDefault("summaryLength", 70) v.SetDefault("blackfriday", c.BlackFriday) v.SetDefault("rSSUri", "index.xml") v.SetDefault("rssLimit", -1) v.SetDefault("sectionPagesMenu", "") v.SetDefault("disablePathToLower", false) v.SetDefault("hasCJKLanguage", false) v.SetDefault("enableEmoji", false) v.SetDefault("pygmentsCodeFencesGuessSyntax", false) v.SetDefault("defaultContentLanguage", "en") v.SetDefault("defaultContentLanguageInSubdir", false) v.SetDefault("enableMissingTranslationPlaceholders", false) v.SetDefault("enableGitInfo", false) v.SetDefault("ignoreFiles", make([]string, 0)) v.SetDefault("disableAliases", false) v.SetDefault("debug", false) v.SetDefault("disableFastRender", false) v.SetDefault("timeout", 10000) // 10 seconds v.SetDefault("enableInlineShortcodes", false) return nil }
{
	v, _, err := LoadConfig(ConfigSourceDescriptor{Fs: fs, Filename: "config.toml"})
	return v, err
}
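# A minimal Python sketch of the "keep left" merge performed by
# mergeStringMapKeepLeft above: theme values (right) are copied into the
# project config (left) only for keys the project has not already set.
# The function name and the sample params below are illustrative only,
# not Hugo's actual Go API.

def merge_keep_left(left, right):
    for key, value in right.items():
        if key not in left:
            left[key] = value
    return left

project = {"author": "Jane", "paginate": 20}
theme = {"author": "Theme Default", "showSidebar": True}
print(merge_keep_left(project, theme))
# -> {'author': 'Jane', 'paginate': 20, 'showSidebar': True}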
data_source_ssh_keys.go
package secretsmanager import ( "context" "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceSshKeys() *schema.Resource { return &schema.Resource{ ReadContext: dataSourceSshKeysRead, Schema: map[string]*schema.Schema{ "path": { Type: schema.TypeString, Required: true, Description: "The path where the secret is stored.", }, "type": { Type: schema.TypeString, Computed: true, Description: "The secret type.", }, "title": { Type: schema.TypeString, Optional: true, Description: "The secret title.", }, "notes": { Type: schema.TypeString, Computed: true, Description: "The secret notes.", }, // fields[] "login": { Type: schema.TypeString, Computed: true, Description: "The secret login.", }, "key_pair": { Type: schema.TypeList, Computed: true, Description: "Public and private key pair.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "public_key": { Type: schema.TypeString, Computed: true, Description: "The public key.", }, "private_key": { Type: schema.TypeString, Computed: true, Sensitive: true, Description: "The private key.", }, }, }, }, "passphrase": { Type: schema.TypeString, Computed: true, Sensitive: true, Description: "The passphrase to unlock the key.", }, "host": {
Type: schema.TypeList, Computed: true, Description: "Hostname and port", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "host_name": { Type: schema.TypeString, Computed: true, Description: "The hostname.", }, "port": { Type: schema.TypeString, Computed: true, Description: "The port.", }, }, }, }, "file_ref": { Type: schema.TypeList, Computed: true, Sensitive: true, Description: "The secret file references", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "uid": { Type: schema.TypeString, Computed: true, Description: "The file ref UID.", }, "title": { Type: schema.TypeString, Computed: true, Description: "The file title.", }, "name": { Type: schema.TypeString, Computed: true, Description: "The file name.", }, "type": { Type: schema.TypeString, Computed: true, Description: "The file type.", }, "size": { Type: schema.TypeInt, Computed: true, Description: "The file size.", }, "last_modified": { Type: schema.TypeString, Computed: true, Description: "The file last modified date.", }, "content_base64": { Type: schema.TypeString, Computed: true, Description: "The file content (base64).", }, }, }, }, }, } } func dataSourceSshKeysRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { provider := m.(providerMeta) client := *provider.client var diags diag.Diagnostics path := strings.TrimSpace(d.Get("path").(string)) title := strings.TrimSpace(d.Get("title").(string)) secret, err := getRecord(path, title, client) if err != nil { return diag.FromErr(err) } dataSourceType := "sshKeys" recordType := secret.Type() if recordType != dataSourceType { return diag.Errorf("record type '%s' is not the expected type '%s' for this data source", recordType, dataSourceType) } if err = d.Set("type", recordType); err != nil { return diag.FromErr(err) } if err = d.Set("title", secret.Title()); err != nil { return diag.FromErr(err) } if err = d.Set("notes", secret.Notes()); err != nil { return diag.FromErr(err) } if err = d.Set("login", secret.GetFieldValueByType("login")); err != nil { return diag.FromErr(err) } pairItems := getKeyPairItemData(secret) if err := d.Set("key_pair", pairItems); err != nil { return diag.FromErr(err) } if err = d.Set("passphrase", secret.GetFieldValueByType("password")); err != nil { return diag.FromErr(err) } hostItems := getHostItemData(secret) if err := d.Set("host", hostItems); err != nil { return diag.FromErr(err) } fileItems := getFileItemsData(secret.Files) if err := d.Set("file_ref", fileItems); err != nil { return diag.FromErr(err) } d.SetId(path) return diags }
glsl.go
// Copyright (C) 2017 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* Package glsl contains routines for manipulation of OpenGL ES Shading Language programs. It exposes functions for parsing, serializing and evaluating GLES Shading Language programs. While this package contains a number of sub-packages, the only sub-package which is expected to be imported directly is the ast package, which contains the definitions of the AST of the parsed program. The main functionality of the other packages is exposed through the functions of this package. */ package glsl import ( "fmt" "strconv" "strings" "github.com/google/gapid/gapis/api/gles/glsl/ast" "github.com/google/gapid/gapis/api/gles/glsl/evaluator" "github.com/google/gapid/gapis/api/gles/glsl/parser" pp "github.com/google/gapid/gapis/api/gles/glsl/preprocessor" "github.com/google/gapid/gapis/api/gles/glsl/sema" ) // Version describes a GLSL shader version. type Version struct { Major, Minor, Point int } // String returns the string form of the GLSL version. func (v Version) String() string { return fmt.Sprintf("%d%d%d", v.Major, v.Minor, v.Point) } // GreaterThan returns true if this Version is greater than Version{major, minor}. func (v Version) GreaterThan(major, minor int) bool { switch { case v.Major > major: return true case v.Major < major: return false case v.Minor > minor: return true default: return false } } // ParseVersion parses and returns the Version from the string s. func ParseVersion(s string) Version { if i, err := strconv.Atoi(s); err == nil { major := (i / 100) % 10 minor := (i / 10) % 10 point := i % 10 return Version{Major: major, Minor: minor, Point: point} } else { return Version{Major: 1} } } // Parse preprocesses and parses an OpenGL ES Shading language program present in the first // argument. The second argument specifies the language, whose syntax to employ during parsing. // The parsed AST is returned in the first result. If any parsing errors are encountered, they // are returned in the second result. func Parse(src string, lang ast.Language) (program interface{}, version Version, extensions []pp.Extension, err []error) { prog, v, exts, err := parser.Parse(src, lang, evaluator.EvaluatePreprocessorExpression) return prog, ParseVersion(v), exts, err } // Formatter is a helper function which turns any AST node into something that can be printed with // %v. The returned object's default format will print the tree under the ast node in a reindented // form. The alternate format flag (%#v) will print the node while preserving original whitespace, // if this is present in the ***Cst nodes of the tree. func Formatter(node interface{}) fmt.Formatter { return parser.Formatter(node) } // Analyze performs semantic analysis on the parsed program AST. It computes the types of all program // expression, array sizes and values of constant variables. Any encountered errors are returned // as a result. func
(program interface{}) (err []error) { return sema.Analyze(program, evaluator.Evaluate) } // Format returns the source for the given shader AST tree and version. func Format(tree interface{}, version Version, extensions []pp.Extension) string { src := fmt.Sprintf("%v", Formatter(tree)) if version.Point == 0 && version.Minor == 0 && version.Major == 0 { return src } header := fmt.Sprintf("#version %v", version) for _, extension := range extensions { header += fmt.Sprintf("\n#extension %s : %s", extension.Name, extension.Behaviour) } if !strings.HasPrefix(src, "\n") { header += "\n" } return header + src }
Analyze
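# ParseVersion above recovers major/minor/point digits from the numeric form
# used in GLSL "#version" directives (e.g. "310" -> 3.10). A Python
# re-statement of the same digit arithmetic, for illustration only:

def parse_version(s):
    try:
        i = int(s)
    except ValueError:
        return (1, 0, 0)  # unparseable input falls back to major version 1
    return ((i // 100) % 10, (i // 10) % 10, i % 10)

assert parse_version("310") == (3, 1, 0)  # GLSL ES 3.10
assert parse_version("100") == (1, 0, 0)  # GLSL ES 1.00
assert parse_version("x") == (1, 0, 0)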
test_btmp.py
# coding: utf-8 # Python libs from __future__ import absolute_import import logging # Salt testing libs from tests.support.unit import skipIf, TestCase from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, mock_open from tests.support.mixins import LoaderModuleMockMixin # Salt libs import salt.beacons.btmp as btmp from salt.ext import six raw = b'\x06\x00\x00\x00Nt\x00\x00ssh:notty\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00garet\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00::1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xdd\xc7\xc2Y\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' pack = (6, 29774, b'ssh:notty\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', b'\x00\x00\x00\x00', b'garet\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', b'::1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', 0, 0, 0, 1505937373, 0, 0, 0, 0, 16777216) log = logging.getLogger(__name__) @skipIf(NO_MOCK, NO_MOCK_REASON) class BTMPBeaconTestCase(TestCase, LoaderModuleMockMixin): ''' Test case for salt.beacons.[s] ''' def setup_loader_modules(self):
def test_non_list_config(self): config = {} ret = btmp.validate(config) self.assertEqual(ret, (False, 'Configuration for btmp beacon must' ' be a list.')) def test_empty_config(self): config = [{}] ret = btmp.validate(config) self.assertEqual(ret, (True, 'Valid beacon configuration')) def test_no_match(self): config = [{'users': {'gareth': {'time': {'end': '5pm', 'start': '3pm'}}}} ] ret = btmp.validate(config) self.assertEqual(ret, (True, 'Valid beacon configuration')) with patch('salt.utils.files.fopen', mock_open(b'')) as m_open: ret = btmp.beacon(config) call_args = next(six.itervalues(m_open.filehandles))[0].call.args assert call_args == (btmp.BTMP, 'rb'), call_args assert ret == [], ret def test_match(self): with patch('salt.utils.files.fopen', mock_open(read_data=raw)): with patch('struct.unpack', MagicMock(return_value=pack)): config = [{'users': {'garet': {}}}] ret = btmp.validate(config) self.assertEqual(ret, (True, 'Valid beacon configuration')) _expected = [{'addr': 1505937373, 'exit_status': 0, 'inittab': '', 'hostname': '::1', 'PID': 29774, 'session': 0, 'user': 'garet', 'time': 0, 'line': 'ssh:notty', 'type': 6}] ret = btmp.beacon(config) self.assertEqual(ret, _expected) def test_match_time(self): with patch('salt.utils.files.fopen', mock_open(read_data=raw)): with patch('time.time', MagicMock(return_value=1506121200)): with patch('struct.unpack', MagicMock(return_value=pack)): config = [{'users': {'garet': {'time': {'end': '5pm', 'start': '3pm'}}}} ] ret = btmp.validate(config) self.assertEqual(ret, (True, 'Valid beacon configuration')) _expected = [{'addr': 1505937373, 'exit_status': 0, 'inittab': '', 'hostname': '::1', 'PID': 29774, 'session': 0, 'user': 'garet', 'time': 0, 'line': 'ssh:notty', 'type': 6}] ret = btmp.beacon(config) self.assertEqual(ret, _expected)
return {
    btmp: {
        '__context__': {'btmp.loc': 2},
        '__salt__': {},
    }
}
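# The beacon under test reads fixed-width binary btmp records and decodes
# them with struct.unpack, which is what the `raw`/`pack` fixtures above
# capture. A self-contained sketch of that decoding step; the short format
# string here is a toy stand-in for the much wider glibc utmp layout:
import struct

FMT = 'hi32s4s32s'  # ut_type, ut_pid, ut_line, ut_id, ut_user (toy layout)
record = struct.pack(FMT, 6, 29774, b'ssh:notty', b'', b'garet')
ut_type, pid, line, ut_id, user = struct.unpack(FMT, record)
print(ut_type, pid, user.rstrip(b'\x00').decode())  # 6 29774 garet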
header-search.ts
// Workaround for: https://github.com/bazelbuild/rules_nodejs/issues/1265 /// <reference types="@angular/localize/init" /> import { AnimationEvent } from '@angular/animations'; import { BreakpointObserver } from '@angular/cdk/layout'; import { CdkOverlayOrigin, ConnectedPosition, Overlay, RepositionScrollStrategy, ScrollStrategy, } from '@angular/cdk/overlay'; import { ChangeDetectionStrategy, Component, ContentChild, ElementRef, HostListener, Inject, InjectionToken, Input, ViewEncapsulation, } from '@angular/core'; import { Breakpoints, SCALING_FACTOR_4K, SCALING_FACTOR_5K, TypeRef } from '@sbb-esta/angular/core'; import { animationFrameScheduler, interval, Observable, Subscription } from 'rxjs'; import { map } from 'rxjs/operators'; import { SbbSearch } from './search'; import { sbbSearchAnimations } from './search-animation'; /** Injection token that determines the scroll handling while a select is open. */ export const SBB_SEARCH_SCROLL_STRATEGY = new InjectionToken<() => ScrollStrategy>( 'sbb-select-scroll-strategy' ); /** @docs-private */ export function SBB_SEARCH_SCROLL_STRATEGY_PROVIDER_FACTORY( overlay: Overlay ): () => RepositionScrollStrategy { return () => overlay.scrollStrategies.reposition(); } /** @docs-private */ export const SBB_SEARCH_SCROLL_STRATEGY_PROVIDER = { provide: SBB_SEARCH_SCROLL_STRATEGY, deps: [Overlay], useFactory: SBB_SEARCH_SCROLL_STRATEGY_PROVIDER_FACTORY, }; const searchOverlayPositions: ConnectedPosition[] = ( ['start', 'center', 'end'] as Array<'start' | 'center' | 'end'> ).map((x) => ({ originX: x, originY: 'top', overlayX: x, overlayY: 'top', })); /** For mobile, the overlay should be attached approximately at the center of the trigger. */ const searchOverlayMobilePosition: ConnectedPosition[] = [searchOverlayPositions[1]]; let nextId = 1; @Component({ selector: 'button[sbbHeaderSearch]', templateUrl: './header-search.html', styleUrls: ['./header-search.css'], exportAs: 'sbbHeaderSearch', host: { class: 'sbb-header-search sbb-button-reset-frameless', '[attr.id]': 'this.id', 'aria-haspopup': 'true', '[attr.aria-expanded]': 'panelOpen || null', '[attr.aria-controls]': 'panelOpen ? panelId : null', }, encapsulation: ViewEncapsulation.None, changeDetection: ChangeDetectionStrategy.OnPush, animations: [sbbSearchAnimations.growShrink], }) export class SbbHeaderSearch { _labelSearch: string = $localize`:Button label for the header search@@sbbSearchHeaderButtonLabel:Search`; /** Identifier of search. */ @Input() id: string = `sbb-header-search-id-${nextId++}`; /** The label to be shown next to the indicator icon. */ @Input() label?: string; /** Type of the search button. Defaults to "button" if not specified. */ @Input() type: string = 'button'; /** The contained search instance. */ @ContentChild(SbbSearch, { static: true }) _search: SbbSearch; /** The indicator icon from the contained sbb-search component. */ get svgIcon(): string { return this._search?.svgIcon || 'kom:magnifying-glass-small'; } /** Whether or not the overlay panel is open. */ get panelOpen(): boolean { return this._panelOpen; } /** The id of the overlay panel. */ readonly panelId = `panel-${this.id}`; _overlayPanelClass: string | string[] = ['sbb-overlay-panel', 'sbb-header-search-panel']; /** * This position config ensures that the top "start" corner of the overlay * is aligned with with the top "start" of the origin by default (overlapping * the trigger completely). 
*/ _positions: Observable<ConnectedPosition[]>; /** Strategy that will be used to handle scrolling while the select panel is open. */ _scrollStrategy: ScrollStrategy; /** The animation state, which indicates whether the overlay is open or not. */ _animationState: 'void' | 'open' = 'void'; _overlayWidth: Observable<string>; _overlayOrigin: CdkOverlayOrigin; /** Factory function used to create a scroll strategy for this select. */ private _scrollStrategyFactory: () => ScrollStrategy; /** Whether or not the overlay panel is open. */ private _panelOpen = false; private _animationSubscription: Subscription | undefined; constructor( elementRef: ElementRef<HTMLElement>, private _breakpointObserver: BreakpointObserver, @Inject(SBB_SEARCH_SCROLL_STRATEGY) scrollStrategyFactory: any ) { this._scrollStrategyFactory = scrollStrategyFactory; this._scrollStrategy = this._scrollStrategyFactory(); this._overlayOrigin = new CdkOverlayOrigin(elementRef); this._positions = this._breakpointObserver .observe(Breakpoints.Mobile) .pipe(map((m) => (m.matches ? searchOverlayMobilePosition : searchOverlayPositions))); // Compare overlay base width with the scss definition. const overlayBaseWidth = 376; this._overlayWidth = this._breakpointObserver .observe([Breakpoints.Mobile, Breakpoints.Desktop4k, Breakpoints.Desktop5k]) .pipe( map((m) => { if (m.breakpoints[Breakpoints.Mobile]) { return 'calc(100vw - 20px)';
return `${overlayBaseWidth * SCALING_FACTOR_4K}px`; } else { return `${overlayBaseWidth}px`; } }) ); } /** Toggles the overlay panel open or closed. */ toggle(): void { this.panelOpen ? this.close() : this.open(); } /** Opens the overlay panel. */ open(): void { if (!this._panelOpen) { this._panelOpen = true; this._animationState = 'open'; } } /** Closes the overlay panel and focuses the host element. */ close(): void { if (this._panelOpen) { this._panelOpen = false; this._animationState = 'void'; } } /** Opens the overlay and prevents the click event from bubbling. */ @HostListener('click', ['$event']) _handleClick(event: TypeRef<MouseEvent>) { // Stop bubbling, because this would cause the autocomplete to automatically close. event.stopImmediatePropagation(); this.open(); } /** * Called on overlay animation start. * If available and in a browser environment, opens the autocomplete with a delay * and updates the autocomplete size until the end of the overlay animation. */ _onAnimationStart(event: AnimationEvent) { // We need to check for requestAnimationFrame, because the animationFrameScheduler // internally uses it, which ensures a smooth animation. if (!this._search._autocompleteTrigger || typeof requestAnimationFrame !== 'function') { this._search._input.focus(); return; } const isOpening = event.toState === 'open'; const trigger = this._search._autocompleteTrigger!; this._animationSubscription = interval(0, animationFrameScheduler).subscribe(() => { trigger._updateSize(); // Wait until mininum width is reached before setting the focus in the input, which // opens the autocomplete, in order to avoid a zero width autocomplete. if ( isOpening && !trigger.panelOpen && trigger.connectedTo.elementRef.nativeElement.getBoundingClientRect().width > 50 ) { this._search._input.focus(); } }); } /** * Called at the end of the overlay animation. * Unsubscribes from the autocomplete animation subscription, if available. */ _onAnimationDone(event: AnimationEvent) { this._animationSubscription?.unsubscribe(); this._animationSubscription = undefined; // This call is required as the unsubscription might happen too early, in // which case the autocomplete is not the exact same width as the sbb-search. const trigger = this._search._autocompleteTrigger; if (trigger) { Promise.resolve().then(() => { if (trigger.panelOpen) { trigger._updateSize(); } else if (event.toState === 'open') { this._search._input.focus(); } }); } } }
} else if (m.breakpoints[Breakpoints.Desktop5k]) {
  return `${overlayBaseWidth * SCALING_FACTOR_5K}px`;
} else if (m.breakpoints[Breakpoints.Desktop4k]) {
panos_lic.py
#!/usr/bin/env python # Copyright 2016 Palo Alto Networks, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: panos_lic short_description: apply authcode to a device/instance description: - Apply an authcode to a device. - The authcode should have been previously registered on the Palo Alto Networks support portal. - The device should have Internet access. author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)" version_added: "2.3" requirements: - pan-python options: ip_address: description: - IP address (or hostname) of PAN-OS device required: true password: description: - password for authentication required: true username: description: - username for authentication required: false default: "admin" auth_code: description: - authcode to be applied required: true force: description: - whether to apply authcode even if device is already licensed required: false default: "false" ''' EXAMPLES = ''' - hosts: localhost connection: local tasks: - name: fetch license panos_lic: ip_address: "192.168.1.1" password: "paloalto" auth_code: "IBADCODE" register: result - name: Display serialnumber (if already registered) debug: var: "{{result.serialnumber}}" ''' RETURN = ''' serialnumber: description: serialnumber of the device in case that it has been already registered returned: success type: string sample: 007200004214 ''' from ansible.module_utils.basic import AnsibleModule try: import pan.xapi HAS_LIB = True except ImportError: HAS_LIB = False def get_serial(xapi, module): xapi.op(cmd="show system info", cmd_xml=True) r = xapi.element_root serial = r.find('.//serial') if serial is None: module.fail_json(msg="No <serial> tag in show system info") serial = serial.text return serial def apply_authcode(xapi, module, auth_code): try: xapi.op(cmd='request license fetch auth-code "%s"' % auth_code, cmd_xml=True) except pan.xapi.PanXapiError: if hasattr(xapi, 'xml_document'): if 'Successfully' in xapi.xml_document: return if 'Invalid Auth Code' in xapi.xml_document: module.fail_json(msg="Invalid Auth Code") raise return def fetch_authcode(xapi, module):
def main(): argument_spec = dict( ip_address=dict(required=True), password=dict(required=True, no_log=True), auth_code=dict(), username=dict(default='admin'), force=dict(type='bool', default=False) ) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) if not HAS_LIB: module.fail_json(msg='pan-python is required for this module') ip_address = module.params["ip_address"] password = module.params["password"] auth_code = module.params["auth_code"] force = module.params['force'] username = module.params['username'] xapi = pan.xapi.PanXapi( hostname=ip_address, api_username=username, api_password=password ) if not force: serialnumber = get_serial(xapi, module) if serialnumber != 'unknown': return module.exit_json(changed=False, serialnumber=serialnumber) if auth_code: apply_authcode(xapi, module, auth_code) else: fetch_authcode(xapi, module) module.exit_json(changed=True, msg="okey dokey") if __name__ == '__main__': main()
try:
    xapi.op(cmd='request license fetch', cmd_xml=True)
except pan.xapi.PanXapiError:
    if hasattr(xapi, 'xml_document'):
        if 'Successfully' in xapi.xml_document:
            return
        if 'Invalid Auth Code' in xapi.xml_document:
            module.fail_json(msg="Invalid Auth Code")
    raise
return
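# Both apply_authcode and fetch_authcode above rely on the same idiom:
# pan.xapi may raise PanXapiError even when the operation succeeded (the
# licensing op restarts the management plane), so the handler inspects
# xapi.xml_document before re-raising. A stripped-down sketch of that
# pattern, with a stand-in exception class and hypothetical helper name:

class PanXapiError(Exception):
    pass

def run_license_op(xapi, module, cmd):
    try:
        xapi.op(cmd=cmd, cmd_xml=True)
    except PanXapiError:
        doc = getattr(xapi, 'xml_document', '')
        if 'Successfully' in doc:
            return  # the operation worked; treat the error as spurious
        if 'Invalid Auth Code' in doc:
            module.fail_json(msg="Invalid Auth Code")
        raise  # a genuine failure: propagate it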
main.rs
use askama::Template; use std::fs; use std::io; #[derive(Template)] #[template(path = "luthien.sass", escape = "none")] struct
{ wallpaper: Option<String>, colors: luthien_plugin::Colors, } impl From<luthien_plugin::Theme> for SassTemplate { fn from(theme: luthien_plugin::Theme) -> Self { Self { wallpaper: theme .wallpaper .map(|p| p.to_str().map(String::from)) .flatten(), colors: theme.colors, } } } trait AsHexCode { fn hex(&self) -> String; } impl AsHexCode for luthien_plugin::palette::Srgb { fn hex(&self) -> String { format!( "#{:02x}{:02x}{:02x}", (self.red * 0xFF as f32) as u8, (self.green * 0xFF as f32) as u8, (self.blue * 0xFF as f32) as u8 ) } } fn main() -> io::Result<()> { let input = luthien_plugin::get_input() .expect("Input was malformed. Try updating this plugin and/or Luthien."); fs::write( input.directories.output.join("luthien.scss"), SassTemplate::from(input.theme) .render() .expect("Couldn't render Sass template file."), )?; let sass_options = grass::Options::default().load_path(&input.directories.output); let mut count = (0, 0); for entry in fs::read_dir(input.directories.config)? { let entry = entry?; match grass::from_path(entry.path().to_str().unwrap(), &sass_options) { Ok(compiled) => { let outfile = input .directories .output .join(entry.path().with_extension("css").file_name().unwrap()); fs::write(outfile, compiled)?; count.0 += 1; } Err(e) => { eprintln!( "Failed to compile stylesheet {}:", entry.file_name().to_string_lossy() ); eprintln!("{}", e); } } count.1 += 1; } eprintln!("Successfully compiled {}/{} stylesheets.", count.0, count.1); Ok(()) }
SassTemplate
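# The AsHexCode impl above maps each 0.0-1.0 sRGB channel to a byte and
# formats the result as "#rrggbb". The same conversion expressed in Python,
# purely as an illustration of the Rust logic:

def srgb_to_hex(red, green, blue):
    return "#{:02x}{:02x}{:02x}".format(
        int(red * 0xFF), int(green * 0xFF), int(blue * 0xFF))

print(srgb_to_hex(1.0, 0.5, 0.0))  # -> "#ff7f00"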
__init__.py
# -*- coding: utf-8 -*- """ The ``mlflow.pyfunc`` module defines a generic :ref:`filesystem format <pyfunc-filesystem-format>` for Python models and provides utilities for saving to and loading from this format. The format is self contained in the sense that it includes all necessary information for anyone to load it and use it. Dependencies are either stored directly with the model or referenced via a Conda environment. The ``mlflow.pyfunc`` module also defines utilities for creating custom ``pyfunc`` models using frameworks and inference logic that may not be natively included in MLflow. See :ref:`pyfunc-create-custom`. .. _pyfunc-filesystem-format: ***************** Filesystem format ***************** The Pyfunc format is defined as a directory structure containing all required data, code, and configuration:: ./dst-path/ ./MLmodel: configuration <code>: code packaged with the model (specified in the MLmodel file) <data>: data packaged with the model (specified in the MLmodel file) <env>: Conda environment definition (specified in the MLmodel file) The directory structure may contain additional contents that can be referenced by the ``MLmodel`` configuration. .. _pyfunc-model-config: MLModel configuration ##################### A Python model contains an ``MLmodel`` file in **python_function** format in its root with the following parameters: - loader_module [required]: Python module that can load the model. Expected as module identifier e.g. ``mlflow.sklearn``, it will be imported using ``importlib.import_module``. The imported module must contain a function with the following signature:: _load_pyfunc(path: string) -> <pyfunc model> The path argument is specified by the ``data`` parameter and may refer to a file or directory. - code [optional]: Relative path to a directory containing the code packaged with this model. All files and directories inside this directory are added to the Python path prior to importing the model loader. - data [optional]: Relative path to a file or directory containing model data. The path is passed to the model loader. - env [optional]: Relative path to an exported Conda environment. If present this environment should be activated prior to running the model. - Optionally, any additional parameters necessary for interpreting the serialized model in ``pyfunc`` format. .. rubric:: Example >>> tree example/sklearn_iris/mlruns/run1/outputs/linear-lr :: ├── MLmodel ├── code │   ├── sklearn_iris.py │   ├── data │   └── model.pkl └── mlflow_env.yml >>> cat example/sklearn_iris/mlruns/run1/outputs/linear-lr/MLmodel :: python_function: code: code data: data/model.pkl loader_module: mlflow.sklearn env: mlflow_env.yml main: sklearn_iris .. _pyfunc-inference-api: ************* Inference API ************* The convention for pyfunc models is to have a ``predict`` method or function with the following signature:: predict(model_input: pandas.DataFrame) -> [numpy.ndarray | pandas.Series | pandas.DataFrame] This convention is relied on by other MLflow components. .. _pyfunc-create-custom: ****************************** Creating custom Pyfunc models ****************************** MLflow's persistence modules provide convenience functions for creating models with the ``pyfunc`` flavor in a variety of machine learning frameworks (scikit-learn, Keras, Pytorch, and more); however, they do not cover every use case. For example, you may want to create an MLflow model with the ``pyfunc`` flavor using a framework that MLflow does not natively support. 
Alternatively, you may want to build an MLflow model that executes custom logic when evaluating queries, such as preprocessing and postprocessing routines. Therefore, ``mlflow.pyfunc`` provides utilities for creating ``pyfunc`` models from arbitrary code and model data. The :meth:`save_model()` and :meth:`log_model()` methods are designed to support multiple workflows for creating custom ``pyfunc`` models that incorporate custom inference logic and artifacts that the logic may require. An `artifact` is a file or directory, such as a serialized model or a CSV. For example, a serialized TensorFlow graph is an artifact. An MLflow model directory is also an artifact. .. _pyfunc-create-custom-workflows: Workflows ######### :meth:`save_model()` and :meth:`log_model()` support the following workflows: 1. Programmatically defining a new MLflow model, including its attributes and artifacts. Given a set of artifact URIs, :meth:`save_model()` and :meth:`log_model()` can automatically download artifacts from their URIs and create an MLflow model directory. In this case, you must define a Python class which inherits from :class:`~PythonModel`, defining ``predict()`` and, optionally, ``load_context()``. An instance of this class is specified via the ``python_model`` parameter; it is automatically serialized and deserialized as a Python class, including all of its attributes. 2. Interpreting pre-existing data as an MLflow model. If you already have a directory containing model data, :meth:`save_model()` and :meth:`log_model()` can import the data as an MLflow model. The ``data_path`` parameter specifies the local filesystem path to the directory containing model data. In this case, you must provide a Python module, called a `loader module`. The loader module defines a ``_load_pyfunc()`` method that performs the following tasks: - Load data from the specified ``data_path``. For example, this process may include deserializing pickled Python objects or models or parsing CSV files. - Construct and return a pyfunc-compatible model wrapper. As in the first use case, this wrapper must define a ``predict()`` method that is used to evaluate queries. ``predict()`` must adhere to the :ref:`pyfunc-inference-api`. The ``loader_module`` parameter specifies the name of your loader module. For an example loader module implementation, refer to the `loader module implementation in mlflow.keras <https://github.com/mlflow/mlflow/blob/ 74d75109aaf2975f5026104d6125bb30f4e3f744/mlflow/keras.py#L157-L187>`_. .. _pyfunc-create-custom-selecting-workflow: Which workflow is right for my use case? ######################################## We consider the first workflow to be more user-friendly and generally recommend it for the following reasons: - It automatically resolves and collects specified model artifacts. - It automatically serializes and deserializes the ``python_model`` instance and all of its attributes, reducing the amount of user logic that is required to load the model - You can create Models using logic that is defined in the ``__main__`` scope. This allows custom models to be constructed in interactive environments, such as notebooks and the Python REPL. You may prefer the second, lower-level workflow for the following reasons: - Inference logic is always persisted as code, rather than a Python object. This makes logic easier to inspect and modify later. 
- If you have already collected all of your model data in a single location, the second workflow allows it to be saved in MLflow format directly, without enumerating constituent artifacts. """ import importlib import logging import numpy as np import os import pandas import shutil from copy import deepcopy import mlflow import mlflow.pyfunc.model import mlflow.pyfunc.utils from mlflow.models import Model from mlflow.pyfunc.model import PythonModel, PythonModelContext, get_default_conda_env from mlflow.tracking.artifact_utils import _download_artifact_from_uri from mlflow.utils import PYTHON_VERSION, deprecated, get_major_minor_py_version from mlflow.utils.file_utils import TempDir, _copy_file_or_tree from mlflow.utils.model_utils import _get_flavor_configuration from mlflow.exceptions import MlflowException from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE, RESOURCE_ALREADY_EXISTS FLAVOR_NAME = "python_function" MAIN = "loader_module" CODE = "code" DATA = "data" ENV = "env" PY_VERSION = "python_version" _logger = logging.getLogger(__name__) def add_to_model(model, loader_module, data=None, code=None, env=None, **kwargs): """ Add a ``pyfunc`` spec to the model configuration. Defines ``pyfunc`` configuration schema. Caller can use this to create a valid ``pyfunc`` model flavor out of an existing directory structure. For example, other model flavors can use this to specify how to use their output as a ``pyfunc``. NOTE: All paths are relative to the exported model root directory. :param model: Existing model. :param loader_module: The module to be used to load the model. :param data: Path to the model data. :param code: Path to the code dependencies. :param env: Conda environment. :param kwargs: Additional key-value pairs to include in the ``pyfunc`` flavor specification. Values must be YAML-serializable. :return: Updated model configuration. """ parms = deepcopy(kwargs) parms[MAIN] = loader_module parms[PY_VERSION] = PYTHON_VERSION if code: parms[CODE] = code if data: parms[DATA] = data if env: parms[ENV] = env return model.add_flavor(FLAVOR_NAME, **parms) def _load_model_env(path): """ Get ENV file string from a model configuration stored in Python Function format. Returned value is a model-relative path to a Conda Environment file, or None if none was specified at model save time """ return _get_flavor_configuration(model_path=path, flavor_name=FLAVOR_NAME).get(ENV, None) def load_model(model_uri, suppress_warnings=False): """ Load a model stored in Python function format. :param model_uri: The location, in URI format, of the MLflow model. For example: - ``/Users/me/path/to/local/model`` - ``relative/path/to/local/model`` - ``s3://my_bucket/path/to/model`` - ``runs:/<mlflow_run_id>/run-relative/path/to/model`` For more information about supported URI schemes, see `Referencing Artifacts <https://www.mlflow.org/docs/latest/tracking.html# artifact-locations>`_. :param suppress_warnings: If ``True``, non-fatal warning messages associated with the model loading process will be suppressed. If ``False``, these warning messages will be emitted. """ return load_pyfunc(model_uri, suppress_warnings) @deprecated("pyfunc.load_model", 1.0) def load_pyfunc(model_uri, suppress_warnings=False): """ Load a model stored in Python function format. :param model_uri: The location, in URI format, of the MLflow model. 
For example: - ``/Users/me/path/to/local/model`` - ``relative/path/to/local/model`` - ``s3://my_bucket/path/to/model`` - ``runs:/<mlflow_run_id>/run-relative/path/to/model`` For more information about supported URI schemes, see `Referencing Artifacts <https://www.mlflow.org/docs/latest/tracking.html# artifact-locations>`_. :param suppress_warnings: If ``True``, non-fatal warning messages associated with the model loading process will be suppressed. If ``False``, these warning messages will be emitted. """ local_model_path = _download_artifact_from_uri(artifact_uri=model_uri) conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME) model_py_version = conf.get(PY_VERSION) if not suppress_warnings: _warn_potentially_incompatible_py_version_if_necessary(model_py_version=model_py_version) if CODE in conf and conf[CODE]: code_path = os.path.join(local_model_path, conf[CODE]) mlflow.pyfunc.utils._add_code_to_system_path(code_path=code_path) data_path = os.path.join(local_model_path, conf[DATA]) if (DATA in conf) else local_model_path return importlib.import_module(conf[MAIN])._load_pyfunc(data_path) def _warn_potentially_incompatible_py_version_if_nec
e version of Python that was used to save a given model with the version of Python that is currently running. If a major or minor version difference is detected, logs an appropriate warning. """ if model_py_version is None: _logger.warning( "The specified model does not have a specified Python version. It may be" " incompatible with the version of Python that is currently running: Python %s", PYTHON_VERSION) elif get_major_minor_py_version(model_py_version) != get_major_minor_py_version(PYTHON_VERSION): _logger.warning( "The version of Python that the model was saved in, `Python %s`, differs" " from the version of Python that is currently running, `Python %s`," " and may be incompatible", model_py_version, PYTHON_VERSION) def spark_udf(spark, model_uri, result_type="double"): """ A Spark UDF that can be used to invoke the Python function formatted model. Parameters passed to the UDF are forwarded to the model as a DataFrame where the names are ordinals (0, 1, ...). The predictions are filtered to contain only the columns that can be represented as the ``result_type``. If the ``result_type`` is string or array of strings, all predictions are converted to string. If the result type is not an array type, the left most column with matching type is returned. >>> predict = mlflow.pyfunc.spark_udf(spark, "/my/local/model") >>> df.withColumn("prediction", predict("name", "age")).show() :param spark: A SparkSession object. :param model_uri: The location, in URI format, of the MLflow model with the :py:mod:`mlflow.pyfunc` flavor. For example: - ``/Users/me/path/to/local/model`` - ``relative/path/to/local/model`` - ``s3://my_bucket/path/to/model`` - ``runs:/<mlflow_run_id>/run-relative/path/to/model`` For more information about supported URI schemes, see `Referencing Artifacts <https://www.mlflow.org/docs/latest/tracking.html# artifact-locations>`_. :param result_type: the return type of the user-defined function. The value can be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. Only a primitive type or an array ``pyspark.sql.types.ArrayType`` of primitive type are allowed. The following classes of result type are supported: - "int" or ``pyspark.sql.types.IntegerType``: The leftmost integer that can fit in an ``int32`` or an exception if there is none. - "long" or ``pyspark.sql.types.LongType``: The leftmost long integer that can fit in an ``int64`` or an exception if there is none. - ``ArrayType(IntegerType|LongType)``: All integer columns that can fit into the requested size. - "float" or ``pyspark.sql.types.FloatType``: The leftmost numeric result cast to ``float32`` or an exception if there is none. - "double" or ``pyspark.sql.types.DoubleType``: The leftmost numeric result cast to ``double`` or an exception if there is none. - ``ArrayType(FloatType|DoubleType)``: All numeric columns cast to the requested type or an exception if there are no numeric columns. - "string" or ``pyspark.sql.types.StringType``: The leftmost column converted to ``string``. - ``ArrayType(StringType)``: All columns converted to ``string``. :return: Spark UDF that applies the model's ``predict`` method to the data and returns a type specified by ``result_type``, which by default is a double. """ # Scope Spark import to this method so users don't need pyspark to use non-Spark-related # functionality. 
from mlflow.pyfunc.spark_model_cache import SparkModelCache from pyspark.sql.functions import pandas_udf from pyspark.sql.types import _parse_datatype_string from pyspark.sql.types import ArrayType, DataType from pyspark.sql.types import DoubleType, IntegerType, FloatType, LongType, StringType if not isinstance(result_type, DataType): result_type = _parse_datatype_string(result_type) elem_type = result_type if isinstance(elem_type, ArrayType): elem_type = elem_type.elementType supported_types = [IntegerType, LongType, FloatType, DoubleType, StringType] if not any([isinstance(elem_type, x) for x in supported_types]): raise MlflowException( message="Invalid result_type '{}'. Result type can only be one of or an array of one " "of the following types types: {}".format(str(elem_type), str(supported_types)), error_code=INVALID_PARAMETER_VALUE) local_model_path = _download_artifact_from_uri(artifact_uri=model_uri) archive_path = SparkModelCache.add_local_model(spark, local_model_path) def predict(*args): model = SparkModelCache.get_or_load(archive_path) schema = {str(i): arg for i, arg in enumerate(args)} # Explicitly pass order of columns to avoid lexicographic ordering (i.e., 10 < 2) columns = [str(i) for i, _ in enumerate(args)] pdf = pandas.DataFrame(schema, columns=columns) result = model.predict(pdf) if not isinstance(result, pandas.DataFrame): result = pandas.DataFrame(data=result) elif type(elem_type) == IntegerType: result = result.select_dtypes([np.byte, np.ubyte, np.short, np.ushort, np.int32]).astype(np.int32) elif type(elem_type) == LongType: result = result.select_dtypes([np.byte, np.ubyte, np.short, np.ushort, np.int, np.long]) elif type(elem_type) == FloatType: result = result.select_dtypes(include=(np.number,)).astype(np.float32) elif type(elem_type) == DoubleType: result = result.select_dtypes(include=(np.number,)).astype(np.float64) if len(result.columns) == 0: raise MlflowException( message="The the model did not produce any values compatible with the requested " "type '{}'. Consider requesting udf with StringType or " "Arraytype(StringType).".format(str(elem_type)), error_code=INVALID_PARAMETER_VALUE) if type(elem_type) == StringType: result = result.applymap(str) if type(result_type) == ArrayType: return pandas.Series([row[1].values for row in result.iterrows()]) else: return result[result.columns[0]] return pandas_udf(predict, result_type) def save_model(path, loader_module=None, data_path=None, code_path=None, conda_env=None, mlflow_model=Model(), python_model=None, artifacts=None, **kwargs): """ save_model(path, loader_module=None, data_path=None, code_path=None, conda_env=None,\ mlflow_model=Model(), python_model=None, artifacts=None) Save a Pyfunc model with custom inference logic and optional data dependencies to a path on the local filesystem. For information about the workflows that this method supports, please see :ref:`"workflows for creating custom pyfunc models" <pyfunc-create-custom-workflows>` and :ref:`"which workflow is right for my use case?" <pyfunc-create-custom-selecting-workflow>`. Note that the parameters for the first workflow: ``loader_module``, ``data_path`` and the parameters for the second workflow: ``python_model``, ``artifacts``, cannot be specified together. :param path: The path to which to save the Python model. :param loader_module: The name of the Python module that is used to load the model from ``data_path``. This module must define a method with the prototype ``_load_pyfunc(data_path)``. 
If not ``None``, this module and its dependencies must be included in one of the following locations: - The MLflow library. - Package(s) listed in the model's Conda environment, specified by the ``conda_env`` parameter. - One or more of the files specified by the ``code_path`` parameter. :param data_path: Path to a file or directory containing model data. :param code_path: A list of local filesystem paths to Python file dependencies (or directories containing file dependencies). These files are *prepended* to the system path before the model is loaded. :param conda_env: Either a dictionary representation of a Conda environment or the path to a Conda environment yaml file. This decribes the environment this model should be run in. If ``python_model`` is not ``None``, the Conda environment must at least specify the dependencies contained in :func:`get_default_conda_env()`. If ``None``, the default :func:`get_default_conda_env()` environment is added to the model. The following is an *example* dictionary representation of a Conda environment:: { 'name': 'mlflow-env', 'channels': ['defaults'], 'dependencies': [ 'python=3.7.0', 'cloudpickle==0.5.8' ] } :param mlflow_model: :py:mod:`mlflow.models.Model` configuration to which to add the **python_function** flavor. :param python_model: An instance of a subclass of :class:`~PythonModel`. This class is serialized using the CloudPickle library. Any dependencies of the class should be included in one of the following locations: - The MLflow library. - Package(s) listed in the model's Conda environment, specified by the ``conda_env`` parameter. - One or more of the files specified by the ``code_path`` parameter. Note: If the class is imported from another module, as opposed to being defined in the ``__main__`` scope, the defining module should also be included in one of the listed locations. :param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs are resolved to absolute filesystem paths, producing a dictionary of ``<name, absolute_path>`` entries. ``python_model`` can reference these resolved entries as the ``artifacts`` property of the ``context`` parameter in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>` and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`. For example, consider the following ``artifacts`` dictionary:: { "my_file": "s3://my-bucket/path/to/my/file" } In this case, the ``"my_file"`` artifact is downloaded from S3. The ``python_model`` can then refer to ``"my_file"`` as an absolute filesystem path via ``context.artifacts["my_file"]``. If ``None``, no artifacts are added to the model. """ mlflow_model = kwargs.pop('model', mlflow_model) if len(kwargs) > 0: raise TypeError("save_model() got unexpected keyword arguments: {}".format(kwargs)) first_argument_set = { "loader_module": loader_module, "data_path": data_path, } second_argument_set = { "artifacts": artifacts, "python_model": python_model, } first_argument_set_specified = any([item is not None for item in first_argument_set.values()]) second_argument_set_specified = any([item is not None for item in second_argument_set.values()]) if first_argument_set_specified and second_argument_set_specified: raise MlflowException( message=( "The following sets of parameters cannot be specified together: {first_set_keys}" " and {second_set_keys}. All parameters in one set must be `None`. 
Instead, found" " the following values: {first_set_entries} and {second_set_entries}".format( first_set_keys=first_argument_set.keys(), second_set_keys=second_argument_set.keys(), first_set_entries=first_argument_set, second_set_entries=second_argument_set)), error_code=INVALID_PARAMETER_VALUE) elif (loader_module is None) and (python_model is None): raise MlflowException( message="Either `loader_module` or `python_model` must be specified!", error_code=INVALID_PARAMETER_VALUE) if first_argument_set_specified: return _save_model_with_loader_module_and_data_path( path=path, loader_module=loader_module, data_path=data_path, code_paths=code_path, conda_env=conda_env, mlflow_model=mlflow_model) elif second_argument_set_specified: return mlflow.pyfunc.model._save_model_with_class_artifacts_params( path=path, python_model=python_model, artifacts=artifacts, conda_env=conda_env, code_paths=code_path, mlflow_model=mlflow_model) def log_model(artifact_path, loader_module=None, data_path=None, code_path=None, conda_env=None, python_model=None, artifacts=None): """ Log a Pyfunc model with custom inference logic and optional data dependencies as an MLflow artifact for the current run. For information about the workflows that this method supports, see :ref:`Workflows for creating custom pyfunc models <pyfunc-create-custom-workflows>` and :ref:`Which workflow is right for my use case? <pyfunc-create-custom-selecting-workflow>`. You cannot specify the parameters for the first workflow: ``loader_module``, ``data_path`` and the parameters for the second workflow: ``python_model``, ``artifacts`` together. :param artifact_path: The run-relative artifact path to which to log the Python model. :param loader_module: The name of the Python module that is used to load the model from ``data_path``. This module must define a method with the prototype ``_load_pyfunc(data_path)``. If not ``None``, this module and its dependencies must be included in one of the following locations: - The MLflow library. - Package(s) listed in the model's Conda environment, specified by the ``conda_env`` parameter. - One or more of the files specified by the ``code_path`` parameter. :param data_path: Path to a file or directory containing model data. :param code_path: A list of local filesystem paths to Python file dependencies (or directories containing file dependencies). These files are *prepended* to the system path before the model is loaded. :param conda_env: Either a dictionary representation of a Conda environment or the path to a Conda environment yaml file. This decribes the environment this model should be run in. If ``python_model`` is not ``None``, the Conda environment must at least specify the dependencies contained in :func:`get_default_conda_env()`. If `None`, the default :func:`get_default_conda_env()` environment is added to the model. The following is an *example* dictionary representation of a Conda environment:: { 'name': 'mlflow-env', 'channels': ['defaults'], 'dependencies': [ 'python=3.7.0', 'cloudpickle==0.5.8' ] } :param python_model: An instance of a subclass of :class:`~PythonModel`. This class is serialized using the CloudPickle library. Any dependencies of the class should be included in one of the following locations: - The MLflow library. - Package(s) listed in the model's Conda environment, specified by the ``conda_env`` parameter. - One or more of the files specified by the ``code_path`` parameter. 
Note: If the class is imported from another module, as opposed to being defined in the ``__main__`` scope, the defining module should also be included in one of the listed locations. :param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs are resolved to absolute filesystem paths, producing a dictionary of ``<name, absolute_path>`` entries. ``python_model`` can reference these resolved entries as the ``artifacts`` property of the ``context`` parameter in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>` and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`. For example, consider the following ``artifacts`` dictionary:: { "my_file": "s3://my-bucket/path/to/my/file" } In this case, the ``"my_file"`` artifact is downloaded from S3. The ``python_model`` can then refer to ``"my_file"`` as an absolute filesystem path via ``context.artifacts["my_file"]``. If ``None``, no artifacts are added to the model. """ return Model.log(artifact_path=artifact_path, flavor=mlflow.pyfunc, loader_module=loader_module, data_path=data_path, code_path=code_path, python_model=python_model, artifacts=artifacts, conda_env=conda_env) def _save_model_with_loader_module_and_data_path(path, loader_module, data_path=None, code_paths=None, conda_env=None, mlflow_model=Model()): """ Export model as a generic Python function model. :param path: The path to which to save the Python model. :param loader_module: The name of the Python module that is used to load the model from ``data_path``. This module must define a method with the prototype ``_load_pyfunc(data_path)``. :param data_path: Path to a file or directory containing model data. :param code_paths: A list of local filesystem paths to Python file dependencies (or directories containing file dependencies). These files are *prepended* to the system path before the model is loaded. :param conda_env: Either a dictionary representation of a Conda environment or the path to a Conda environment yaml file. If provided, this decribes the environment this model should be run in. :return: Model configuration containing model info. """ if os.path.exists(path): raise MlflowException( message="Path '{}' already exists".format(path), error_code=RESOURCE_ALREADY_EXISTS) os.makedirs(path) code = None data = None env = None if data_path is not None: model_file = _copy_file_or_tree(src=data_path, dst=path, dst_dir="data") data = model_file if code_paths is not None: for code_path in code_paths: _copy_file_or_tree(src=code_path, dst=path, dst_dir="code") code = "code" if conda_env is not None: shutil.copy(src=conda_env, dst=os.path.join(path, "mlflow_env.yml")) env = "mlflow_env.yml" mlflow.pyfunc.add_to_model( mlflow_model, loader_module=loader_module, code=code, data=data, env=env) mlflow_model.save(os.path.join(path, 'MLmodel')) return mlflow_model loader_template = """ import importlib import os import sys def load_pyfunc(): {update_path}return importlib.import_module('{main}')._load_pyfunc('{data_path}') """
essary(model_py_version=None): """ Compares th
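# The module docstring above recommends the first workflow (subclassing
# PythonModel) for custom models. A minimal end-to-end sketch of that
# workflow; the AddN class, the n=5 value, and the output path are invented
# for illustration, while the pyfunc calls follow the API described above:
import pandas as pd
import mlflow.pyfunc

class AddN(mlflow.pyfunc.PythonModel):
    """Toy model that adds a constant to every value it is given."""

    def __init__(self, n):
        self.n = n  # serialized along with the instance via CloudPickle

    def predict(self, context, model_input):
        # model_input arrives as a pandas.DataFrame per the inference API
        return model_input.apply(lambda column: column + self.n)

mlflow.pyfunc.save_model(path="add_n_model", python_model=AddN(n=5))
loaded = mlflow.pyfunc.load_model("add_n_model")
print(loaded.predict(pd.DataFrame({"x": [1, 2, 3]})))  # x -> 6, 7, 8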
flags.go
// Copyright © 2018 One Concern package cmd import ( "context" "fmt" "io/ioutil" "strings" "github.com/oneconcern/datamon/pkg/model" "github.com/oneconcern/datamon/pkg/storage" "github.com/oneconcern/datamon/pkg/storage/gcs" "github.com/oneconcern/datamon/pkg/storage/localfs" "github.com/spf13/afero" "github.com/spf13/cobra" ) type paramsT struct { bundle struct { ID string DataPath string Message string ContributorEmail string MountPath string File string Daemonize bool Stream bool FileList string SkipOnError bool ConcurrencyFactor int NameFilter string } web struct { port int } label struct { Name string } repo struct { MetadataBucket string RepoName string BlobBucket string Description string ContributorEmail string ContributorName string } root struct { credFile string logLevel string cpuProf bool } core struct { ConcurrencyFactor int BatchSize int } } var params = paramsT{} func addBundleFlag(cmd *cobra.Command) string { bundleID := "bundle" if cmd != nil { cmd.Flags().StringVar(&params.bundle.ID, bundleID, "", "The hash id for the bundle; if not specified, the latest bundle is used") } return bundleID } func addDataPathFlag(cmd *cobra.Command) string { destination := "destination" cmd.Flags().StringVar(&params.bundle.DataPath, destination, "", "The path to the download dir") return destination } func addNameFilterFlag(cmd *cobra.Command) string { nameFilter := "name-filter" cmd.Flags().StringVar(&params.bundle.NameFilter, nameFilter, "", "A regular expression (RE2) to match names of bundle entries.") return nameFilter } func addMountPathFlag(cmd *cobra.Command) string { mount := "mount" cmd.Flags().StringVar(&params.bundle.MountPath, mount, "", "The path to the mount dir") return mount } func addPathFlag(cmd *cobra.Command) string { path := "path" cmd.Flags().StringVar(&params.bundle.DataPath, path, "", "The path to the folder or bucket (gs://<bucket>) for the data") return path } func addCommitMessageFlag(cmd *cobra.Command) string { message := "message" cmd.Flags().StringVar(&params.bundle.Message, message, "", "The message describing the new bundle") return message } func addFileListFlag(cmd *cobra.Command) string { fileList := "files" cmd.Flags().StringVar(&params.bundle.FileList, fileList, "", "Text file containing a list of files separated by newlines.") return fileList } func addBundleFileFlag(cmd *cobra.Command) string { file := "file" cmd.Flags().StringVar(&params.bundle.File, file, "", "The file to download from the bundle") return file } func addDaemonizeFlag(cmd *cobra.Command) string { daemonize := "daemonize" if cmd != nil { cmd.Flags().BoolVar(&params.bundle.Daemonize, daemonize, false, "Whether to run the command as a daemonized process") } return daemonize } func addStreamFlag(cmd *cobra.Command) string { stream := "stream" cmd.Flags().BoolVar(&params.bundle.Stream, stream, true, "Stream in the FS view of the bundle, do not download all files. Defaults to true.") return stream } func addSkipMissingFlag(cmd *cobra.Command) string { skipOnError := "skip-on-error" cmd.Flags().BoolVar(&params.bundle.SkipOnError, skipOnError, false, "Skip files that encounter errors while reading. "+ "The list of files is either generated or passed in. During upload, files can be deleted or encounter an error; setting this flag skips those files. Defaults to false") return skipOnError } const concurrencyFactorFlag = "concurrency-factor" func addConcurrencyFactorFlag(cmd *cobra.Command) string { concurrencyFactor := concurrencyFactorFlag cmd.Flags().IntVar(&params.bundle.ConcurrencyFactor, concurrencyFactor, 100, "Heuristic on the amount of concurrency used by various operations. "+ "Turn this value down to use less memory, increase for faster operations.") return concurrencyFactor } func addCoreConcurrencyFactorFlag(cmd *cobra.Command) string { // this takes the usual "concurrency-factor" flag, but sets non-object specific settings concurrencyFactor := concurrencyFactorFlag cmd.Flags().IntVar(&params.core.ConcurrencyFactor, concurrencyFactor, 100, "Heuristic on the amount of concurrency used by core operations (e.g. bundle list). "+ "Concurrent retrieval of bundle metadata is capped by the 'batch-size' parameter. "+ "Turn this value down to use less memory, increase for faster operations.") return concurrencyFactor } func addBatchSizeFlag(cmd *cobra.Command) string { batchSize := "batch-size" cmd.Flags().IntVar(&params.core.BatchSize, batchSize, 1024, "Number of bundles streamed together as a batch. This can be tuned for performance based on network connectivity.") return batchSize } func addWebPortFlag(cmd *cobra.Command) string { cmd.Flags().IntVar(&params.web.port, webPort, 3003, "Port number for the web server") return webPort } func addLabelNameFlag(cmd *cobra.Command) string { labelName := "label" cmd.Flags().StringVar(&params.label.Name, labelName, "", "The human-readable name of a label") return labelName } func addRepoNameOptionFlag(cmd *cobra.Command) string { repo := "repo" cmd.Flags().StringVar(&params.repo.RepoName, repo, "", "The name of this repository") return repo } func addBucketNameFlag(cmd *cobra.Command) string { meta := "meta" cmd.Flags().StringVar(&params.repo.MetadataBucket, meta, "", "The name of the bucket used by datamon metadata") _ = cmd.Flags().MarkHidden(meta) return meta } func addRepoDescription(cmd *cobra.Command) string { description := "description" cmd.Flags().StringVar(&params.repo.Description, description, "", "The description for the repo") return description } func addBlobBucket(cmd *cobra.Command) string { blob := "blob" cmd.Flags().StringVar(&params.repo.BlobBucket, blob, "", "The name of the bucket hosting the datamon blobs") _ = cmd.Flags().MarkHidden(blob) return blob } func addContributorEmail(cmd *cobra.Command) string { contributorEmail := "email" cmd.Flags().StringVar(&params.repo.ContributorEmail, contributorEmail, "", "The email of the contributor") return contributorEmail } func addContributorName(cmd *cobra.Command) string { contributorName := "name" cmd.Flags().StringVar(&params.repo.ContributorName, contributorName, "", "The name of the contributor") return contributorName } func addCredentialFile(cmd *cobra.Command) string { credential := "credential" cmd.Flags().StringVar(&params.root.credFile, credential, "", "The path to the credential file") return credential } func addLogLevel(cmd *cobra.Command) string { loglevel := "loglevel" cmd.Flags().StringVar(&params.root.logLevel, loglevel, "info", "The logging level") return loglevel } func addCPUProfFlag(cmd *cobra.Command) string { cpuprof := "cpuprof" cmd.Flags().BoolVar(&params.root.cpuProf, cpuprof, false, "Toggle runtime profiling") return cpuprof } /** parameters struct to other formats */ type cmdStoresRemote struct { meta storage.Store blob storage.Store } func paramsToRemoteCmdStores(ctx context.Context, params paramsT) 
(cmdStoresRemote, error) { stores := cmdStoresRemote{} meta, err := gcs.New(ctx, params.repo.MetadataBucket, config.Credential) if err != nil { return cmdStoresRemote{}, err } stores.meta = meta if params.repo.BlobBucket != "" { blob, err := gcs.New(ctx, params.repo.BlobBucket, config.Credential) if err != nil { return cmdStoresRemote{}, err } stores.blob = blob } return stores, nil } func paramsToSrcStore(ctx context.Context, params paramsT, create bool) (storage.Store, error) { var err error var consumableStorePath string switch { case params.bundle.DataPath == "": consumableStorePath, err = ioutil.TempDir("", "datamon-mount-destination") if err != nil { return nil, fmt.Errorf("couldn't create temporary directory: %v", err) } case strings.HasPrefix(params.bundle.DataPath, "gs://"): consumableStorePath = params.bundle.DataPath default: consumableStorePath, err = sanitizePath(params.bundle.DataPath) if err != nil { return nil, fmt.Errorf("failed to sanitize destination: %v: %v", params.bundle.DataPath, err) } } if create { createPath(consumableStorePath) } var sourceStore storage.Store if strings.HasPrefix(consumableStorePath, "gs://") { fmt.Println(consumableStorePath[5:]) sourceStore, err = gcs.New(ctx, consumableStorePath[5:], config.Credential) if err != nil { return sourceStore, err } } else { DieIfNotAccessible(consumableStorePath) DieIfNotDirectory(consumableStorePath) sourceStore = localfs.New(afero.NewBasePathFs(afero.NewOsFs(), consumableStorePath)) } return sourceStore, nil } type DestT uint const ( destTEmpty = iota destTMaybeNonEmpty destTNonEmpty ) func paramsToDestStore(params paramsT, destT DestT, tmpdirPrefix string, ) (storage.Store, error) { var err error var consumableStorePath string var destStore storage.Store if tmpdirPrefix != "" && params.bundle.DataPath != "" { tmpdirPrefix = "" } if tmpdirPrefix != "" {
else { consumableStorePath, err = sanitizePath(params.bundle.DataPath) if err != nil { return nil, fmt.Errorf("failed to sanitize destination: %s", params.bundle.DataPath) } createPath(consumableStorePath) } fs := afero.NewBasePathFs(afero.NewOsFs(), consumableStorePath) if destT == destTEmpty { var empty bool empty, err = afero.IsEmpty(fs, "/") if err != nil { return nil, fmt.Errorf("failed path validation: %v", err) } if !empty { return nil, fmt.Errorf("%s should be empty", consumableStorePath) } } else if destT == destTNonEmpty { /* fail-fast impl. partial dupe of model pkg, encoded here to more fully encode intent * of this package independently. */ var ok bool ok, err = afero.DirExists(fs, ".datamon") if err != nil { return nil, fmt.Errorf("failed to look for metadata dir: %v", err) } if !ok { return nil, fmt.Errorf("failed to find metadata dir in %v", consumableStorePath) } } destStore = localfs.New(fs) return destStore, nil } func paramsToContributor(params paramsT) (model.Contributor, error) { if params.repo.ContributorEmail == "" { return model.Contributor{}, fmt.Errorf( "contributor email must be set in config or as a cli param") } if params.repo.ContributorName == "" { return model.Contributor{}, fmt.Errorf( "contributor name must be set in config or as a cli param") } return model.Contributor{ Email: params.repo.ContributorEmail, Name: params.repo.ContributorName, }, nil }
if destT == destTNonEmpty { return nil, fmt.Errorf("can't specify a temp dest path when a non-empty dir is required: the two are mutually exclusive") } consumableStorePath, err = ioutil.TempDir("", tmpdirPrefix) if err != nil { return nil, fmt.Errorf("couldn't create temporary directory: %v", err) } }
steps.py
# -*- coding: utf-8 -*- # Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U # # This file is part of FI-WARE project. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # # You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # limitations under the License. # # For those usages not covered by the Apache version 2.0 License please # contact with [email protected] from lettuce import step, world from lettuce_tools.dataset_utils.dataset_utils import DatasetUtils from tools import http from tools import environment_request from tools.tier import Tier from tools.constants import NAME, DESCRIPTION, PRODUCTS, NETWORKS, PAAS,\ TIER_IMAGE dataset_utils = DatasetUtils() @step(u'the paas manager is up and properly configured') def the_paas_manager_is_up_and_properly_configured(step): pass # Nothing to do here, the setup should be done by external means @step(u'a list of tiers has been defined with data:') def a_list_of_tiers_has_been_defined_with_data(step): world.tiers = [] for row in step.hashes: data = dataset_utils.prepare_data(row) tier = Tier(data.get(NAME), world.config[PAAS][TIER_IMAGE]) tier.parse_and_add_products(data.get(PRODUCTS)) tier.parse_and_add_networks(data.get(NETWORKS)) world.tiers.append(tier) @step(u'an environment has already been created with data:') def an_environment_has_already_been_created_with_data(step): data = dataset_utils.prepare_data(step.hashes[0]) world.env_requests.add_environment(data.get(NAME), data.get(DESCRIPTION)) @step(u'an environment has already been created with the previous tiers and data:') def an_environment_has_already_been_created_with_the_previous_tiers_and_data(step): data = dataset_utils.prepare_data(step.hashes[0]) world.env_requests.add_environment(data.get(NAME), data.get(DESCRIPTION), world.tiers) @step(u'there is no environment with name "([^"]*)" already created') def there_is_no_environment_with_name_already_created(step, name): world.env_requests.delete_environment(name) # Just in case it exists @step(u'I request the details of the environment with name "([^"]*)"') def i_request_the_details_of_the_environment_with_name(step, name): name = dataset_utils.generate_fixed_length_param(name) world.env_requests.get_environment(name) @step(u'I receive an? "([^"]*)" response with data:') def i_receive_a_response_of_type_with_data(step, response_type): s
@step(u'I receive an? "([^"]*)" response with the previous tiers and data:') def i_receive_a_response_of_type_with_the_previous_tiers_and_data(step, response_type): status_code = http.status_codes[response_type] data = dataset_utils.prepare_data(step.hashes[0]) environment_request.check_get_environment_response(world.response, status_code, data.get(NAME), data.get(DESCRIPTION), world.tiers) @step(u'I receive an? "([^"]*)" response$') def i_receive_a_response_of_type(step, response_type): status_code = http.status_codes[response_type] environment_request.check_get_environment_response(world.response, status_code)
tatus_code = http.status_codes[response_type] data = dataset_utils.prepare_data(step.hashes[0]) environment_request.check_get_environment_response(world.response, status_code, data.get(NAME), data.get(DESCRIPTION))
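Since lettuce matches scenario sentences against the decorator strings as plain regular expressions, the wiring above can be sanity-checked with nothing but ``re``; the sample sentence below is invented for illustration:

import re

step_pattern = u'I receive an? "([^"]*)" response with data:'
sentence = u'I receive a "OK" response with data:'

match = re.match(step_pattern, sentence)
# The captured group becomes the response_type argument, which the step
# function then maps to a numeric status code via http.status_codes.
assert match is not None and match.group(1) == "OK"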
filters.py
import re from typing import Any, Dict from django.http import HttpRequest from django.views.debug import SafeExceptionReporterFilter class ZulipExceptionReporterFilter(SafeExceptionReporterFilter): def get_post_parameters(self, request: HttpRequest) -> Dict[str, Any]: filtered_post = SafeExceptionReporterFilter.get_post_parameters(self, request).copy() filtered_vars = ['content', 'secret', 'password', 'key', 'api-key', 'subject', 'stream', 'subscriptions', 'to', 'csrfmiddlewaretoken', 'api_key', 'realm_counts', 'installation_counts'] for var in filtered_vars: if var in filtered_post: filtered_post[var] = '**********' return filtered_post def
(val: str) -> str: return re.sub(r"([a-z_-]+=)([^&]+)([&]|$)", r"\1******\3", val)
clean_data_from_query_parameters
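A short worked example of the scrubber above (the query string is made up): each ``key=value`` pair whose key matches ``[a-z_-]+`` keeps its key but has its value masked.

import re

def clean_data_from_query_parameters(val: str) -> str:
    return re.sub(r"([a-z_-]+=)([^&]+)([&]|$)", r"\1******\3", val)

print(clean_data_from_query_parameters("api_key=54321&stream=general&topic=hi"))
# -> api_key=******&stream=******&topic=******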
longest-arithmetic-subsequence.py
""" 1027. Longest Arithmetic Subsequence Medium Given an array nums of integers, return the length of the longest arithmetic subsequence in nums. Recall that a subsequence of an array nums is a list nums[i1], nums[i2], ..., nums[ik] with 0 <= i1 < i2 < ... < ik <= nums.length - 1, and that a sequence seq is arithmetic if seq[i+1] - seq[i] are all the same value (for 0 <= i < seq.length - 1). Example 1: Input: nums = [3,6,9,12] Output: 4 Explanation: The whole array is an arithmetic sequence with steps of length = 3. Example 2: Input: nums = [9,4,7,2,10] Output: 3 Explanation: The longest arithmetic subsequence is [4,7,10]. Example 3: Input: nums = [20,1,15,3,10,5,8] Output: 4 Explanation: The longest arithmetic subsequence is [20,15,10,5]. Constraints: 2 <= nums.length <= 1000 0 <= nums[i] <= 500 """
# IDEA : DP class Solution: def longestArithSeqLength(self, A): dp = {} for i in range(len(A)): for j in range(i + 1, len(A)): dp[j, A[j] - A[i]] = dp.get((i, A[j] - A[i]), 1) + 1 return max(dp.values()) # V0' # IDEA : HASH TABLE # https://leetcode.com/problems/longest-arithmetic-subsequence/discuss/274657/Short-Python-solution class Solution: def longestArithSeqLength(self, A): aux, cnt, prefix = {a : {} for a in A}, {}, set() for a in A: cnt[a] = cnt[a] + 1 if a in cnt else 1 for b in prefix: if a != b: aux[a][a - b] = 1 + aux[b][a - b] if a - b in aux[b] else 2 prefix.add(a) max_const = max(cnt.values()) max_aux = max(max(d.values()) for a, d in aux.items() if d) return max(max_const, max_aux, 2) # V1 # https://www.796t.com/article.php?id=154559 # http://www.noteanddata.com/leetcode-1027-Longest-Arithmetic-Sequence-Google-Interview-Problem-java-solution-note.html # https://blog.csdn.net/w5688414/article/details/109696664 # V1 # IDEA : HASH # https://leetcode.com/problems/longest-arithmetic-subsequence/discuss/274657/Short-Python-solution class Solution: def longestArithSeqLength(self, A): aux, cnt, prefix = {a : {} for a in A}, {}, set() for a in A: cnt[a] = cnt[a] + 1 if a in cnt else 1 for b in prefix: if a != b: aux[a][a - b] = 1 + aux[b][a - b] if a - b in aux[b] else 2 prefix.add(a) max_const = max(cnt.values()) max_aux = max(max(d.values()) for a, d in aux.items() if d) return max(max_const, max_aux, 2) # V1' # https://leetcode.com/problems/longest-arithmetic-subsequence/discuss/275395/python-O(n**2)-solution class Solution: def longestArithSeqLength(self, A): # Constant seq: '0000', O(len(A) ) ct = collections.Counter(A) ans = max(2, max(ct[i] for i in ct)) # Increasing seq:'1234', O(len(A)**2 ) ansdic = {} for i in range(len(A)): for j in range(i): a0, a1, a2 = A[j]*2-A[i], A[j], A[i] if a0 == a1:continue if (a0, a1) in ansdic: ansdic[a1, a2] = ansdic[a0, a1] + 1 ans = max(ansdic[a1, a2], ans) else: ansdic[a1, a2] = 2 return ans # V1'' # IDEA : HASH SET # https://leetcode.com/problems/longest-arithmetic-subsequence/discuss/274625/simple-hash-Set-Python class Solution(object): def longestArithSeqLength(self, A): res = 2 if len(A) <= 2: return len(A) cnt = {} node = {} mx = {} curr = A[1] - A[0] cnt[(curr,1)] = 2 node[curr] = set() node[curr].add(1) mx[curr] = 2 res = 2 for i in range(2,len(A)): for j in range(i): dis = A[i] - A[j] if dis in node: if j in node[dis]: cnt[(dis,i)] = cnt[(dis,j)] + 1 #node[dis].remove(j) node[dis].add(i) mx[dis] = max(mx[dis], cnt[(dis,i)]) res = max(mx[dis],res) else: cnt[(dis,i)] = 2 node[dis].add(i) else: cnt[(dis,i)] = 2 node[dis] = set() node[dis].add(i) mx[dis] = 2 return res # V1''' # IDEA : DP # https://leetcode.com/problems/longest-arithmetic-subsequence/discuss/274611/JavaC%2B%2BPython-DP class Solution: def longestArithSeqLength(self, A): dp = {} for i in range(len(A)): for j in range(i + 1, len(A)): dp[j, A[j] - A[i]] = dp.get((i, A[j] - A[i]), 1) + 1 return max(dp.values()) # V1'''' # IDEA : DP # https://leetcode.com/problems/longest-arithmetic-subsequence/discuss/514742/Python-DP class Solution: def longestArithSeqLength(self, A): DP = {} A_len = len(A) for right in range(1, A_len): for left in range(right): diff = A[right] - A[left] #if (diff, left) in DP: # DP[(diff, right)] = DP[(diff, left)] + 1 #else: # DP[(diff, right)] = 2 DP[(diff, right)] = DP.get((diff,left), 1) + 1 return max(DP.values()) # V2
# V0
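A quick sanity check of the DP solution above (the ``# V0`` / ``# IDEA : DP`` class) against the three examples from the problem statement:

s = Solution()
assert s.longestArithSeqLength([3, 6, 9, 12]) == 4             # whole array, step 3
assert s.longestArithSeqLength([9, 4, 7, 2, 10]) == 3          # [4, 7, 10]
assert s.longestArithSeqLength([20, 1, 15, 3, 10, 5, 8]) == 4  # [20, 15, 10, 5]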
ssl_match_hostname.py
# Backport of the match_hostname logic introduced in python 3.2 # http://hg.python.org/releasing/3.3.5/file/993955b807b3/Lib/ssl.py import re class CertificateError(ValueError): pass def _dnsname_match(dn, hostname, max_wildcards=1):
def match_hostname(cert, hostname): """Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 rules are followed, but IP addresses are not accepted for *hostname*. CertificateError is raised on failure. On success, the function returns nothing. """ if not cert: raise ValueError("empty or no certificate") dnsnames = [] san = cert.get('subjectAltName', ()) for key, value in san: if key == 'DNS': if _dnsname_match(value, hostname): return dnsnames.append(value) if not dnsnames: # The subject is only checked when there is no dNSName entry # in subjectAltName for sub in cert.get('subject', ()): for key, value in sub: # XXX according to RFC 2818, the most specific Common Name # must be used. if key == 'commonName': if _dnsname_match(value, hostname): return dnsnames.append(value) if len(dnsnames) > 1: raise CertificateError("hostname %r " "doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames)))) elif len(dnsnames) == 1: raise CertificateError("hostname %r " "doesn't match %r" % (hostname, dnsnames[0])) else: raise CertificateError("no appropriate commonName or " "subjectAltName fields were found")
"""Matching according to RFC 6125, section 6.4.3 http://tools.ietf.org/html/rfc6125#section-6.4.3 """ pats = [] if not dn: return False parts = dn.split(r'.') leftmost = parts[0] remainder = parts[1:] wildcards = leftmost.count('*') if wildcards > max_wildcards: # Issue #17980: avoid denials of service by refusing more # than one wildcard per fragment. A survey of established # policy among SSL implementations showed it to be a # reasonable choice. raise CertificateError( "too many wildcards in certificate DNS name: " + repr(dn)) # speed up common case w/o wildcards if not wildcards: return dn.lower() == hostname.lower() # RFC 6125, section 6.4.3, subitem 1. # The client SHOULD NOT attempt to match a presented identifier in which # the wildcard character comprises a label other than the left-most label. if leftmost == '*': # When '*' is a fragment by itself, it matches a non-empty dotless # fragment. pats.append('[^.]+') elif leftmost.startswith('xn--') or hostname.startswith('xn--'): # RFC 6125, section 6.4.3, subitem 3. # The client SHOULD NOT attempt to match a presented identifier # where the wildcard character is embedded within an A-label or # U-label of an internationalized domain name. pats.append(re.escape(leftmost)) else: # Otherwise, '*' matches any dotless string, e.g. www* pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) # add the remaining fragments, ignore any wildcards for frag in remainder: pats.append(re.escape(frag)) pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) return pat.match(hostname)
MaybeAsync.ts
export type MaybeAsync<T> = Promise<T> | T; export const isPromise = <T extends unknown>( test?: MaybeAsync<T> ): test is Promise<T> => !!test && typeof (test as Promise<T>).then === "function"; // eslint-disable-next-line @typescript-eslint/no-explicit-any, @typescript-eslint/explicit-module-boundary-types export const executeMaybeAsyncFunction = async <T extends unknown>( func: (...args: unknown[]) => Promise<T> | T, ...args: unknown[] ): Promise<T> => { let result = func(...args); if (isPromise(result)) {
result = await result; } return result; };
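For comparison, a rough Python analogue of ``executeMaybeAsyncFunction`` (an assumption-level sketch, not part of this package): ``inspect.isawaitable`` plays the role of the ``isPromise`` duck-type check.

import asyncio
import inspect

async def execute_maybe_async(func, *args):
    result = func(*args)
    if inspect.isawaitable(result):
        # Mirrors isPromise: only await when the callee returned an awaitable.
        result = await result
    return result

async def main():
    print(await execute_maybe_async(lambda x: x + 1, 41))  # synchronous callee

    async def later(x):
        return x + 1

    print(await execute_maybe_async(later, 41))            # asynchronous callee

asyncio.run(main())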
index.ts
import { LOCAL_SHOULD_LOG } from '@glimmer/local-debug-flags'; import { ASTv2, Source } from '@glimmer/syntax'; import { LOCAL_LOGGER } from '@glimmer/util'; import { Result } from '../../shared/result'; import * as mir from '../2-encoding/mir'; import { NormalizationState } from './context'; import { VISIT_STMTS } from './visitors/statements'; /** * Normalize the AST from @glimmer/syntax into the HIR. The HIR has special * instructions for keywords like `{{yield}}`, `(has-block)` and * `{{#in-element}}`. * * Most importantly, it also classifies HTML element syntax into: * * 1. simple HTML element (with optional splattributes) * 2. component invocation * * Because the @glimmer/syntax AST gives us a string for an element's tag, * this pass also normalizes that string into an expression.
* // `["x"]` * <this.x /> * * {{#let expr as |t|}} * // `"t"` is normalized into a variable lookup. * <t /> * * // normalized into a path expression whose head is the variable lookup * // `t` and tail is `["input"]`. * <t.input /> * {{/let}} * * // normalized into a free variable lookup for `SomeComponent` (with the * // context `ComponentHead`). * <SomeComponent /> * * // normalized into a path expression whose head is the free variable * // `notInScope` (with the context `Expression`), and whose tail is * // `["SomeComponent"]`. In resolver mode, this path will be rejected later, * // since it cannot serve as an input to the resolver. * <notInScope.SomeComponent /> * ``` */ export default function normalize(source: Source, root: ASTv2.Template): Result<mir.Template> { // create a new context for the normalization pass let state = new NormalizationState(root.table); if (LOCAL_SHOULD_LOG) { LOCAL_LOGGER.groupCollapsed(`pass0: visiting`); LOCAL_LOGGER.log('symbols', root.table); LOCAL_LOGGER.log('source', source); LOCAL_LOGGER.groupEnd(); } let body = VISIT_STMTS.visitList(root.body, state); if (LOCAL_SHOULD_LOG) { if (body.isOk) { LOCAL_LOGGER.log('-> pass0: out', body.value); } else { LOCAL_LOGGER.log('-> pass0: error', body.reason); } } return body.mapOk( (body) => new mir.Template({ loc: root.loc, scope: root.table, body: body.toArray() }) ); }
* * ``` * // normalized into a path expression whose head is `this` and tail is
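The classification described in the comment above can be paraphrased as a small decision procedure. The sketch below is a loose Python illustration, not the actual pass (which operates on ASTv2 nodes and a real symbol table; scope lookup is stubbed with a set):

def classify_tag(tag, in_scope):
    head, *tail = tag.split(".")
    if head == "this":
        return f"path expression: `this` head, tail {tail}"
    if head in in_scope:
        return "variable lookup" + (f", tail {tail}" if tail else "")
    if head[0].isupper():
        return "free variable lookup (ComponentHead context)"
    if tail:
        return f"free variable lookup (Expression context), tail {tail}"
    return "simple HTML element"

print(classify_tag("div", set()))            # simple HTML element
print(classify_tag("t.input", {"t"}))        # variable lookup, tail ['input']
print(classify_tag("SomeComponent", set()))  # free variable (ComponentHead)
print(classify_tag("this.x", set()))         # path expression with `this` head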
mod.rs
mod controller_revision; pub use self::controller_revision::ControllerRevision; #[cfg(feature = "api")] pub use self::controller_revision::{CreateNamespacedControllerRevisionOptional, CreateNamespacedControllerRevisionResponse}; #[cfg(feature = "api")] pub use self::controller_revision::DeleteCollectionNamespacedControllerRevisionResponse; #[cfg(feature = "api")] pub use self::controller_revision::DeleteNamespacedControllerRevisionResponse; #[cfg(feature = "api")] pub use self::controller_revision::ListControllerRevisionForAllNamespacesResponse; #[cfg(feature = "api")] pub use self::controller_revision::ListNamespacedControllerRevisionResponse; #[cfg(feature = "api")] pub use self::controller_revision::PatchNamespacedControllerRevisionResponse; #[cfg(feature = "api")] pub use self::controller_revision::{ReadNamespacedControllerRevisionOptional, ReadNamespacedControllerRevisionResponse}; #[cfg(feature = "api")] pub use self::controller_revision::{ReplaceNamespacedControllerRevisionOptional, ReplaceNamespacedControllerRevisionResponse}; #[cfg(feature = "api")] pub use self::controller_revision::WatchControllerRevisionForAllNamespacesResponse; #[cfg(feature = "api")] pub use self::controller_revision::WatchNamespacedControllerRevisionResponse; mod controller_revision_list; pub use self::controller_revision_list::ControllerRevisionList; mod deployment; pub use self::deployment::Deployment; #[cfg(feature = "api")] pub use self::deployment::{CreateNamespacedDeploymentOptional, CreateNamespacedDeploymentResponse}; #[cfg(feature = "api")] pub use self::deployment::DeleteCollectionNamespacedDeploymentResponse; #[cfg(feature = "api")] pub use self::deployment::DeleteNamespacedDeploymentResponse; #[cfg(feature = "api")] pub use self::deployment::ListDeploymentForAllNamespacesResponse; #[cfg(feature = "api")] pub use self::deployment::ListNamespacedDeploymentResponse; #[cfg(feature = "api")] pub use self::deployment::PatchNamespacedDeploymentResponse; #[cfg(feature = "api")] pub use self::deployment::PatchNamespacedDeploymentStatusResponse; #[cfg(feature = "api")] pub use self::deployment::{ReadNamespacedDeploymentOptional, ReadNamespacedDeploymentResponse}; #[cfg(feature = "api")] pub use self::deployment::{ReadNamespacedDeploymentStatusOptional, ReadNamespacedDeploymentStatusResponse}; #[cfg(feature = "api")] pub use self::deployment::{ReplaceNamespacedDeploymentOptional, ReplaceNamespacedDeploymentResponse}; #[cfg(feature = "api")] pub use self::deployment::{ReplaceNamespacedDeploymentStatusOptional, ReplaceNamespacedDeploymentStatusResponse}; #[cfg(feature = "api")] pub use self::deployment::WatchDeploymentForAllNamespacesResponse; #[cfg(feature = "api")] pub use self::deployment::WatchNamespacedDeploymentResponse; mod deployment_condition; pub use self::deployment_condition::DeploymentCondition; mod deployment_list; pub use self::deployment_list::DeploymentList; mod deployment_rollback; pub use self::deployment_rollback::DeploymentRollback; #[cfg(feature = "api")] pub use self::deployment_rollback::{CreateNamespacedDeploymentRollbackOptional, CreateNamespacedDeploymentRollbackResponse}; mod deployment_spec; pub use self::deployment_spec::DeploymentSpec; mod deployment_status; pub use self::deployment_status::DeploymentStatus;
mod deployment_strategy; pub use self::deployment_strategy::DeploymentStrategy; mod rollback_config; pub use self::rollback_config::RollbackConfig; mod rolling_update_deployment; pub use self::rolling_update_deployment::RollingUpdateDeployment; mod rolling_update_stateful_set_strategy; pub use self::rolling_update_stateful_set_strategy::RollingUpdateStatefulSetStrategy; mod scale; pub use self::scale::Scale; #[cfg(feature = "api")] pub use self::scale::PatchNamespacedDeploymentScaleResponse; #[cfg(feature = "api")] pub use self::scale::PatchNamespacedStatefulSetScaleResponse; #[cfg(feature = "api")] pub use self::scale::{ReadNamespacedDeploymentScaleOptional, ReadNamespacedDeploymentScaleResponse}; #[cfg(feature = "api")] pub use self::scale::{ReadNamespacedStatefulSetScaleOptional, ReadNamespacedStatefulSetScaleResponse}; #[cfg(feature = "api")] pub use self::scale::{ReplaceNamespacedDeploymentScaleOptional, ReplaceNamespacedDeploymentScaleResponse}; #[cfg(feature = "api")] pub use self::scale::{ReplaceNamespacedStatefulSetScaleOptional, ReplaceNamespacedStatefulSetScaleResponse}; mod scale_spec; pub use self::scale_spec::ScaleSpec; mod scale_status; pub use self::scale_status::ScaleStatus; mod stateful_set; pub use self::stateful_set::StatefulSet; #[cfg(feature = "api")] pub use self::stateful_set::{CreateNamespacedStatefulSetOptional, CreateNamespacedStatefulSetResponse}; #[cfg(feature = "api")] pub use self::stateful_set::DeleteCollectionNamespacedStatefulSetResponse; #[cfg(feature = "api")] pub use self::stateful_set::DeleteNamespacedStatefulSetResponse; #[cfg(feature = "api")] pub use self::stateful_set::ListNamespacedStatefulSetResponse; #[cfg(feature = "api")] pub use self::stateful_set::ListStatefulSetForAllNamespacesResponse; #[cfg(feature = "api")] pub use self::stateful_set::PatchNamespacedStatefulSetResponse; #[cfg(feature = "api")] pub use self::stateful_set::PatchNamespacedStatefulSetStatusResponse; #[cfg(feature = "api")] pub use self::stateful_set::{ReadNamespacedStatefulSetOptional, ReadNamespacedStatefulSetResponse}; #[cfg(feature = "api")] pub use self::stateful_set::{ReadNamespacedStatefulSetStatusOptional, ReadNamespacedStatefulSetStatusResponse}; #[cfg(feature = "api")] pub use self::stateful_set::{ReplaceNamespacedStatefulSetOptional, ReplaceNamespacedStatefulSetResponse}; #[cfg(feature = "api")] pub use self::stateful_set::{ReplaceNamespacedStatefulSetStatusOptional, ReplaceNamespacedStatefulSetStatusResponse}; #[cfg(feature = "api")] pub use self::stateful_set::WatchNamespacedStatefulSetResponse; #[cfg(feature = "api")] pub use self::stateful_set::WatchStatefulSetForAllNamespacesResponse; mod stateful_set_list; pub use self::stateful_set_list::StatefulSetList; mod stateful_set_spec; pub use self::stateful_set_spec::StatefulSetSpec; mod stateful_set_status; pub use self::stateful_set_status::StatefulSetStatus; mod stateful_set_update_strategy; pub use self::stateful_set_update_strategy::StatefulSetUpdateStrategy;
api_op_EnableAllFeatures.go
// Code generated by smithy-go-codegen DO NOT EDIT. package organizations import ( "context" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" "github.com/aws/aws-sdk-go-v2/service/organizations/types" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) // Enables all features in an organization. This enables the use of organization // policies that can restrict the services and actions that can be called in each // account. Until you enable all features, you have access only to consolidated // billing, and you can't use any of the advanced account administration features // that AWS Organizations supports. For more information, see Enabling All Features // in Your Organization // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_support-all-features.html) // in the AWS Organizations User Guide. This operation is required only for // organizations that were created explicitly with only the consolidated billing // features enabled. Calling this operation sends a handshake to every invited // account in the organization. The feature set change can be finalized and the // additional features enabled only after all administrators in the invited // accounts approve the change by accepting the handshake. After you enable all // features, you can separately enable or disable individual policy types in a root // using EnablePolicyType and DisablePolicyType. To see the status of policy types // in a root, use ListRoots. After all invited member accounts accept the // handshake, you finalize the feature set change by accepting the handshake that // contains "Action": "ENABLE_ALL_FEATURES". This completes the change. After you // enable all features in your organization, the management account in the // organization can apply policies on all member accounts. These policies can // restrict what users and even administrators in those accounts can do. The // management account can apply policies that prevent accounts from leaving the // organization. Ensure that your account administrators are aware of this. This // operation can be called only from the organization's management account. func (c *Client) EnableAllFeatures(ctx context.Context, params *EnableAllFeaturesInput, optFns ...func(*Options)) (*EnableAllFeaturesOutput, error) { if params == nil { params = &EnableAllFeaturesInput{} } result, metadata, err := c.invokeOperation(ctx, "EnableAllFeatures", params, optFns, c.addOperationEnableAllFeaturesMiddlewares) if err != nil { return nil, err } out := result.(*EnableAllFeaturesOutput) out.ResultMetadata = metadata return out, nil } type EnableAllFeaturesInput struct { noSmithyDocumentSerde
type EnableAllFeaturesOutput struct { // A structure that contains details about the handshake created to support this // request to enable all features in the organization. Handshake *types.Handshake // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata noSmithyDocumentSerde } func (c *Client) addOperationEnableAllFeaturesMiddlewares(stack *middleware.Stack, options Options) (err error) { err = stack.Serialize.Add(&awsAwsjson11_serializeOpEnableAllFeatures{}, middleware.After) if err != nil { return err } err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpEnableAllFeatures{}, middleware.After) if err != nil { return err } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { return err } if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { return err } if err = addRetryMiddlewares(stack, options); err != nil { return err } if err = addHTTPSignerV4Middleware(stack, options); err != nil { return err } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { return err } if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opEnableAllFeatures(options.Region), middleware.Before); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } if err = addResponseErrorMiddleware(stack); err != nil { return err } if err = addRequestResponseLogging(stack, options); err != nil { return err } return nil } func newServiceMetadataMiddleware_opEnableAllFeatures(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, SigningName: "organizations", OperationName: "EnableAllFeatures", } }
}
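For orientation, the same operation driven from Python via boto3 (a sketch assuming management-account credentials are already configured; as the empty input struct above shows, the call takes no parameters):

import boto3

client = boto3.client("organizations")
# Sends an ENABLE_ALL_FEATURES handshake to every invited account; the
# change is finalized only after all invited accounts accept it.
response = client.enable_all_features()
print(response["Handshake"]["State"])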
client.rs
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. #[derive(Debug)] pub(crate) struct Handle { pub(crate) client: aws_smithy_client::Client< aws_smithy_client::erase::DynConnector, aws_smithy_client::erase::DynMiddleware<aws_smithy_client::erase::DynConnector>, >, pub(crate) conf: crate::Config, } /// Client for Amazon Simple Workflow Service /// /// Client for invoking operations on Amazon Simple Workflow Service. Each operation on Amazon Simple Workflow Service is a method on /// this struct. `.send()` MUST be invoked on the generated operations to dispatch the request to the service. /// /// # Examples /// **Constructing a client and invoking an operation** /// ```rust,no_run /// # async fn docs() { /// // create a shared configuration. This can be used & shared between multiple service clients. /// let shared_config = aws_config::load_from_env().await; /// let client = aws_sdk_swf::Client::new(&shared_config); /// // invoke an operation /// /* let rsp = client /// .<operation_name>(). /// .<param>("some value") /// .send().await; */ /// # } /// ``` /// **Constructing a client with custom configuration** /// ```rust,no_run /// use aws_config::RetryConfig; /// # async fn docs() { /// let shared_config = aws_config::load_from_env().await; /// let config = aws_sdk_swf::config::Builder::from(&shared_config) /// .retry_config(RetryConfig::disabled()) /// .build(); /// let client = aws_sdk_swf::Client::from_conf(config); /// # } /// ``` #[derive(std::fmt::Debug)] pub struct Client { handle: std::sync::Arc<Handle>, } impl std::clone::Clone for Client { fn clone(&self) -> Self { Self { handle: self.handle.clone(), } } } #[doc(inline)] pub use aws_smithy_client::Builder; impl From< aws_smithy_client::Client< aws_smithy_client::erase::DynConnector, aws_smithy_client::erase::DynMiddleware<aws_smithy_client::erase::DynConnector>, >, > for Client { fn from( client: aws_smithy_client::Client< aws_smithy_client::erase::DynConnector, aws_smithy_client::erase::DynMiddleware<aws_smithy_client::erase::DynConnector>, >, ) -> Self { Self::with_config(client, crate::Config::builder().build()) } } impl Client { /// Creates a client with the given service configuration. pub fn with_config( client: aws_smithy_client::Client< aws_smithy_client::erase::DynConnector, aws_smithy_client::erase::DynMiddleware<aws_smithy_client::erase::DynConnector>, >, conf: crate::Config, ) -> Self { Self { handle: std::sync::Arc::new(Handle { client, conf }), } } /// Returns the client's configuration. pub fn conf(&self) -> &crate::Config { &self.handle.conf } } impl Client { /// Constructs a fluent builder for the [`CountClosedWorkflowExecutions`](crate::client::fluent_builders::CountClosedWorkflowExecutions) operation. 
/// /// - The fluent builder is configurable: /// - [`domain(impl Into<String>)`](crate::client::fluent_builders::CountClosedWorkflowExecutions::domain) / [`set_domain(Option<String>)`](crate::client::fluent_builders::CountClosedWorkflowExecutions::set_domain): <p>The name of the domain containing the workflow executions to count.</p> /// - [`start_time_filter(ExecutionTimeFilter)`](crate::client::fluent_builders::CountClosedWorkflowExecutions::start_time_filter) / [`set_start_time_filter(Option<ExecutionTimeFilter>)`](crate::client::fluent_builders::CountClosedWorkflowExecutions::set_start_time_filter): <p>If specified, only workflow executions that meet the start time criteria of the filter are counted.</p> <note> <p> <code>startTimeFilter</code> and <code>closeTimeFilter</code> are mutually exclusive. You must specify one of these in a request but not both.</p> </note> /// - [`close_time_filter(ExecutionTimeFilter)`](crate::client::fluent_builders::CountClosedWorkflowExecutions::close_time_filter) / [`set_close_time_filter(Option<ExecutionTimeFilter>)`](crate::client::fluent_builders::CountClosedWorkflowExecutions::set_close_time_filter): <p>If specified, only workflow executions that meet the close time criteria of the filter are counted.</p> <note> <p> <code>startTimeFilter</code> and <code>closeTimeFilter</code> are mutually exclusive. You must specify one of these in a request but not both.</p> </note> /// - [`execution_filter(WorkflowExecutionFilter)`](crate::client::fluent_builders::CountClosedWorkflowExecutions::execution_filter) / [`set_execution_filter(Option<WorkflowExecutionFilter>)`](crate::client::fluent_builders::CountClosedWorkflowExecutions::set_execution_filter): <p>If specified, only workflow executions matching the <code>WorkflowId</code> in the filter are counted.</p> <note> <p> <code>closeStatusFilter</code>, <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> </note> /// - [`type_filter(WorkflowTypeFilter)`](crate::client::fluent_builders::CountClosedWorkflowExecutions::type_filter) / [`set_type_filter(Option<WorkflowTypeFilter>)`](crate::client::fluent_builders::CountClosedWorkflowExecutions::set_type_filter): <p>If specified, indicates the type of the workflow executions to be counted.</p> <note> <p> <code>closeStatusFilter</code>, <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> </note> /// - [`tag_filter(TagFilter)`](crate::client::fluent_builders::CountClosedWorkflowExecutions::tag_filter) / [`set_tag_filter(Option<TagFilter>)`](crate::client::fluent_builders::CountClosedWorkflowExecutions::set_tag_filter): <p>If specified, only executions that have a tag that matches the filter are counted.</p> <note> <p> <code>closeStatusFilter</code>, <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> </note> /// - [`close_status_filter(CloseStatusFilter)`](crate::client::fluent_builders::CountClosedWorkflowExecutions::close_status_filter) / [`set_close_status_filter(Option<CloseStatusFilter>)`](crate::client::fluent_builders::CountClosedWorkflowExecutions::set_close_status_filter): <p>If specified, only workflow executions that match this close status are counted. 
This filter has an effect only if <code>executionStatus</code> is specified as <code>CLOSED</code>.</p> <note> <p> <code>closeStatusFilter</code>, <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> </note> /// - On success, responds with [`CountClosedWorkflowExecutionsOutput`](crate::output::CountClosedWorkflowExecutionsOutput) with field(s): /// - [`count(i32)`](crate::output::CountClosedWorkflowExecutionsOutput::count): <p>The number of workflow executions.</p> /// - [`truncated(bool)`](crate::output::CountClosedWorkflowExecutionsOutput::truncated): <p>If set to true, indicates that the actual count was more than the maximum supported by this API and the count returned is the truncated value.</p> /// - On failure, responds with [`SdkError<CountClosedWorkflowExecutionsError>`](crate::error::CountClosedWorkflowExecutionsError) pub fn count_closed_workflow_executions( &self, ) -> fluent_builders::CountClosedWorkflowExecutions { fluent_builders::CountClosedWorkflowExecutions::new(self.handle.clone()) } /// Constructs a fluent builder for the [`CountOpenWorkflowExecutions`](crate::client::fluent_builders::CountOpenWorkflowExecutions) operation. /// /// - The fluent builder is configurable: /// - [`domain(impl Into<String>)`](crate::client::fluent_builders::CountOpenWorkflowExecutions::domain) / [`set_domain(Option<String>)`](crate::client::fluent_builders::CountOpenWorkflowExecutions::set_domain): <p>The name of the domain containing the workflow executions to count.</p> /// - [`start_time_filter(ExecutionTimeFilter)`](crate::client::fluent_builders::CountOpenWorkflowExecutions::start_time_filter) / [`set_start_time_filter(Option<ExecutionTimeFilter>)`](crate::client::fluent_builders::CountOpenWorkflowExecutions::set_start_time_filter): <p>Specifies the start time criteria that workflow executions must meet in order to be counted.</p> /// - [`type_filter(WorkflowTypeFilter)`](crate::client::fluent_builders::CountOpenWorkflowExecutions::type_filter) / [`set_type_filter(Option<WorkflowTypeFilter>)`](crate::client::fluent_builders::CountOpenWorkflowExecutions::set_type_filter): <p>Specifies the type of the workflow executions to be counted.</p> <note> <p> <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> </note> /// - [`tag_filter(TagFilter)`](crate::client::fluent_builders::CountOpenWorkflowExecutions::tag_filter) / [`set_tag_filter(Option<TagFilter>)`](crate::client::fluent_builders::CountOpenWorkflowExecutions::set_tag_filter): <p>If specified, only executions that have a tag that matches the filter are counted.</p> <note> <p> <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> </note> /// - [`execution_filter(WorkflowExecutionFilter)`](crate::client::fluent_builders::CountOpenWorkflowExecutions::execution_filter) / [`set_execution_filter(Option<WorkflowExecutionFilter>)`](crate::client::fluent_builders::CountOpenWorkflowExecutions::set_execution_filter): <p>If specified, only workflow executions matching the <code>WorkflowId</code> in the filter are counted.</p> <note> <p> <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. 
You can specify at most one of these in a request.</p> </note> /// - On success, responds with [`CountOpenWorkflowExecutionsOutput`](crate::output::CountOpenWorkflowExecutionsOutput) with field(s): /// - [`count(i32)`](crate::output::CountOpenWorkflowExecutionsOutput::count): <p>The number of workflow executions.</p> /// - [`truncated(bool)`](crate::output::CountOpenWorkflowExecutionsOutput::truncated): <p>If set to true, indicates that the actual count was more than the maximum supported by this API and the count returned is the truncated value.</p> /// - On failure, responds with [`SdkError<CountOpenWorkflowExecutionsError>`](crate::error::CountOpenWorkflowExecutionsError) pub fn count_open_workflow_executions(&self) -> fluent_builders::CountOpenWorkflowExecutions { fluent_builders::CountOpenWorkflowExecutions::new(self.handle.clone()) } /// Constructs a fluent builder for the [`CountPendingActivityTasks`](crate::client::fluent_builders::CountPendingActivityTasks) operation. /// /// - The fluent builder is configurable: /// - [`domain(impl Into<String>)`](crate::client::fluent_builders::CountPendingActivityTasks::domain) / [`set_domain(Option<String>)`](crate::client::fluent_builders::CountPendingActivityTasks::set_domain): <p>The name of the domain that contains the task list.</p> /// - [`task_list(TaskList)`](crate::client::fluent_builders::CountPendingActivityTasks::task_list) / [`set_task_list(Option<TaskList>)`](crate::client::fluent_builders::CountPendingActivityTasks::set_task_list): <p>The name of the task list.</p> /// - On success, responds with [`CountPendingActivityTasksOutput`](crate::output::CountPendingActivityTasksOutput) with field(s): /// - [`count(i32)`](crate::output::CountPendingActivityTasksOutput::count): <p>The number of tasks in the task list.</p> /// - [`truncated(bool)`](crate::output::CountPendingActivityTasksOutput::truncated): <p>If set to true, indicates that the actual count was more than the maximum supported by this API and the count returned is the truncated value.</p> /// - On failure, responds with [`SdkError<CountPendingActivityTasksError>`](crate::error::CountPendingActivityTasksError) pub fn count_pending_activity_tasks(&self) -> fluent_builders::CountPendingActivityTasks { fluent_builders::CountPendingActivityTasks::new(self.handle.clone()) } /// Constructs a fluent builder for the [`CountPendingDecisionTasks`](crate::client::fluent_builders::CountPendingDecisionTasks) operation. 
/// /// - The fluent builder is configurable: /// - [`domain(impl Into<String>)`](crate::client::fluent_builders::CountPendingDecisionTasks::domain) / [`set_domain(Option<String>)`](crate::client::fluent_builders::CountPendingDecisionTasks::set_domain): <p>The name of the domain that contains the task list.</p> /// - [`task_list(TaskList)`](crate::client::fluent_builders::CountPendingDecisionTasks::task_list) / [`set_task_list(Option<TaskList>)`](crate::client::fluent_builders::CountPendingDecisionTasks::set_task_list): <p>The name of the task list.</p> /// - On success, responds with [`CountPendingDecisionTasksOutput`](crate::output::CountPendingDecisionTasksOutput) with field(s): /// - [`count(i32)`](crate::output::CountPendingDecisionTasksOutput::count): <p>The number of tasks in the task list.</p> /// - [`truncated(bool)`](crate::output::CountPendingDecisionTasksOutput::truncated): <p>If set to true, indicates that the actual count was more than the maximum supported by this API and the count returned is the truncated value.</p> /// - On failure, responds with [`SdkError<CountPendingDecisionTasksError>`](crate::error::CountPendingDecisionTasksError) pub fn count_pending_decision_tasks(&self) -> fluent_builders::CountPendingDecisionTasks { fluent_builders::CountPendingDecisionTasks::new(self.handle.clone()) } /// Constructs a fluent builder for the [`DeprecateActivityType`](crate::client::fluent_builders::DeprecateActivityType) operation. /// /// - The fluent builder is configurable: /// - [`domain(impl Into<String>)`](crate::client::fluent_builders::DeprecateActivityType::domain) / [`set_domain(Option<String>)`](crate::client::fluent_builders::DeprecateActivityType::set_domain): <p>The name of the domain in which the activity type is registered.</p> /// - [`activity_type(ActivityType)`](crate::client::fluent_builders::DeprecateActivityType::activity_type) / [`set_activity_type(Option<ActivityType>)`](crate::client::fluent_builders::DeprecateActivityType::set_activity_type): <p>The activity type to deprecate.</p> /// - On success, responds with [`DeprecateActivityTypeOutput`](crate::output::DeprecateActivityTypeOutput) /// - On failure, responds with [`SdkError<DeprecateActivityTypeError>`](crate::error::DeprecateActivityTypeError) pub fn deprecate_activity_type(&self) -> fluent_builders::DeprecateActivityType { fluent_builders::DeprecateActivityType::new(self.handle.clone()) } /// Constructs a fluent builder for the [`DeprecateDomain`](crate::client::fluent_builders::DeprecateDomain) operation. /// /// - The fluent builder is configurable: /// - [`name(impl Into<String>)`](crate::client::fluent_builders::DeprecateDomain::name) / [`set_name(Option<String>)`](crate::client::fluent_builders::DeprecateDomain::set_name): <p>The name of the domain to deprecate.</p> /// - On success, responds with [`DeprecateDomainOutput`](crate::output::DeprecateDomainOutput) /// - On failure, responds with [`SdkError<DeprecateDomainError>`](crate::error::DeprecateDomainError) pub fn deprecate_domain(&self) -> fluent_builders::DeprecateDomain { fluent_builders::DeprecateDomain::new(self.handle.clone()) } /// Constructs a fluent builder for the [`DeprecateWorkflowType`](crate::client::fluent_builders::DeprecateWorkflowType) operation. 
/// /// - The fluent builder is configurable: /// - [`domain(impl Into<String>)`](crate::client::fluent_builders::DeprecateWorkflowType::domain) / [`set_domain(Option<String>)`](crate::client::fluent_builders::DeprecateWorkflowType::set_domain): <p>The name of the domain in which the workflow type is registered.</p> /// - [`workflow_type(WorkflowType)`](crate::client::fluent_builders::DeprecateWorkflowType::workflow_type) / [`set_workflow_type(Option<WorkflowType>)`](crate::client::fluent_builders::DeprecateWorkflowType::set_workflow_type): <p>The workflow type to deprecate.</p> /// - On success, responds with [`DeprecateWorkflowTypeOutput`](crate::output::DeprecateWorkflowTypeOutput) /// - On failure, responds with [`SdkError<DeprecateWorkflowTypeError>`](crate::error::DeprecateWorkflowTypeError) pub fn deprecate_workflow_type(&self) -> fluent_builders::DeprecateWorkflowType { fluent_builders::DeprecateWorkflowType::new(self.handle.clone()) } /// Constructs a fluent builder for the [`DescribeActivityType`](crate::client::fluent_builders::DescribeActivityType) operation. /// /// - The fluent builder is configurable: /// - [`domain(impl Into<String>)`](crate::client::fluent_builders::DescribeActivityType::domain) / [`set_domain(Option<String>)`](crate::client::fluent_builders::DescribeActivityType::set_domain): <p>The name of the domain in which the activity type is registered.</p> /// - [`activity_type(ActivityType)`](crate::client::fluent_builders::DescribeActivityType::activity_type) / [`set_activity_type(Option<ActivityType>)`](crate::client::fluent_builders::DescribeActivityType::set_activity_type): <p>The activity type to get information about. Activity types are identified by the <code>name</code> and <code>version</code> that were supplied when the activity was registered.</p> /// - On success, responds with [`DescribeActivityTypeOutput`](crate::output::DescribeActivityTypeOutput) with field(s): /// - [`type_info(Option<ActivityTypeInfo>)`](crate::output::DescribeActivityTypeOutput::type_info): <p>General information about the activity type.</p> <p>The status of activity type (returned in the ActivityTypeInfo structure) can be one of the following.</p> <ul> <li> <p> <code>REGISTERED</code> – The type is registered and available. Workers supporting this type should be running. </p> </li> <li> <p> <code>DEPRECATED</code> – The type was deprecated using <code>DeprecateActivityType</code>, but is still in use. You should keep workers supporting this type running. You cannot create new tasks of this type. </p> </li> </ul> /// - [`configuration(Option<ActivityTypeConfiguration>)`](crate::output::DescribeActivityTypeOutput::configuration): <p>The configuration settings registered with the activity type.</p> /// - On failure, responds with [`SdkError<DescribeActivityTypeError>`](crate::error::DescribeActivityTypeError) pub fn describe_activity_type(&self) -> fluent_builders::DescribeActivityType { fluent_builders::DescribeActivityType::new(self.handle.clone()) } /// Constructs a fluent builder for the [`DescribeDomain`](crate::client::fluent_builders::DescribeDomain) operation. 
/// /// - The fluent builder is configurable: /// - [`name(impl Into<String>)`](crate::client::fluent_builders::DescribeDomain::name) / [`set_name(Option<String>)`](crate::client::fluent_builders::DescribeDomain::set_name): <p>The name of the domain to describe.</p> /// - On success, responds with [`DescribeDomainOutput`](crate::output::DescribeDomainOutput) with field(s): /// - [`domain_info(Option<DomainInfo>)`](crate::output::DescribeDomainOutput::domain_info): <p>The basic information about a domain, such as its name, status, and description.</p> /// - [`configuration(Option<DomainConfiguration>)`](crate::output::DescribeDomainOutput::configuration): <p>The domain configuration. Currently, this includes only the domain's retention period.</p> /// - On failure, responds with [`SdkError<DescribeDomainError>`](crate::error::DescribeDomainError) pub fn describe_domain(&self) -> fluent_builders::DescribeDomain { fluent_builders::DescribeDomain::new(self.handle.clone()) } /// Constructs a fluent builder for the [`DescribeWorkflowExecution`](crate::client::fluent_builders::DescribeWorkflowExecution) operation. /// /// - The fluent builder is configurable: /// - [`domain(impl Into<String>)`](crate::client::fluent_builders::DescribeWorkflowExecution::domain) / [`set_domain(Option<String>)`](crate::client::fluent_builders::DescribeWorkflowExecution::set_domain): <p>The name of the domain containing the workflow execution.</p> /// - [`execution(WorkflowExecution)`](crate::client::fluent_builders::DescribeWorkflowExecution::execution) / [`set_execution(Option<WorkflowExecution>)`](crate::client::fluent_builders::DescribeWorkflowExecution::set_execution): <p>The workflow execution to describe.</p> /// - On success, responds with [`DescribeWorkflowExecutionOutput`](crate::output::DescribeWorkflowExecutionOutput) with field(s): /// - [`execution_info(Option<WorkflowExecutionInfo>)`](crate::output::DescribeWorkflowExecutionOutput::execution_info): <p>Information about the workflow execution.</p> /// - [`execution_configuration(Option<WorkflowExecutionConfiguration>)`](crate::output::DescribeWorkflowExecutionOutput::execution_configuration): <p>The configuration settings for this workflow execution including timeout values, tasklist etc.</p> /// - [`open_counts(Option<WorkflowExecutionOpenCounts>)`](crate::output::DescribeWorkflowExecutionOutput::open_counts): <p>The number of tasks for this workflow execution. This includes open and closed tasks of all types.</p> /// - [`latest_activity_task_timestamp(Option<DateTime>)`](crate::output::DescribeWorkflowExecutionOutput::latest_activity_task_timestamp): <p>The time when the last activity task was scheduled for this workflow execution. You can use this information to determine if the workflow has not made progress for an unusually long period of time and might require a corrective action.</p> /// - [`latest_execution_context(Option<String>)`](crate::output::DescribeWorkflowExecutionOutput::latest_execution_context): <p>The latest executionContext provided by the decider for this workflow execution. 
A decider can provide an executionContext (a free-form string) when closing a decision task using <code>RespondDecisionTaskCompleted</code>.</p> /// - On failure, responds with [`SdkError<DescribeWorkflowExecutionError>`](crate::error::DescribeWorkflowExecutionError) pub fn describe_workflow_execution(&self) -> fluent_builders::DescribeWorkflowExecution { fluent_builders::DescribeWorkflowExecution::new(self.handle.clone()) } /// Constructs a fluent builder for the [`DescribeWorkflowType`](crate::client::fluent_builders::DescribeWorkflowType) operation. /// /// - The fluent builder is configurable: /// - [`domain(impl Into<String>)`](crate::client::fluent_builders::DescribeWorkflowType::domain) / [`set_domain(Option<String>)`](crate::client::fluent_builders::DescribeWorkflowType::set_domain): <p>The name of the domain in which this workflow type is registered.</p> /// - [`workflow_type(WorkflowType)`](crate::client::fluent_builders::DescribeWorkflowType::workflow_type) / [`set_workflow_type(Option<WorkflowType>)`](crate::client::fluent_builders::DescribeWorkflowType::set_workflow_type): <p>The workflow type to describe.</p> /// - On success, responds with [`DescribeWorkflowTypeOutput`](crate::output::DescribeWorkflowTypeOutput) with field(s): /// - [`type_info(Option<WorkflowTypeInfo>)`](crate::output::DescribeWorkflowTypeOutput::type_info): <p>General information about the workflow type.</p> <p>The status of the workflow type (returned in the WorkflowTypeInfo structure) can be one of the following.</p> <ul> <li> <p> <code>REGISTERED</code> – The type is registered and available. Workers supporting this type should be running.</p> </li> <li> <p> <code>DEPRECATED</code> – The type was deprecated using <code>DeprecateWorkflowType</code>, but is still in use. You should keep workers supporting this type running. You cannot create new workflow executions of this type.</p> </li> </ul> /// - [`configuration(Option<WorkflowTypeConfiguration>)`](crate::output::DescribeWorkflowTypeOutput::configuration): <p>Configuration settings of the workflow type registered through <code>RegisterWorkflowType</code> </p> /// - On failure, responds with [`SdkError<DescribeWorkflowTypeError>`](crate::error::DescribeWorkflowTypeError) pub fn describe_workflow_type(&self) -> fluent_builders::DescribeWorkflowType { fluent_builders::DescribeWorkflowType::new(self.handle.clone()) } /// Constructs a fluent builder for the [`GetWorkflowExecutionHistory`](crate::client::fluent_builders::GetWorkflowExecutionHistory) operation. /// This operation supports pagination; See [`into_paginator()`](crate::client::fluent_builders::GetWorkflowExecutionHistory::into_paginator). 
/// /// - The fluent builder is configurable: /// - [`domain(impl Into<String>)`](crate::client::fluent_builders::GetWorkflowExecutionHistory::domain) / [`set_domain(Option<String>)`](crate::client::fluent_builders::GetWorkflowExecutionHistory::set_domain): <p>The name of the domain containing the workflow execution.</p> /// - [`execution(WorkflowExecution)`](crate::client::fluent_builders::GetWorkflowExecutionHistory::execution) / [`set_execution(Option<WorkflowExecution>)`](crate::client::fluent_builders::GetWorkflowExecutionHistory::set_execution): <p>Specifies the workflow execution for which to return the history.</p> /// - [`next_page_token(impl Into<String>)`](crate::client::fluent_builders::GetWorkflowExecutionHistory::next_page_token) / [`set_next_page_token(Option<String>)`](crate::client::fluent_builders::GetWorkflowExecutionHistory::set_next_page_token): <p>If <code>NextPageToken</code> is returned there are more results available. The value of <code>NextPageToken</code> is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a <code>400</code> error: "<code>Specified token has exceeded its maximum lifetime</code>". </p> <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call. </p> /// - [`maximum_page_size(i32)`](crate::client::fluent_builders::GetWorkflowExecutionHistory::maximum_page_size) / [`set_maximum_page_size(i32)`](crate::client::fluent_builders::GetWorkflowExecutionHistory::set_maximum_page_size): <p>The maximum number of results that are returned per call. Use <code>nextPageToken</code> to obtain further pages of results. </p> /// - [`reverse_order(bool)`](crate::client::fluent_builders::GetWorkflowExecutionHistory::reverse_order) / [`set_reverse_order(bool)`](crate::client::fluent_builders::GetWorkflowExecutionHistory::set_reverse_order): <p>When set to <code>true</code>, returns the events in reverse order. By default the results are returned in ascending order of the <code>eventTimestamp</code> of the events.</p> /// - On success, responds with [`GetWorkflowExecutionHistoryOutput`](crate::output::GetWorkflowExecutionHistoryOutput) with field(s): /// - [`events(Option<Vec<HistoryEvent>>)`](crate::output::GetWorkflowExecutionHistoryOutput::events): <p>The list of history events.</p> /// - [`next_page_token(Option<String>)`](crate::output::GetWorkflowExecutionHistoryOutput::next_page_token): <p>If a <code>NextPageToken</code> was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in <code>nextPageToken</code>. Keep all other arguments unchanged.</p> <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call.</p> /// - On failure, responds with [`SdkError<GetWorkflowExecutionHistoryError>`](crate::error::GetWorkflowExecutionHistoryError) pub fn get_workflow_execution_history(&self) -> fluent_builders::GetWorkflowExecutionHistory { fluent_builders::GetWorkflowExecutionHistory::new(self.handle.clone()) } /// Constructs a fluent builder for the [`ListActivityTypes`](crate::client::fluent_builders::ListActivityTypes) operation. /// This operation supports pagination; See [`into_paginator()`](crate::client::fluent_builders::ListActivityTypes::into_paginator).
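///
/// For example, a minimal sketch that collects every page of registered activity
/// types via the paginator. The domain name is a placeholder, and the
/// `tokio_stream::StreamExt::collect` usage assumes this SDK version's stream-based
/// paginator:
///
/// ```no_run
/// # async fn example(client: &aws_sdk_swf::Client) -> Result<(), aws_sdk_swf::Error> {
/// use tokio_stream::StreamExt;
/// // `send()` on the paginator yields a stream of pages; collect them all,
/// // stopping at the first error.
/// let pages: Result<Vec<_>, _> = client
///     .list_activity_types()
///     .domain("my-domain")
///     .registration_status(aws_sdk_swf::model::RegistrationStatus::Registered)
///     .into_paginator()
///     .send()
///     .collect()
///     .await;
/// for page in pages? {
///     for info in page.type_infos().unwrap_or_default() {
///         println!("{:?}", info.activity_type());
///     }
/// }
/// # Ok(())
/// # }
/// ```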
/// /// - The fluent builder is configurable: /// - [`domain(impl Into<String>)`](crate::client::fluent_builders::ListActivityTypes::domain) / [`set_domain(Option<String>)`](crate::client::fluent_builders::ListActivityTypes::set_domain): <p>The name of the domain in which the activity types have been registered.</p> /// - [`name(impl Into<String>)`](crate::client::fluent_builders::ListActivityTypes::name) / [`set_name(Option<String>)`](crate::client::fluent_builders::ListActivityTypes::set_name): <p>If specified, only lists the activity types that have this name.</p> /// - [`registration_status(RegistrationStatus)`](crate::client::fluent_builders::ListActivityTypes::registration_status) / [`set_registration_status(Option<RegistrationStatus>)`](crate::client::fluent_builders::ListActivityTypes::set_registration_status): <p>Specifies the registration status of the activity types to list.</p> /// - [`next_page_token(impl Into<String>)`](crate::client::fluent_builders::ListActivityTypes::next_page_token) / [`set_next_page_token(Option<String>)`](crate::client::fluent_builders::ListActivityTypes::set_next_page_token): <p>If <code>NextPageToken</code> is returned there are more results available. The value of <code>NextPageToken</code> is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a <code>400</code> error: "<code>Specified token has exceeded its maximum lifetime</code>". </p> <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call. </p> /// - [`maximum_page_size(i32)`](crate::client::fluent_builders::ListActivityTypes::maximum_page_size) / [`set_maximum_page_size(i32)`](crate::client::fluent_builders::ListActivityTypes::set_maximum_page_size): <p>The maximum number of results that are returned per call. Use <code>nextPageToken</code> to obtain further pages of results. </p> /// - [`reverse_order(bool)`](crate::client::fluent_builders::ListActivityTypes::reverse_order) / [`set_reverse_order(bool)`](crate::client::fluent_builders::ListActivityTypes::set_reverse_order): <p>When set to <code>true</code>, returns the results in reverse order. By default, the results are returned in ascending alphabetical order by <code>name</code> of the activity types.</p> /// - On success, responds with [`ListActivityTypesOutput`](crate::output::ListActivityTypesOutput) with field(s): /// - [`type_infos(Option<Vec<ActivityTypeInfo>>)`](crate::output::ListActivityTypesOutput::type_infos): <p>List of activity type information.</p> /// - [`next_page_token(Option<String>)`](crate::output::ListActivityTypesOutput::next_page_token): <p>If a <code>NextPageToken</code> was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in <code>nextPageToken</code>. 
Keep all other arguments unchanged.</p> <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call.</p> /// - On failure, responds with [`SdkError<ListActivityTypesError>`](crate::error::ListActivityTypesError) pub fn list_activity_types(&self) -> fluent_builders::ListActivityTypes { fluent_builders::ListActivityTypes::new(self.handle.clone()) } /// Constructs a fluent builder for the [`ListClosedWorkflowExecutions`](crate::client::fluent_builders::ListClosedWorkflowExecutions) operation. /// This operation supports pagination; See [`into_paginator()`](crate::client::fluent_builders::ListClosedWorkflowExecutions::into_paginator). /// /// - The fluent builder is configurable: /// - [`domain(impl Into<String>)`](crate::client::fluent_builders::ListClosedWorkflowExecutions::domain) / [`set_domain(Option<String>)`](crate::client::fluent_builders::ListClosedWorkflowExecutions::set_domain): <p>The name of the domain that contains the workflow executions to list.</p> /// - [`start_time_filter(ExecutionTimeFilter)`](crate::client::fluent_builders::ListClosedWorkflowExecutions::start_time_filter) / [`set_start_time_filter(Option<ExecutionTimeFilter>)`](crate::client::fluent_builders::ListClosedWorkflowExecutions::set_start_time_filter): <p>If specified, the workflow executions are included in the returned results based on whether their start times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their start times.</p> <note> <p> <code>startTimeFilter</code> and <code>closeTimeFilter</code> are mutually exclusive. You must specify one of these in a request but not both.</p> </note> /// - [`close_time_filter(ExecutionTimeFilter)`](crate::client::fluent_builders::ListClosedWorkflowExecutions::close_time_filter) / [`set_close_time_filter(Option<ExecutionTimeFilter>)`](crate::client::fluent_builders::ListClosedWorkflowExecutions::set_close_time_filter): <p>If specified, the workflow executions are included in the returned results based on whether their close times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their close times.</p> <note> <p> <code>startTimeFilter</code> and <code>closeTimeFilter</code> are mutually exclusive. You must specify one of these in a request but not both.</p> </note> /// - [`execution_filter(WorkflowExecutionFilter)`](crate::client::fluent_builders::ListClosedWorkflowExecutions::execution_filter) / [`set_execution_filter(Option<WorkflowExecutionFilter>)`](crate::client::fluent_builders::ListClosedWorkflowExecutions::set_execution_filter): <p>If specified, only workflow executions matching the workflow ID specified in the filter are returned.</p> <note> <p> <code>closeStatusFilter</code>, <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> </note> /// - [`close_status_filter(CloseStatusFilter)`](crate::client::fluent_builders::ListClosedWorkflowExecutions::close_status_filter) / [`set_close_status_filter(Option<CloseStatusFilter>)`](crate::client::fluent_builders::ListClosedWorkflowExecutions::set_close_status_filter): <p>If specified, only workflow executions that match this <i>close status</i> are listed. 
For example, if TERMINATED is specified, then only TERMINATED workflow executions are listed.</p> <note> <p> <code>closeStatusFilter</code>, <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> </note> /// - [`type_filter(WorkflowTypeFilter)`](crate::client::fluent_builders::ListClosedWorkflowExecutions::type_filter) / [`set_type_filter(Option<WorkflowTypeFilter>)`](crate::client::fluent_builders::ListClosedWorkflowExecutions::set_type_filter): <p>If specified, only executions of the type specified in the filter are returned.</p> <note> <p> <code>closeStatusFilter</code>, <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> </note> /// - [`tag_filter(TagFilter)`](crate::client::fluent_builders::ListClosedWorkflowExecutions::tag_filter) / [`set_tag_filter(Option<TagFilter>)`](crate::client::fluent_builders::ListClosedWorkflowExecutions::set_tag_filter): <p>If specified, only executions that have the matching tag are listed.</p> <note> <p> <code>closeStatusFilter</code>, <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> </note> /// - [`next_page_token(impl Into<String>)`](crate::client::fluent_builders::ListClosedWorkflowExecutions::next_page_token) / [`set_next_page_token(Option<String>)`](crate::client::fluent_builders::ListClosedWorkflowExecutions::set_next_page_token): <p>If <code>NextPageToken</code> is returned there are more results available. The value of <code>NextPageToken</code> is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a <code>400</code> error: "<code>Specified token has exceeded its maximum lifetime</code>". </p> <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call. </p> /// - [`maximum_page_size(i32)`](crate::client::fluent_builders::ListClosedWorkflowExecutions::maximum_page_size) / [`set_maximum_page_size(i32)`](crate::client::fluent_builders::ListClosedWorkflowExecutions::set_maximum_page_size): <p>The maximum number of results that are returned per call. Use <code>nextPageToken</code> to obtain further pages of results. </p> /// - [`reverse_order(bool)`](crate::client::fluent_builders::ListClosedWorkflowExecutions::reverse_order) / [`set_reverse_order(bool)`](crate::client::fluent_builders::ListClosedWorkflowExecutions::set_reverse_order): <p>When set to <code>true</code>, returns the results in reverse order. By default the results are returned in descending order of the start or the close time of the executions.</p> /// - On success, responds with [`ListClosedWorkflowExecutionsOutput`](crate::output::ListClosedWorkflowExecutionsOutput) with field(s): /// - [`execution_infos(Option<Vec<WorkflowExecutionInfo>>)`](crate::output::ListClosedWorkflowExecutionsOutput::execution_infos): <p>The list of workflow information structures.</p> /// - [`next_page_token(Option<String>)`](crate::output::ListClosedWorkflowExecutionsOutput::next_page_token): <p>If a <code>NextPageToken</code> was returned by a previous call, there are more results available. 
To retrieve the next page of results, make the call again using the returned token in <code>nextPageToken</code>. Keep all other arguments unchanged.</p> <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call.</p> /// - On failure, responds with [`SdkError<ListClosedWorkflowExecutionsError>`](crate::error::ListClosedWorkflowExecutionsError) pub fn list_closed_workflow_executions(&self) -> fluent_builders::ListClosedWorkflowExecutions { fluent_builders::ListClosedWorkflowExecutions::new(self.handle.clone()) } /// Constructs a fluent builder for the [`ListDomains`](crate::client::fluent_builders::ListDomains) operation. /// This operation supports pagination; See [`into_paginator()`](crate::client::fluent_builders::ListDomains::into_paginator). /// /// - The fluent builder is configurable: /// - [`next_page_token(impl Into<String>)`](crate::client::fluent_builders::ListDomains::next_page_token) / [`set_next_page_token(Option<String>)`](crate::client::fluent_builders::ListDomains::set_next_page_token): <p>If <code>NextPageToken</code> is returned there are more results available. The value of <code>NextPageToken</code> is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a <code>400</code> error: "<code>Specified token has exceeded its maximum lifetime</code>". </p> <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call. </p> /// - [`registration_status(RegistrationStatus)`](crate::client::fluent_builders::ListDomains::registration_status) / [`set_registration_status(Option<RegistrationStatus>)`](crate::client::fluent_builders::ListDomains::set_registration_status): <p>Specifies the registration status of the domains to list.</p> /// - [`maximum_page_size(i32)`](crate::client::fluent_builders::ListDomains::maximum_page_size) / [`set_maximum_page_size(i32)`](crate::client::fluent_builders::ListDomains::set_maximum_page_size): <p>The maximum number of results that are returned per call. Use <code>nextPageToken</code> to obtain further pages of results. </p> /// - [`reverse_order(bool)`](crate::client::fluent_builders::ListDomains::reverse_order) / [`set_reverse_order(bool)`](crate::client::fluent_builders::ListDomains::set_reverse_order): <p>When set to <code>true</code>, returns the results in reverse order. By default, the results are returned in ascending alphabetical order by <code>name</code> of the domains.</p> /// - On success, responds with [`ListDomainsOutput`](crate::output::ListDomainsOutput) with field(s): /// - [`domain_infos(Option<Vec<DomainInfo>>)`](crate::output::ListDomainsOutput::domain_infos): <p>A list of DomainInfo structures.</p> /// - [`next_page_token(Option<String>)`](crate::output::ListDomainsOutput::next_page_token): <p>If a <code>NextPageToken</code> was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in <code>nextPageToken</code>. 
Keep all other arguments unchanged.</p> <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call.</p> /// - On failure, responds with [`SdkError<ListDomainsError>`](crate::error::ListDomainsError) pub fn list_domains(&self) -> fluent_builders::ListDomains { fluent_builders::ListDomains::new(self.handle.clone()) } /// Constructs a fluent builder for the [`ListOpenWorkflowExecutions`](crate::client::fluent_builders::ListOpenWorkflowExecutions) operation. /// This operation supports pagination; See [`into_paginator()`](crate::client::fluent_builders::ListOpenWorkflowExecutions::into_paginator). /// /// - The fluent builder is configurable: /// - [`domain(impl Into<String>)`](crate::client::fluent_builders::ListOpenWorkflowExecutions::domain) / [`set_domain(Option<String>)`](crate::client::fluent_builders::ListOpenWorkflowExecutions::set_domain): <p>The name of the domain that contains the workflow executions to list.</p> /// - [`start_time_filter(ExecutionTimeFilter)`](crate::client::fluent_builders::ListOpenWorkflowExecutions::start_time_filter) / [`set_start_time_filter(Option<ExecutionTimeFilter>)`](crate::client::fluent_builders::ListOpenWorkflowExecutions::set_start_time_filter): <p>Workflow executions are included in the returned results based on whether their start times are within the range specified by this filter.</p> /// - [`type_filter(WorkflowTypeFilter)`](crate::client::fluent_builders::ListOpenWorkflowExecutions::type_filter) / [`set_type_filter(Option<WorkflowTypeFilter>)`](crate::client::fluent_builders::ListOpenWorkflowExecutions::set_type_filter): <p>If specified, only executions of the type specified in the filter are returned.</p> <note> <p> <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> </note> /// - [`tag_filter(TagFilter)`](crate::client::fluent_builders::ListOpenWorkflowExecutions::tag_filter) / [`set_tag_filter(Option<TagFilter>)`](crate::client::fluent_builders::ListOpenWorkflowExecutions::set_tag_filter): <p>If specified, only executions that have the matching tag are listed.</p> <note> <p> <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> </note> /// - [`next_page_token(impl Into<String>)`](crate::client::fluent_builders::ListOpenWorkflowExecutions::next_page_token) / [`set_next_page_token(Option<String>)`](crate::client::fluent_builders::ListOpenWorkflowExecutions::set_next_page_token): <p>If <code>NextPageToken</code> is returned there are more results available. The value of <code>NextPageToken</code> is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a <code>400</code> error: "<code>Specified token has exceeded its maximum lifetime</code>". </p> <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call. </p> /// - [`maximum_page_size(i32)`](crate::client::fluent_builders::ListOpenWorkflowExecutions::maximum_page_size) / [`set_maximum_page_size(i32)`](crate::client::fluent_builders::ListOpenWorkflowExecutions::set_maximum_page_size): <p>The maximum number of results that are returned per call. 
Use <code>nextPageToken</code> to obtain further pages of results. </p> /// - [`reverse_order(bool)`](crate::client::fluent_builders::ListOpenWorkflowExecutions::reverse_order) / [`set_reverse_order(bool)`](crate::client::fluent_builders::ListOpenWorkflowExecutions::set_reverse_order): <p>When set to <code>true</code>, returns the results in reverse order. By default the results are returned in descending order of the start time of the executions.</p> /// - [`execution_filter(WorkflowExecutionFilter)`](crate::client::fluent_builders::ListOpenWorkflowExecutions::execution_filter) / [`set_execution_filter(Option<WorkflowExecutionFilter>)`](crate::client::fluent_builders::ListOpenWorkflowExecutions::set_execution_filter): <p>If specified, only workflow executions matching the workflow ID specified in the filter are returned.</p> <note> <p> <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> </note> /// - On success, responds with [`ListOpenWorkflowExecutionsOutput`](crate::output::ListOpenWorkflowExecutionsOutput) with field(s): /// - [`execution_infos(Option<Vec<WorkflowExecutionInfo>>)`](crate::output::ListOpenWorkflowExecutionsOutput::execution_infos): <p>The list of workflow information structures.</p> /// - [`next_page_token(Option<String>)`](crate::output::ListOpenWorkflowExecutionsOutput::next_page_token): <p>If a <code>NextPageToken</code> was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in <code>nextPageToken</code>. Keep all other arguments unchanged.</p> <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call.</p> /// - On failure, responds with [`SdkError<ListOpenWorkflowExecutionsError>`](crate::error::ListOpenWorkflowExecutionsError) pub fn list_open_workflow_executions(&self) -> fluent_builders::ListOpenWorkflowExecutions { fluent_builders::ListOpenWorkflowExecutions::new(self.handle.clone()) } /// Constructs a fluent builder for the [`ListTagsForResource`](crate::client::fluent_builders::ListTagsForResource) operation. /// /// - The fluent builder is configurable: /// - [`resource_arn(impl Into<String>)`](crate::client::fluent_builders::ListTagsForResource::resource_arn) / [`set_resource_arn(Option<String>)`](crate::client::fluent_builders::ListTagsForResource::set_resource_arn): <p>The Amazon Resource Name (ARN) for the Amazon SWF domain.</p> /// - On success, responds with [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput) with field(s): /// - [`tags(Option<Vec<ResourceTag>>)`](crate::output::ListTagsForResourceOutput::tags): <p>An array of tags associated with the domain.</p> /// - On failure, responds with [`SdkError<ListTagsForResourceError>`](crate::error::ListTagsForResourceError) pub fn list_tags_for_resource(&self) -> fluent_builders::ListTagsForResource { fluent_builders::ListTagsForResource::new(self.handle.clone()) } /// Constructs a fluent builder for the [`ListWorkflowTypes`](crate::client::fluent_builders::ListWorkflowTypes) operation. /// This operation supports pagination; See [`into_paginator()`](crate::client::fluent_builders::ListWorkflowTypes::into_paginator). 
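///
/// For example, a minimal sketch that fetches one page of registered workflow types
/// (the domain name is a placeholder value):
///
/// ```no_run
/// # async fn example(client: &aws_sdk_swf::Client) -> Result<(), aws_sdk_swf::Error> {
/// let resp = client
///     .list_workflow_types()
///     .domain("my-domain")
///     .registration_status(aws_sdk_swf::model::RegistrationStatus::Registered)
///     .maximum_page_size(50)
///     .send()
///     .await?;
/// for info in resp.type_infos().unwrap_or_default() {
///     if let Some(wf) = info.workflow_type() {
///         // Name and version together identify a workflow type within the domain.
///         println!("{:?} v{:?}", wf.name(), wf.version());
///     }
/// }
/// # Ok(())
/// # }
/// ```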
/// /// - The fluent builder is configurable: /// - [`domain(impl Into<String>)`](crate::client::fluent_builders::ListWorkflowTypes::domain) / [`set_domain(Option<String>)`](crate::client::fluent_builders::ListWorkflowTypes::set_domain): <p>The name of the domain in which the workflow types have been registered.</p> /// - [`name(impl Into<String>)`](crate::client::fluent_builders::ListWorkflowTypes::name) / [`set_name(Option<String>)`](crate::client::fluent_builders::ListWorkflowTypes::set_name): <p>If specified, lists the workflow type with this name.</p> /// - [`registration_status(RegistrationStatus)`](crate::client::fluent_builders::ListWorkflowTypes::registration_status) / [`set_registration_status(Option<RegistrationStatus>)`](crate::client::fluent_builders::ListWorkflowTypes::set_registration_status): <p>Specifies the registration status of the workflow types to list.</p> /// - [`next_page_token(impl Into<String>)`](crate::client::fluent_builders::ListWorkflowTypes::next_page_token) / [`set_next_page_token(Option<String>)`](crate::client::fluent_builders::ListWorkflowTypes::set_next_page_token): <p>If <code>NextPageToken</code> is returned there are more results available. The value of <code>NextPageToken</code> is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a <code>400</code> error: "<code>Specified token has exceeded its maximum lifetime</code>". </p> <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call. </p> /// - [`maximum_page_size(i32)`](crate::client::fluent_builders::ListWorkflowTypes::maximum_page_size) / [`set_maximum_page_size(i32)`](crate::client::fluent_builders::ListWorkflowTypes::set_maximum_page_size): <p>The maximum number of results that are returned per call. Use <code>nextPageToken</code> to obtain further pages of results. </p> /// - [`reverse_order(bool)`](crate::client::fluent_builders::ListWorkflowTypes::reverse_order) / [`set_reverse_order(bool)`](crate::client::fluent_builders::ListWorkflowTypes::set_reverse_order): <p>When set to <code>true</code>, returns the results in reverse order. By default the results are returned in ascending alphabetical order of the <code>name</code> of the workflow types.</p> /// - On success, responds with [`ListWorkflowTypesOutput`](crate::output::ListWorkflowTypesOutput) with field(s): /// - [`type_infos(Option<Vec<WorkflowTypeInfo>>)`](crate::output::ListWorkflowTypesOutput::type_infos): <p>The list of workflow type information.</p> /// - [`next_page_token(Option<String>)`](crate::output::ListWorkflowTypesOutput::next_page_token): <p>If a <code>NextPageToken</code> was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in <code>nextPageToken</code>. Keep all other arguments unchanged.</p> <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call.</p> /// - On failure, responds with [`SdkError<ListWorkflowTypesError>`](crate::error::ListWorkflowTypesError) pub fn list_workflow_types(&self) -> fluent_builders::ListWorkflowTypes { fluent_builders::ListWorkflowTypes::new(self.handle.clone()) } /// Constructs a fluent builder for the [`PollForActivityTask`](crate::client::fluent_builders::PollForActivityTask) operation. 
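///
/// For example, one iteration of an activity worker loop; the domain, task list,
/// and identity are placeholder values, and the actual work is elided:
///
/// ```no_run
/// # async fn example(client: &aws_sdk_swf::Client) -> Result<(), aws_sdk_swf::Error> {
/// let task_list = aws_sdk_swf::model::TaskList::builder().name("my-task-list").build();
/// // Long poll: the call returns after up to 60 seconds, with an empty task token
/// // if no activity task became available in that window.
/// let task = client
///     .poll_for_activity_task()
///     .domain("my-domain")
///     .task_list(task_list)
///     .identity("worker-1")
///     .send()
///     .await?;
/// match task.task_token() {
///     Some(token) if !token.is_empty() => {
///         // ... perform the activity using task.input() ...
///         client
///             .respond_activity_task_completed()
///             .task_token(token)
///             .result("done")
///             .send()
///             .await?;
///     }
///     _ => println!("no task available"),
/// }
/// # Ok(())
/// # }
/// ```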
/// /// - The fluent builder is configurable: /// - [`domain(impl Into<String>)`](crate::client::fluent_builders::PollForActivityTask::domain) / [`set_domain(Option<String>)`](crate::client::fluent_builders::PollForActivityTask::set_domain): <p>The name of the domain that contains the task lists being polled.</p> /// - [`task_list(TaskList)`](crate::client::fluent_builders::PollForActivityTask::task_list) / [`set_task_list(Option<TaskList>)`](crate::client::fluent_builders::PollForActivityTask::set_task_list): <p>Specifies the task list to poll for activity tasks.</p> <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). Also, it must not <i>be</i> the literal string <code>arn</code>.</p> /// - [`identity(impl Into<String>)`](crate::client::fluent_builders::PollForActivityTask::identity) / [`set_identity(Option<String>)`](crate::client::fluent_builders::PollForActivityTask::set_identity): <p>Identity of the worker making the request, recorded in the <code>ActivityTaskStarted</code> event in the workflow history. This enables diagnostic tracing when problems arise. The form of this identity is user defined.</p> /// - On success, responds with [`PollForActivityTaskOutput`](crate::output::PollForActivityTaskOutput) with field(s): /// - [`task_token(Option<String>)`](crate::output::PollForActivityTaskOutput::task_token): <p>The opaque string used as a handle on the task. This token is used by workers to communicate progress and response information back to the system about the task.</p> /// - [`activity_id(Option<String>)`](crate::output::PollForActivityTaskOutput::activity_id): <p>The unique ID of the task.</p> /// - [`started_event_id(i64)`](crate::output::PollForActivityTaskOutput::started_event_id): <p>The ID of the <code>ActivityTaskStarted</code> event recorded in the history.</p> /// - [`workflow_execution(Option<WorkflowExecution>)`](crate::output::PollForActivityTaskOutput::workflow_execution): <p>The workflow execution that started this activity task.</p> /// - [`activity_type(Option<ActivityType>)`](crate::output::PollForActivityTaskOutput::activity_type): <p>The type of this activity task.</p> /// - [`input(Option<String>)`](crate::output::PollForActivityTaskOutput::input): <p>The inputs provided when the activity task was scheduled. The form of the input is user defined and should be meaningful to the activity implementation.</p> /// - On failure, responds with [`SdkError<PollForActivityTaskError>`](crate::error::PollForActivityTaskError) pub fn poll_for_activity_task(&self) -> fluent_builders::PollForActivityTask { fluent_builders::PollForActivityTask::new(self.handle.clone()) } /// Constructs a fluent builder for the [`PollForDecisionTask`](crate::client::fluent_builders::PollForDecisionTask) operation. /// This operation supports pagination; See [`into_paginator()`](crate::client::fluent_builders::PollForDecisionTask::into_paginator). 
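///
/// For example, a minimal decider sketch: poll for a decision task, inspect the
/// history, and respond (here with an empty decision list, which the service
/// accepts). The domain, task list, and identity are placeholder values:
///
/// ```no_run
/// # async fn example(client: &aws_sdk_swf::Client) -> Result<(), aws_sdk_swf::Error> {
/// let task_list = aws_sdk_swf::model::TaskList::builder().name("my-task-list").build();
/// let task = client
///     .poll_for_decision_task()
///     .domain("my-domain")
///     .task_list(task_list)
///     .identity("decider-1")
///     .send()
///     .await?;
/// // An empty task token means the long poll expired with no task available.
/// if let Some(token) = task.task_token().filter(|t| !t.is_empty()) {
///     // Decide what the workflow should do next from task.events(), then respond.
///     client
///         .respond_decision_task_completed()
///         .task_token(token)
///         .send()
///         .await?;
/// }
/// # Ok(())
/// # }
/// ```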
/// /// - The fluent builder is configurable: /// - [`domain(impl Into<String>)`](crate::client::fluent_builders::PollForDecisionTask::domain) / [`set_domain(Option<String>)`](crate::client::fluent_builders::PollForDecisionTask::set_domain): <p>The name of the domain containing the task lists to poll.</p> /// - [`task_list(TaskList)`](crate::client::fluent_builders::PollForDecisionTask::task_list) / [`set_task_list(Option<TaskList>)`](crate::client::fluent_builders::PollForDecisionTask::set_task_list): <p>Specifies the task list to poll for decision tasks.</p> <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). Also, it must not <i>be</i> the literal string <code>arn</code>.</p> /// - [`identity(impl Into<String>)`](crate::client::fluent_builders::PollForDecisionTask::identity) / [`set_identity(Option<String>)`](crate::client::fluent_builders::PollForDecisionTask::set_identity): <p>Identity of the decider making the request, which is recorded in the DecisionTaskStarted event in the workflow history. This enables diagnostic tracing when problems arise. The form of this identity is user defined.</p> /// - [`next_page_token(impl Into<String>)`](crate::client::fluent_builders::PollForDecisionTask::next_page_token) / [`set_next_page_token(Option<String>)`](crate::client::fluent_builders::PollForDecisionTask::set_next_page_token): <p>If <code>NextPageToken</code> is returned there are more results available. The value of <code>NextPageToken</code> is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a <code>400</code> error: "<code>Specified token has exceeded its maximum lifetime</code>". </p> <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call. </p> <note> <p>The <code>nextPageToken</code> returned by this action cannot be used with <code>GetWorkflowExecutionHistory</code> to get the next page. You must call <code>PollForDecisionTask</code> again (with the <code>nextPageToken</code>) to retrieve the next page of history records. Calling <code>PollForDecisionTask</code> with a <code>nextPageToken</code> doesn't return a new decision task.</p> </note> /// - [`maximum_page_size(i32)`](crate::client::fluent_builders::PollForDecisionTask::maximum_page_size) / [`set_maximum_page_size(i32)`](crate::client::fluent_builders::PollForDecisionTask::set_maximum_page_size): <p>The maximum number of results that are returned per call. Use <code>nextPageToken</code> to obtain further pages of results. </p> <p>This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.</p> /// - [`reverse_order(bool)`](crate::client::fluent_builders::PollForDecisionTask::reverse_order) / [`set_reverse_order(bool)`](crate::client::fluent_builders::PollForDecisionTask::set_reverse_order): <p>When set to <code>true</code>, returns the events in reverse order. 
By default the results are returned in ascending order of the <code>eventTimestamp</code> of the events.</p> /// - On success, responds with [`PollForDecisionTaskOutput`](crate::output::PollForDecisionTaskOutput) with field(s): /// - [`task_token(Option<String>)`](crate::output::PollForDecisionTaskOutput::task_token): <p>The opaque string used as a handle on the task. This token is used by workers to communicate progress and response information back to the system about the task.</p> /// - [`started_event_id(i64)`](crate::output::PollForDecisionTaskOutput::started_event_id): <p>The ID of the <code>DecisionTaskStarted</code> event recorded in the history.</p> /// - [`workflow_execution(Option<WorkflowExecution>)`](crate::output::PollForDecisionTaskOutput::workflow_execution): <p>The workflow execution for which this decision task was created.</p> /// - [`workflow_type(Option<WorkflowType>)`](crate::output::PollForDecisionTaskOutput::workflow_type): <p>The type of the workflow execution for which this decision task was created.</p> /// - [`events(Option<Vec<HistoryEvent>>)`](crate::output::PollForDecisionTaskOutput::events): <p>A paginated list of history events of the workflow execution. The decider uses this during the processing of the decision task.</p> /// - [`next_page_token(Option<String>)`](crate::output::PollForDecisionTaskOutput::next_page_token): <p>If a <code>NextPageToken</code> was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in <code>nextPageToken</code>. Keep all other arguments unchanged.</p> <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call.</p> /// - [`previous_started_event_id(i64)`](crate::output::PollForDecisionTaskOutput::previous_started_event_id): <p>The ID of the DecisionTaskStarted event of the previous decision task of this workflow execution that was processed by the decider. This can be used to determine which events in the history are new since the last decision task received by the decider.</p> /// - On failure, responds with [`SdkError<PollForDecisionTaskError>`](crate::error::PollForDecisionTaskError) pub fn poll_for_decision_task(&self) -> fluent_builders::PollForDecisionTask { fluent_builders::PollForDecisionTask::new(self.handle.clone()) } /// Constructs a fluent builder for the [`RecordActivityTaskHeartbeat`](crate::client::fluent_builders::RecordActivityTaskHeartbeat) operation. /// /// - The fluent builder is configurable: /// - [`task_token(impl Into<String>)`](crate::client::fluent_builders::RecordActivityTaskHeartbeat::task_token) / [`set_task_token(Option<String>)`](crate::client::fluent_builders::RecordActivityTaskHeartbeat::set_task_token): <p>The <code>taskToken</code> of the <code>ActivityTask</code>.</p> <important> <p> <code>taskToken</code> is generated by the service and should be treated as an opaque value. If the task is passed to another process, its <code>taskToken</code> must also be passed. This enables it to provide its progress and respond with results.
</p> </important> /// - [`details(impl Into<String>)`](crate::client::fluent_builders::RecordActivityTaskHeartbeat::details) / [`set_details(Option<String>)`](crate::client::fluent_builders::RecordActivityTaskHeartbeat::set_details): <p>If specified, contains details about the progress of the task.</p> /// - On success, responds with [`RecordActivityTaskHeartbeatOutput`](crate::output::RecordActivityTaskHeartbeatOutput) with field(s): /// - [`cancel_requested(bool)`](crate::output::RecordActivityTaskHeartbeatOutput::cancel_requested): <p>Set to <code>true</code> if cancellation of the task is requested.</p> /// - On failure, responds with [`SdkError<RecordActivityTaskHeartbeatError>`](crate::error::RecordActivityTaskHeartbeatError) pub fn record_activity_task_heartbeat(&self) -> fluent_builders::RecordActivityTaskHeartbeat { fluent_builders::RecordActivityTaskHeartbeat::new(self.handle.clone()) } /// Constructs a fluent builder for the [`RegisterActivityType`](crate::client::fluent_builders::RegisterActivityType) operation. /// /// - The fluent builder is configurable: /// - [`domain(impl Into<String>)`](crate::client::fluent_builders::RegisterActivityType::domain) / [`set_domain(Option<String>)`](crate::client::fluent_builders::RegisterActivityType::set_domain): <p>The name of the domain in which this activity is to be registered.</p> /// - [`name(impl Into<String>)`](crate::client::fluent_builders::RegisterActivityType::name) / [`set_name(Option<String>)`](crate::client::fluent_builders::RegisterActivityType::set_name): <p>The name of the activity type within the domain.</p> <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). Also, it must not <i>be</i> the literal string <code>arn</code>.</p> /// - [`version(impl Into<String>)`](crate::client::fluent_builders::RegisterActivityType::version) / [`set_version(Option<String>)`](crate::client::fluent_builders::RegisterActivityType::set_version): <p>The version of the activity type.</p> <note> <p>The activity type consists of the name and version, the combination of which must be unique within the domain.</p> </note> <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). Also, it must not <i>be</i> the literal string <code>arn</code>.</p> /// - [`description(impl Into<String>)`](crate::client::fluent_builders::RegisterActivityType::description) / [`set_description(Option<String>)`](crate::client::fluent_builders::RegisterActivityType::set_description): <p>A textual description of the activity type.</p> /// - [`default_task_start_to_close_timeout(impl Into<String>)`](crate::client::fluent_builders::RegisterActivityType::default_task_start_to_close_timeout) / [`set_default_task_start_to_close_timeout(Option<String>)`](crate::client::fluent_builders::RegisterActivityType::set_default_task_start_to_close_timeout): <p>If set, specifies the default maximum duration that a worker can take to process tasks of this activity type. This default can be overridden when scheduling an activity task using the <code>ScheduleActivityTask</code> <code>Decision</code>.</p> <p>The duration is specified in seconds, an integer greater than or equal to <code>0</code>. 
You can use <code>NONE</code> to specify unlimited duration.</p> /// - [`default_task_heartbeat_timeout(impl Into<String>)`](crate::client::fluent_builders::RegisterActivityType::default_task_heartbeat_timeout) / [`set_default_task_heartbeat_timeout(Option<String>)`](crate::client::fluent_builders::RegisterActivityType::set_default_task_heartbeat_timeout): <p>If set, specifies the default maximum time before which a worker processing a task of this type must report progress by calling <code>RecordActivityTaskHeartbeat</code>. If the timeout is exceeded, the activity task is automatically timed out. This default can be overridden when scheduling an activity task using the <code>ScheduleActivityTask</code> <code>Decision</code>. If the activity worker subsequently attempts to record a heartbeat or returns a result, the activity worker receives an <code>UnknownResource</code> fault. In this case, Amazon SWF no longer considers the activity task to be valid; the activity worker should clean up the activity task.</p> <p>The duration is specified in seconds, an integer greater than or equal to <code>0</code>. You can use <code>NONE</code> to specify unlimited duration.</p> /// - [`default_task_list(TaskList)`](crate::client::fluent_builders::RegisterActivityType::default_task_list) / [`set_default_task_list(Option<TaskList>)`](crate::client::fluent_builders::RegisterActivityType::set_default_task_list): <p>If set, specifies the default task list to use for scheduling tasks of this activity type. This default task list is used if a task list isn't provided when a task is scheduled through the <code>ScheduleActivityTask</code> <code>Decision</code>.</p> /// - [`default_task_priority(impl Into<String>)`](crate::client::fluent_builders::RegisterActivityType::default_task_priority) / [`set_default_task_priority(Option<String>)`](crate::client::fluent_builders::RegisterActivityType::set_default_task_priority): <p>The default task priority to assign to the activity type. If not assigned, then <code>0</code> is used. Valid values are integers that range from Java's <code>Integer.MIN_VALUE</code> (-2147483648) to <code>Integer.MAX_VALUE</code> (2147483647). Higher numbers indicate higher priority.</p> <p>For more information about setting task priority, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html">Setting Task Priority</a> in the <i>Amazon SWF Developer Guide</i>.</p> /// - [`default_task_schedule_to_start_timeout(impl Into<String>)`](crate::client::fluent_builders::RegisterActivityType::default_task_schedule_to_start_timeout) / [`set_default_task_schedule_to_start_timeout(Option<String>)`](crate::client::fluent_builders::RegisterActivityType::set_default_task_schedule_to_start_timeout): <p>If set, specifies the default maximum duration that a task of this activity type can wait before being assigned to a worker. This default can be overridden when scheduling an activity task using the <code>ScheduleActivityTask</code> <code>Decision</code>.</p> <p>The duration is specified in seconds, an integer greater than or equal to <code>0</code>.
You can use <code>NONE</code> to specify unlimited duration.</p> /// - [`default_task_schedule_to_close_timeout(impl Into<String>)`](crate::client::fluent_builders::RegisterActivityType::default_task_schedule_to_close_timeout) / [`set_default_task_schedule_to_close_timeout(Option<String>)`](crate::client::fluent_builders::RegisterActivityType::set_default_task_schedule_to_close_timeout): <p>If set, specifies the default maximum duration for a task of this activity type. This default can be overridden when scheduling an activity task using the <code>ScheduleActivityTask</code> <code>Decision</code>.</p> <p>The duration is specified in seconds, an integer greater than or equal to <code>0</code>. You can use <code>NONE</code> to specify unlimited duration.</p> /// - On success, responds with [`RegisterActivityTypeOutput`](crate::output::RegisterActivityTypeOutput) /// - On failure, responds with [`SdkError<RegisterActivityTypeError>`](crate::error::RegisterActivityTypeError) pub fn register_activity_type(&self) -> fluent_builders::RegisterActivityType { fluent_builders::RegisterActivityType::new(self.handle.clone()) } /// Constructs a fluent builder for the [`RegisterDomain`](crate::client::fluent_builders::RegisterDomain) operation. /// /// - The fluent builder is configurable: /// - [`name(impl Into<String>)`](crate::client::fluent_builders::RegisterDomain::name) / [`set_name(Option<String>)`](crate::client::fluent_builders::RegisterDomain::set_name): <p>Name of the domain to register. The name must be unique in the region that the domain is registered in.</p> <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). Also, it must not <i>be</i> the literal string <code>arn</code>.</p> /// - [`description(impl Into<String>)`](crate::client::fluent_builders::RegisterDomain::description) / [`set_description(Option<String>)`](crate::client::fluent_builders::RegisterDomain::set_description): <p>A text description of the domain.</p> /// - [`workflow_execution_retention_period_in_days(impl Into<String>)`](crate::client::fluent_builders::RegisterDomain::workflow_execution_retention_period_in_days) / [`set_workflow_execution_retention_period_in_days(Option<String>)`](crate::client::fluent_builders::RegisterDomain::set_workflow_execution_retention_period_in_days): <p>The duration (in days) that records and histories of workflow executions on the domain should be kept by the service. After the retention period, the workflow execution isn't available in the results of visibility calls.</p> <p>If you pass the value <code>NONE</code> or <code>0</code> (zero), then the workflow execution history isn't retained. As soon as the workflow execution completes, the execution record and its history are deleted.</p> <p>The maximum workflow execution retention period is 90 days. For more information about Amazon SWF service limits, see: <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-limits.html">Amazon SWF Service Limits</a> in the <i>Amazon SWF Developer Guide</i>.</p> /// - [`tags(Vec<ResourceTag>)`](crate::client::fluent_builders::RegisterDomain::tags) / [`set_tags(Option<Vec<ResourceTag>>)`](crate::client::fluent_builders::RegisterDomain::set_tags): <p>Tags to be added when registering a domain.</p> <p>Tags may only contain unicode letters, digits, whitespace, or these symbols: <code>_ . 
: / = + - @</code>.</p> /// - On success, responds with [`RegisterDomainOutput`](crate::output::RegisterDomainOutput) /// - On failure, responds with [`SdkError<RegisterDomainError>`](crate::error::RegisterDomainError) pub fn register_domain(&self) -> fluent_builders::RegisterDomain { fluent_builders::RegisterDomain::new(self.handle.clone()) } /// Constructs a fluent builder for the [`RegisterWorkflowType`](crate::client::fluent_builders::RegisterWorkflowType) operation. /// /// - The fluent builder is configurable: /// - [`domain(impl Into<String>)`](crate::client::fluent_builders::RegisterWorkflowType::domain) / [`set_domain(Option<String>)`](crate::client::fluent_builders::RegisterWorkflowType::set_domain): <p>The name of the domain in which to register the workflow type.</p> /// - [`name(impl Into<String>)`](crate::client::fluent_builders::RegisterWorkflowType::name) / [`set_name(Option<String>)`](crate::client::fluent_builders::RegisterWorkflowType::set_name): <p>The name of the workflow type.</p> <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). Also, it must not <i>be</i> the literal string <code>arn</code>.</p> /// - [`version(impl Into<String>)`](crate::client::fluent_builders::RegisterWorkflowType::version) / [`set_version(Option<String>)`](crate::client::fluent_builders::RegisterWorkflowType::set_version): <p>The version of the workflow type.</p> <note> <p>The workflow type consists of the name and version, the combination of which must be unique within the domain. To get a list of all currently registered workflow types, use the <code>ListWorkflowTypes</code> action.</p> </note> <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). Also, it must not <i>be</i> the literal string <code>arn</code>.</p> /// - [`description(impl Into<String>)`](crate::client::fluent_builders::RegisterWorkflowType::description) / [`set_description(Option<String>)`](crate::client::fluent_builders::RegisterWorkflowType::set_description): <p>Textual description of the workflow type.</p> /// - [`default_task_start_to_close_timeout(impl Into<String>)`](crate::client::fluent_builders::RegisterWorkflowType::default_task_start_to_close_timeout) / [`set_default_task_start_to_close_timeout(Option<String>)`](crate::client::fluent_builders::RegisterWorkflowType::set_default_task_start_to_close_timeout): <p>If set, specifies the default maximum duration of decision tasks for this workflow type. This default can be overridden when starting a workflow execution using the <code>StartWorkflowExecution</code> action or the <code>StartChildWorkflowExecution</code> <code>Decision</code>.</p> <p>The duration is specified in seconds, an integer greater than or equal to <code>0</code>. 
You can use <code>NONE</code> to specify unlimited duration.</p> /// - [`default_execution_start_to_close_timeout(impl Into<String>)`](crate::client::fluent_builders::RegisterWorkflowType::default_execution_start_to_close_timeout) / [`set_default_execution_start_to_close_timeout(Option<String>)`](crate::client::fluent_builders::RegisterWorkflowType::set_default_execution_start_to_close_timeout): <p>If set, specifies the default maximum duration for executions of this workflow type. You can override this default when starting an execution through the <code>StartWorkflowExecution</code> Action or <code>StartChildWorkflowExecution</code> <code>Decision</code>.</p> <p>The duration is specified in seconds; an integer greater than or equal to 0. Unlike some of the other timeout parameters in Amazon SWF, you cannot specify a value of "NONE" for <code>defaultExecutionStartToCloseTimeout</code>; there is a one-year max limit on the time that a workflow execution can run. Exceeding this limit always causes the workflow execution to time out.</p> /// - [`default_task_list(TaskList)`](crate::client::fluent_builders::RegisterWorkflowType::default_task_list) / [`set_default_task_list(Option<TaskList>)`](crate::client::fluent_builders::RegisterWorkflowType::set_default_task_list): <p>If set, specifies the default task list to use for scheduling decision tasks for executions of this workflow type. This default is used only if a task list isn't provided when starting the execution through the <code>StartWorkflowExecution</code> Action or <code>StartChildWorkflowExecution</code> <code>Decision</code>.</p> /// - [`default_task_priority(impl Into<String>)`](crate::client::fluent_builders::RegisterWorkflowType::default_task_priority) / [`set_default_task_priority(Option<String>)`](crate::client::fluent_builders::RegisterWorkflowType::set_default_task_priority): <p>The default task priority to assign to the workflow type. If not assigned, then <code>0</code> is used. Valid values are integers that range from Java's <code>Integer.MIN_VALUE</code> (-2147483648) to <code>Integer.MAX_VALUE</code> (2147483647). Higher numbers indicate higher priority.</p> <p>For more information about setting task priority, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html">Setting Task Priority</a> in the <i>Amazon SWF Developer Guide</i>.</p> /// - [`default_child_policy(ChildPolicy)`](crate::client::fluent_builders::RegisterWorkflowType::default_child_policy) / [`set_default_child_policy(Option<ChildPolicy>)`](crate::client::fluent_builders::RegisterWorkflowType::set_default_child_policy): <p>If set, specifies the default policy to use for the child workflow executions when a workflow execution of this type is terminated, by calling the <code>TerminateWorkflowExecution</code> action explicitly or due to an expired timeout. This default can be overridden when starting a workflow execution using the <code>StartWorkflowExecution</code> action or the <code>StartChildWorkflowExecution</code> <code>Decision</code>.</p> <p>The supported child policies are:</p> <ul> <li> <p> <code>TERMINATE</code> – The child executions are terminated.</p> </li> <li> <p> <code>REQUEST_CANCEL</code> – A request to cancel is attempted for each child execution by recording a <code>WorkflowExecutionCancelRequested</code> event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.</p> </li> <li> <p> <code>ABANDON</code> – No action is taken. 
The child executions continue to run.</p> </li> </ul> /// - [`default_lambda_role(impl Into<String>)`](crate::client::fluent_builders::RegisterWorkflowType::default_lambda_role) / [`set_default_lambda_role(Option<String>)`](crate::client::fluent_builders::RegisterWorkflowType::set_default_lambda_role): <p>The default IAM role attached to this workflow type.</p> <note> <p>Executions of this workflow type need IAM roles to invoke Lambda functions. If you don't specify an IAM role when you start this workflow type, the default Lambda role is attached to the execution. For more information, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html">https://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html</a> in the <i>Amazon SWF Developer Guide</i>.</p> </note> /// - On success, responds with [`RegisterWorkflowTypeOutput`](crate::output::RegisterWorkflowTypeOutput) /// - On failure, responds with [`SdkError<RegisterWorkflowTypeError>`](crate::error::RegisterWorkflowTypeError) pub fn register_workflow_type(&self) -> fluent_builders::RegisterWorkflowType { fluent_builders::RegisterWorkflowType::new(self.handle.clone()) } /// Constructs a fluent builder for the [`RequestCancelWorkflowExecution`](crate::client::fluent_builders::RequestCancelWorkflowExecution) operation. /// /// - The fluent builder is configurable: /// - [`domain(impl Into<String>)`](crate::client::fluent_builders::RequestCancelWorkflowExecution::domain) / [`set_domain(Option<String>)`](crate::client::fluent_builders::RequestCancelWorkflowExecution::set_domain): <p>The name of the domain containing the workflow execution to cancel.</p> /// - [`workflow_id(impl Into<String>)`](crate::client::fluent_builders::RequestCancelWorkflowExecution::workflow_id) / [`set_workflow_id(Option<String>)`](crate::client::fluent_builders::RequestCancelWorkflowExecution::set_workflow_id): <p>The workflowId of the workflow execution to cancel.</p> /// - [`run_id(impl Into<String>)`](crate::client::fluent_builders::RequestCancelWorkflowExecution::run_id) / [`set_run_id(Option<String>)`](crate::client::fluent_builders::RequestCancelWorkflowExecution::set_run_id): <p>The runId of the workflow execution to cancel.</p> /// - On success, responds with [`RequestCancelWorkflowExecutionOutput`](crate::output::RequestCancelWorkflowExecutionOutput) /// - On failure, responds with [`SdkError<RequestCancelWorkflowExecutionError>`](crate::error::RequestCancelWorkflowExecutionError) pub fn request_cancel_workflow_execution( &self, ) -> fluent_builders::RequestCancelWorkflowExecution { fluent_builders::RequestCancelWorkflowExecution::new(self.handle.clone()) } /// Constructs a fluent builder for the [`RespondActivityTaskCanceled`](crate::client::fluent_builders::RespondActivityTaskCanceled) operation. /// /// - The fluent builder is configurable: /// - [`task_token(impl Into<String>)`](crate::client::fluent_builders::RespondActivityTaskCanceled::task_token) / [`set_task_token(Option<String>)`](crate::client::fluent_builders::RespondActivityTaskCanceled::set_task_token): <p>The <code>taskToken</code> of the <code>ActivityTask</code>.</p> <important> <p> <code>taskToken</code> is generated by the service and should be treated as an opaque value. If the task is passed to another process, its <code>taskToken</code> must also be passed. 
This enables it to provide its progress and respond with results.</p> </important> /// - [`details(impl Into<String>)`](crate::client::fluent_builders::RespondActivityTaskCanceled::details) / [`set_details(Option<String>)`](crate::client::fluent_builders::RespondActivityTaskCanceled::set_details): <p> Information about the cancellation.</p> /// - On success, responds with [`RespondActivityTaskCanceledOutput`](crate::output::RespondActivityTaskCanceledOutput) /// - On failure, responds with [`SdkError<RespondActivityTaskCanceledError>`](crate::error::RespondActivityTaskCanceledError) pub fn respond_activity_task_canceled(&self) -> fluent_builders::RespondActivityTaskCanceled { fluent_builders::RespondActivityTaskCanceled::new(self.handle.clone()) } /// Constructs a fluent builder for the [`RespondActivityTaskCompleted`](crate::client::fluent_builders::RespondActivityTaskCompleted) operation. /// /// - The fluent builder is configurable: /// - [`task_token(impl Into<String>)`](crate::client::fluent_builders::RespondActivityTaskCompleted::task_token) / [`set_task_token(Option<String>)`](crate::client::fluent_builders::RespondActivityTaskCompleted::set_task_token): <p>The <code>taskToken</code> of the <code>ActivityTask</code>.</p> <important> <p> <code>taskToken</code> is generated by the service and should be treated as an opaque value. If the task is passed to another process, its <code>taskToken</code> must also be passed. This enables it to provide its progress and respond with results.</p> </important> /// - [`result(impl Into<String>)`](crate::client::fluent_builders::RespondActivityTaskCompleted::result) / [`set_result(Option<String>)`](crate::client::fluent_builders::RespondActivityTaskCompleted::set_result): <p>The result of the activity task. It is a free form string that is implementation specific.</p> /// - On success, responds with [`RespondActivityTaskCompletedOutput`](crate::output::RespondActivityTaskCompletedOutput) /// - On failure, responds with [`SdkError<RespondActivityTaskCompletedError>`](crate::error::RespondActivityTaskCompletedError) pub fn respond_activity_task_completed(&self) -> fluent_builders::RespondActivityTaskCompleted { fluent_builders::RespondActivityTaskCompleted::new(self.handle.clone()) } /// Constructs a fluent builder for the [`RespondActivityTaskFailed`](crate::client::fluent_builders::RespondActivityTaskFailed) operation. /// /// - The fluent builder is configurable: /// - [`task_token(impl Into<String>)`](crate::client::fluent_builders::RespondActivityTaskFailed::task_token) / [`set_task_token(Option<String>)`](crate::client::fluent_builders::RespondActivityTaskFailed::set_task_token): <p>The <code>taskToken</code> of the <code>ActivityTask</code>.</p> <important> <p> <code>taskToken</code> is generated by the service and should be treated as an opaque value. If the task is passed to another process, its <code>taskToken</code> must also be passed. 
This enables it to provide its progress and respond with results.</p> </important> /// - [`reason(impl Into<String>)`](crate::client::fluent_builders::RespondActivityTaskFailed::reason) / [`set_reason(Option<String>)`](crate::client::fluent_builders::RespondActivityTaskFailed::set_reason): <p>Description of the error that may assist in diagnostics.</p> /// - [`details(impl Into<String>)`](crate::client::fluent_builders::RespondActivityTaskFailed::details) / [`set_details(Option<String>)`](crate::client::fluent_builders::RespondActivityTaskFailed::set_details): <p> Detailed information about the failure.</p> /// - On success, responds with [`RespondActivityTaskFailedOutput`](crate::output::RespondActivityTaskFailedOutput) /// - On failure, responds with [`SdkError<RespondActivityTaskFailedError>`](crate::error::RespondActivityTaskFailedError) pub fn respond_activity_task_failed(&self) -> fluent_builders::RespondActivityTaskFailed { fluent_builders::RespondActivityTaskFailed::new(self.handle.clone()) } /// Constructs a fluent builder for the [`RespondDecisionTaskCompleted`](crate::client::fluent_builders::RespondDecisionTaskCompleted) operation. /// /// - The fluent builder is configurable: /// - [`task_token(impl Into<String>)`](crate::client::fluent_builders::RespondDecisionTaskCompleted::task_token) / [`set_task_token(Option<String>)`](crate::client::fluent_builders::RespondDecisionTaskCompleted::set_task_token): <p>The <code>taskToken</code> from the <code>DecisionTask</code>.</p> <important> <p> <code>taskToken</code> is generated by the service and should be treated as an opaque value. If the task is passed to another process, its <code>taskToken</code> must also be passed. This enables it to provide its progress and respond with results.</p> </important> /// - [`decisions(Vec<Decision>)`](crate::client::fluent_builders::RespondDecisionTaskCompleted::decisions) / [`set_decisions(Option<Vec<Decision>>)`](crate::client::fluent_builders::RespondDecisionTaskCompleted::set_decisions): <p>The list of decisions (possibly empty) made by the decider while processing this decision task. See the docs for the <code>Decision</code> structure for details.</p> /// - [`execution_context(impl Into<String>)`](crate::client::fluent_builders::RespondDecisionTaskCompleted::execution_context) / [`set_execution_context(Option<String>)`](crate::client::fluent_builders::RespondDecisionTaskCompleted::set_execution_context): <p>User defined context to add to workflow execution.</p> /// - On success, responds with [`RespondDecisionTaskCompletedOutput`](crate::output::RespondDecisionTaskCompletedOutput) /// - On failure, responds with [`SdkError<RespondDecisionTaskCompletedError>`](crate::error::RespondDecisionTaskCompletedError) pub fn respond_decision_task_completed(&self) -> fluent_builders::RespondDecisionTaskCompleted { fluent_builders::RespondDecisionTaskCompleted::new(self.handle.clone()) } /// Constructs a fluent builder for the [`SignalWorkflowExecution`](crate::client::fluent_builders::SignalWorkflowExecution) operation. 
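///
/// A minimal, hypothetical usage sketch (the domain, workflow ID, signal name, and input below are placeholder values, and client construction is elided):
///
/// ```no_run
/// # async fn example(client: &aws_sdk_swf::Client) -> Result<(), aws_sdk_swf::Error> {
/// client
///     .signal_workflow_execution()
///     .domain("my-domain")          // placeholder domain name
///     .workflow_id("order-12345")   // placeholder workflowId
///     .signal_name("approve")       // must be meaningful to the target workflow
///     .input("{\"approved\":true}") // optional free-form payload
///     .send()
///     .await?;
/// # Ok(())
/// # }
/// ```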
/// /// - The fluent builder is configurable: /// - [`domain(impl Into<String>)`](crate::client::fluent_builders::SignalWorkflowExecution::domain) / [`set_domain(Option<String>)`](crate::client::fluent_builders::SignalWorkflowExecution::set_domain): <p>The name of the domain containing the workflow execution to signal.</p> /// - [`workflow_id(impl Into<String>)`](crate::client::fluent_builders::SignalWorkflowExecution::workflow_id) / [`set_workflow_id(Option<String>)`](crate::client::fluent_builders::SignalWorkflowExecution::set_workflow_id): <p>The workflowId of the workflow execution to signal.</p> /// - [`run_id(impl Into<String>)`](crate::client::fluent_builders::SignalWorkflowExecution::run_id) / [`set_run_id(Option<String>)`](crate::client::fluent_builders::SignalWorkflowExecution::set_run_id): <p>The runId of the workflow execution to signal.</p> /// - [`signal_name(impl Into<String>)`](crate::client::fluent_builders::SignalWorkflowExecution::signal_name) / [`set_signal_name(Option<String>)`](crate::client::fluent_builders::SignalWorkflowExecution::set_signal_name): <p>The name of the signal. This name must be meaningful to the target workflow.</p> /// - [`input(impl Into<String>)`](crate::client::fluent_builders::SignalWorkflowExecution::input) / [`set_input(Option<String>)`](crate::client::fluent_builders::SignalWorkflowExecution::set_input): <p>Data to attach to the <code>WorkflowExecutionSignaled</code> event in the target workflow execution's history.</p> /// - On success, responds with [`SignalWorkflowExecutionOutput`](crate::output::SignalWorkflowExecutionOutput) /// - On failure, responds with [`SdkError<SignalWorkflowExecutionError>`](crate::error::SignalWorkflowExecutionError) pub fn signal_workflow_execution(&self) -> fluent_builders::SignalWorkflowExecution { fluent_builders::SignalWorkflowExecution::new(self.handle.clone()) } /// Constructs a fluent builder for the [`StartWorkflowExecution`](crate::client::fluent_builders::StartWorkflowExecution) operation. /// /// - The fluent builder is configurable: /// - [`domain(impl Into<String>)`](crate::client::fluent_builders::StartWorkflowExecution::domain) / [`set_domain(Option<String>)`](crate::client::fluent_builders::StartWorkflowExecution::set_domain): <p>The name of the domain in which the workflow execution is created.</p> /// - [`workflow_id(impl Into<String>)`](crate::client::fluent_builders::StartWorkflowExecution::workflow_id) / [`set_workflow_id(Option<String>)`](crate::client::fluent_builders::StartWorkflowExecution::set_workflow_id): <p>The user defined identifier associated with the workflow execution. You can use this to associate a custom identifier with the workflow execution. You may specify the same identifier if a workflow execution is logically a <i>restart</i> of a previous execution. You cannot have two open workflow executions with the same <code>workflowId</code> at the same time within the same domain.</p> <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). 
Also, it must not <i>be</i> the literal string <code>arn</code>.</p> /// - [`workflow_type(WorkflowType)`](crate::client::fluent_builders::StartWorkflowExecution::workflow_type) / [`set_workflow_type(Option<WorkflowType>)`](crate::client::fluent_builders::StartWorkflowExecution::set_workflow_type): <p>The type of the workflow to start.</p> /// - [`task_list(TaskList)`](crate::client::fluent_builders::StartWorkflowExecution::task_list) / [`set_task_list(Option<TaskList>)`](crate::client::fluent_builders::StartWorkflowExecution::set_task_list): <p>The task list to use for the decision tasks generated for this workflow execution. This overrides the <code>defaultTaskList</code> specified when registering the workflow type.</p> <note> <p>A task list for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default task list was specified at registration time then a fault is returned.</p> </note> <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). Also, it must not <i>be</i> the literal string <code>arn</code>.</p> /// - [`task_priority(impl Into<String>)`](crate::client::fluent_builders::StartWorkflowExecution::task_priority) / [`set_task_priority(Option<String>)`](crate::client::fluent_builders::StartWorkflowExecution::set_task_priority): <p>The task priority to use for this workflow execution. This overrides any default priority that was assigned when the workflow type was registered. If not set, then the default task priority for the workflow type is used. Valid values are integers that range from Java's <code>Integer.MIN_VALUE</code> (-2147483648) to <code>Integer.MAX_VALUE</code> (2147483647). Higher numbers indicate higher priority.</p> <p>For more information about setting task priority, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html">Setting Task Priority</a> in the <i>Amazon SWF Developer Guide</i>.</p> /// - [`input(impl Into<String>)`](crate::client::fluent_builders::StartWorkflowExecution::input) / [`set_input(Option<String>)`](crate::client::fluent_builders::StartWorkflowExecution::set_input): <p>The input for the workflow execution. This is a free form string which should be meaningful to the workflow you are starting. This <code>input</code> is made available to the new workflow execution in the <code>WorkflowExecutionStarted</code> history event.</p> /// - [`execution_start_to_close_timeout(impl Into<String>)`](crate::client::fluent_builders::StartWorkflowExecution::execution_start_to_close_timeout) / [`set_execution_start_to_close_timeout(Option<String>)`](crate::client::fluent_builders::StartWorkflowExecution::set_execution_start_to_close_timeout): <p>The total duration for this workflow execution. This overrides the defaultExecutionStartToCloseTimeout specified when registering the workflow type.</p> <p>The duration is specified in seconds; an integer greater than or equal to <code>0</code>. Exceeding this limit causes the workflow execution to time out. 
Unlike some of the other timeout parameters in Amazon SWF, you cannot specify a value of "NONE" for this timeout; there is a one-year maximum limit on the time that a workflow execution can run.</p> <note> <p>An execution start-to-close timeout must be specified either through this parameter or as a default when the workflow type is registered. If neither this parameter nor a default execution start-to-close timeout is specified, a fault is returned.</p> </note> /// - [`tag_list(Vec<String>)`](crate::client::fluent_builders::StartWorkflowExecution::tag_list) / [`set_tag_list(Option<Vec<String>>)`](crate::client::fluent_builders::StartWorkflowExecution::set_tag_list): <p>The list of tags to associate with the workflow execution. You can specify a maximum of 5 tags. You can list workflow executions with a specific tag by calling <code>ListOpenWorkflowExecutions</code> or <code>ListClosedWorkflowExecutions</code> and specifying a <code>TagFilter</code>.</p> /// - [`task_start_to_close_timeout(impl Into<String>)`](crate::client::fluent_builders::StartWorkflowExecution::task_start_to_close_timeout) / [`set_task_start_to_close_timeout(Option<String>)`](crate::client::fluent_builders::StartWorkflowExecution::set_task_start_to_close_timeout): <p>Specifies the maximum duration of decision tasks for this workflow execution. This parameter overrides the <code>defaultTaskStartToCloseTimeout</code> specified when registering the workflow type using <code>RegisterWorkflowType</code>.</p> <p>The duration is specified in seconds; an integer greater than or equal to <code>0</code>. You can use <code>NONE</code> to specify unlimited duration.</p> <note> <p>A task start-to-close timeout for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default task start-to-close timeout was specified at registration time then a fault is returned.</p> </note> /// - [`child_policy(ChildPolicy)`](crate::client::fluent_builders::StartWorkflowExecution::child_policy) / [`set_child_policy(Option<ChildPolicy>)`](crate::client::fluent_builders::StartWorkflowExecution::set_child_policy): <p>If set, specifies the policy to use for the child workflow executions of this workflow execution if it is terminated, by calling the <code>TerminateWorkflowExecution</code> action explicitly or due to an expired timeout. This policy overrides the default child policy specified when registering the workflow type using <code>RegisterWorkflowType</code>.</p> <p>The supported child policies are:</p> <ul> <li> <p> <code>TERMINATE</code> – The child executions are terminated.</p> </li> <li> <p> <code>REQUEST_CANCEL</code> – A request to cancel is attempted for each child execution by recording a <code>WorkflowExecutionCancelRequested</code> event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.</p> </li> <li> <p> <code>ABANDON</code> – No action is taken. The child executions continue to run.</p> </li> </ul> <note> <p>A child policy for this workflow execution must be specified either as a default for the workflow type or through this parameter. 
If neither this parameter is set nor a default child policy was specified at registration time then a fault is returned.</p> </note> /// - [`lambda_role(impl Into<String>)`](crate::client::fluent_builders::StartWorkflowExecution::lambda_role) / [`set_lambda_role(Option<String>)`](crate::client::fluent_builders::StartWorkflowExecution::set_lambda_role): <p>The IAM role to attach to this workflow execution.</p> <note> <p>Executions of this workflow type need IAM roles to invoke Lambda functions. If you don't attach an IAM role, any attempt to schedule a Lambda task fails. This results in a <code>ScheduleLambdaFunctionFailed</code> history event. For more information, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html">https://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html</a> in the <i>Amazon SWF Developer Guide</i>.</p> </note> /// - On success, responds with [`StartWorkflowExecutionOutput`](crate::output::StartWorkflowExecutionOutput) with field(s): /// - [`run_id(Option<String>)`](crate::output::StartWorkflowExecutionOutput::run_id): <p>The <code>runId</code> of a workflow execution. This ID is generated by the service and can be used to uniquely identify the workflow execution within a domain.</p> /// - On failure, responds with [`SdkError<StartWorkflowExecutionError>`](crate::error::StartWorkflowExecutionError) pub fn start_workflow_execution(&self) -> fluent_builders::StartWorkflowExecution { fluent_builders::StartWorkflowExecution::new(self.handle.clone()) } /// Constructs a fluent builder for the [`TagResource`](crate::client::fluent_builders::TagResource) operation. /// /// - The fluent builder is configurable: /// - [`resource_arn(impl Into<String>)`](crate::client::fluent_builders::TagResource::resource_arn) / [`set_resource_arn(Option<String>)`](crate::client::fluent_builders::TagResource::set_resource_arn): <p>The Amazon Resource Name (ARN) for the Amazon SWF domain.</p> /// - [`tags(Vec<ResourceTag>)`](crate::client::fluent_builders::TagResource::tags) / [`set_tags(Option<Vec<ResourceTag>>)`](crate::client::fluent_builders::TagResource::set_tags): <p>The list of tags to add to a domain. </p> <p>Tags may only contain Unicode letters, digits, whitespace, or these symbols: <code>_ . : / = + - @</code>.</p> /// - On success, responds with [`TagResourceOutput`](crate::output::TagResourceOutput) /// - On failure, responds with [`SdkError<TagResourceError>`](crate::error::TagResourceError) pub fn tag_resource(&self) -> fluent_builders::TagResource { fluent_builders::TagResource::new(self.handle.clone()) } /// Constructs a fluent builder for the [`TerminateWorkflowExecution`](crate::client::fluent_builders::TerminateWorkflowExecution) operation. 
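///
/// A minimal usage sketch (identifiers are placeholders; the `ChildPolicy` variant name follows the generated enum's UpperCamelCase convention, and client construction is elided):
///
/// ```no_run
/// # use aws_sdk_swf::model::ChildPolicy;
/// # async fn example(client: &aws_sdk_swf::Client) -> Result<(), aws_sdk_swf::Error> {
/// client
///     .terminate_workflow_execution()
///     .domain("my-domain")                  // placeholder domain name
///     .workflow_id("order-12345")           // placeholder workflowId
///     .reason("operator requested stop")    // recorded with the termination
///     .child_policy(ChildPolicy::Terminate) // also terminate any child executions
///     .send()
///     .await?;
/// # Ok(())
/// # }
/// ```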
/// /// - The fluent builder is configurable: /// - [`domain(impl Into<String>)`](crate::client::fluent_builders::TerminateWorkflowExecution::domain) / [`set_domain(Option<String>)`](crate::client::fluent_builders::TerminateWorkflowExecution::set_domain): <p>The domain of the workflow execution to terminate.</p> /// - [`workflow_id(impl Into<String>)`](crate::client::fluent_builders::TerminateWorkflowExecution::workflow_id) / [`set_workflow_id(Option<String>)`](crate::client::fluent_builders::TerminateWorkflowExecution::set_workflow_id): <p>The workflowId of the workflow execution to terminate.</p> /// - [`run_id(impl Into<String>)`](crate::client::fluent_builders::TerminateWorkflowExecution::run_id) / [`set_run_id(Option<String>)`](crate::client::fluent_builders::TerminateWorkflowExecution::set_run_id): <p>The runId of the workflow execution to terminate.</p> /// - [`reason(impl Into<String>)`](crate::client::fluent_builders::TerminateWorkflowExecution::reason) / [`set_reason(Option<String>)`](crate::client::fluent_builders::TerminateWorkflowExecution::set_reason): <p> A descriptive reason for terminating the workflow execution.</p> /// - [`details(impl Into<String>)`](crate::client::fluent_builders::TerminateWorkflowExecution::details) / [`set_details(Option<String>)`](crate::client::fluent_builders::TerminateWorkflowExecution::set_details): <p> Details for terminating the workflow execution.</p> /// - [`child_policy(ChildPolicy)`](crate::client::fluent_builders::TerminateWorkflowExecution::child_policy) / [`set_child_policy(Option<ChildPolicy>)`](crate::client::fluent_builders::TerminateWorkflowExecution::set_child_policy): <p>If set, specifies the policy to use for the child workflow executions of the workflow execution being terminated. This policy overrides the child policy specified for the workflow execution at registration time or when starting the execution.</p> <p>The supported child policies are:</p> <ul> <li> <p> <code>TERMINATE</code> – The child executions are terminated.</p> </li> <li> <p> <code>REQUEST_CANCEL</code> – A request to cancel is attempted for each child execution by recording a <code>WorkflowExecutionCancelRequested</code> event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.</p> </li> <li> <p> <code>ABANDON</code> – No action is taken. The child executions continue to run.</p> </li> </ul> <note> <p>A child policy for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default child policy was specified at registration time then a fault is returned.</p> </note> /// - On success, responds with [`TerminateWorkflowExecutionOutput`](crate::output::TerminateWorkflowExecutionOutput) /// - On failure, responds with [`SdkError<TerminateWorkflowExecutionError>`](crate::error::TerminateWorkflowExecutionError) pub fn terminate_workflow_execution(&self) -> fluent_builders::TerminateWorkflowExecution { fluent_builders::TerminateWorkflowExecution::new(self.handle.clone()) } /// Constructs a fluent builder for the [`UndeprecateActivityType`](crate::client::fluent_builders::UndeprecateActivityType) operation. 
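///
/// A minimal usage sketch (the domain and the activity type's name/version are placeholders; the `ActivityType::builder()` pattern is assumed from the generated model types, and client construction is elided):
///
/// ```no_run
/// # use aws_sdk_swf::model::ActivityType;
/// # async fn example(client: &aws_sdk_swf::Client) -> Result<(), aws_sdk_swf::Error> {
/// let activity_type = ActivityType::builder()
///     .name("process-payment") // placeholder type name
///     .version("1")            // placeholder type version
///     .build();
/// client
///     .undeprecate_activity_type()
///     .domain("my-domain")
///     .activity_type(activity_type)
///     .send()
///     .await?;
/// # Ok(())
/// # }
/// ```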
/// /// - The fluent builder is configurable: /// - [`domain(impl Into<String>)`](crate::client::fluent_builders::UndeprecateActivityType::domain) / [`set_domain(Option<String>)`](crate::client::fluent_builders::UndeprecateActivityType::set_domain): <p>The name of the domain of the deprecated activity type.</p> /// - [`activity_type(ActivityType)`](crate::client::fluent_builders::UndeprecateActivityType::activity_type) / [`set_activity_type(Option<ActivityType>)`](crate::client::fluent_builders::UndeprecateActivityType::set_activity_type): <p>The activity type to undeprecate.</p> /// - On success, responds with [`UndeprecateActivityTypeOutput`](crate::output::UndeprecateActivityTypeOutput) /// - On failure, responds with [`SdkError<UndeprecateActivityTypeError>`](crate::error::UndeprecateActivityTypeError) pub fn undeprecate_activity_type(&self) -> fluent_builders::UndeprecateActivityType { fluent_builders::UndeprecateActivityType::new(self.handle.clone()) } /// Constructs a fluent builder for the [`UndeprecateDomain`](crate::client::fluent_builders::UndeprecateDomain) operation. /// /// - The fluent builder is configurable: /// - [`name(impl Into<String>)`](crate::client::fluent_builders::UndeprecateDomain::name) / [`set_name(Option<String>)`](crate::client::fluent_builders::UndeprecateDomain::set_name): <p>The name of the domain to undeprecate.</p> /// - On success, responds with [`UndeprecateDomainOutput`](crate::output::UndeprecateDomainOutput) /// - On failure, responds with [`SdkError<UndeprecateDomainError>`](crate::error::UndeprecateDomainError) pub fn undeprecate_domain(&self) -> fluent_builders::UndeprecateDomain { fluent_builders::UndeprecateDomain::new(self.handle.clone()) } /// Constructs a fluent builder for the [`UndeprecateWorkflowType`](crate::client::fluent_builders::UndeprecateWorkflowType) operation. /// /// - The fluent builder is configurable: /// - [`domain(impl Into<String>)`](crate::client::fluent_builders::UndeprecateWorkflowType::domain) / [`set_domain(Option<String>)`](crate::client::fluent_builders::UndeprecateWorkflowType::set_domain): <p>The name of the domain of the deprecated workflow type.</p> /// - [`workflow_type(WorkflowType)`](crate::client::fluent_builders::UndeprecateWorkflowType::workflow_type) / [`set_workflow_type(Option<WorkflowType>)`](crate::client::fluent_builders::UndeprecateWorkflowType::set_workflow_type): <p>The workflow type to undeprecate.</p> /// - On success, responds with [`UndeprecateWorkflowTypeOutput`](crate::output::UndeprecateWorkflowTypeOutput) /// - On failure, responds with [`SdkError<UndeprecateWorkflowTypeError>`](crate::error::UndeprecateWorkflowTypeError) pub fn undeprecate_workflow_type(&self) -> fluent_builders::UndeprecateWorkflowType { fluent_builders::UndeprecateWorkflowType::new(self.handle.clone()) } /// Constructs a fluent builder for the [`UntagResource`](crate::client::fluent_builders::UntagResource) operation. 
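///
/// A minimal usage sketch (the ARN and tag key are placeholders; `tag_keys` is assumed to append one key per call, as generated list setters do, and client construction is elided):
///
/// ```no_run
/// # async fn example(client: &aws_sdk_swf::Client) -> Result<(), aws_sdk_swf::Error> {
/// client
///     .untag_resource()
///     .resource_arn("arn:aws:swf:us-east-1:123456789012:/domain/my-domain") // placeholder ARN
///     .tag_keys("team") // repeat the call to remove additional keys
///     .send()
///     .await?;
/// # Ok(())
/// # }
/// ```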
/// /// - The fluent builder is configurable: /// - [`resource_arn(impl Into<String>)`](crate::client::fluent_builders::UntagResource::resource_arn) / [`set_resource_arn(Option<String>)`](crate::client::fluent_builders::UntagResource::set_resource_arn): <p>The Amazon Resource Name (ARN) for the Amazon SWF domain.</p> /// - [`tag_keys(Vec<String>)`](crate::client::fluent_builders::UntagResource::tag_keys) / [`set_tag_keys(Option<Vec<String>>)`](crate::client::fluent_builders::UntagResource::set_tag_keys): <p>The list of tags to remove from the Amazon SWF domain.</p> /// - On success, responds with [`UntagResourceOutput`](crate::output::UntagResourceOutput) /// - On failure, responds with [`SdkError<UntagResourceError>`](crate::error::UntagResourceError) pub fn untag_resource(&self) -> fluent_builders::UntagResource { fluent_builders::UntagResource::new(self.handle.clone()) } } pub mod fluent_builders { //! //! Utilities to ergonomically construct a request to the service. //! //! Fluent builders are created through the [`Client`](crate::client::Client) by calling //! one if its operation methods. After parameters are set using the builder methods, //! the `send` method can be called to initiate the request. //! /// Fluent builder constructing a request to `CountClosedWorkflowExecutions`. /// /// <p>Returns the number of closed workflow executions within the given domain that meet the specified filtering criteria.</p> <note> /// <p>This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.</p> /// </note> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>Constrain the following parameters by using a <code>Condition</code> element with the appropriate keys.</p> /// <ul> /// <li> <p> <code>tagFilter.tag</code>: String constraint. The key is <code>swf:tagFilter.tag</code>.</p> </li> /// <li> <p> <code>typeFilter.name</code>: String constraint. The key is <code>swf:typeFilter.name</code>.</p> </li> /// <li> <p> <code>typeFilter.version</code>: String constraint. The key is <code>swf:typeFilter.version</code>.</p> </li> /// </ul> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct CountClosedWorkflowExecutions { handle: std::sync::Arc<super::Handle>, inner: crate::input::count_closed_workflow_executions_input::Builder, } impl CountClosedWorkflowExecutions { /// Creates a new `CountClosedWorkflowExecutions`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. 
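///
/// For example, a minimal sketch of handling the result (the domain name is a placeholder, and client construction is elided):
///
/// ```no_run
/// # async fn example(client: &aws_sdk_swf::Client) {
/// match client
///     .count_closed_workflow_executions()
///     .domain("my-domain")
///     .send()
///     .await
/// {
///     Ok(output) => println!("closed executions: {:?}", output),
///     Err(err) => eprintln!("request failed: {}", err),
/// }
/// # }
/// ```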
/// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::CountClosedWorkflowExecutionsOutput, aws_smithy_http::result::SdkError<crate::error::CountClosedWorkflowExecutionsError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the domain containing the workflow executions to count.</p> pub fn domain(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.domain(input.into()); self } /// <p>The name of the domain containing the workflow executions to count.</p> pub fn set_domain(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_domain(input); self } /// <p>If specified, only workflow executions that meet the start time criteria of the filter are counted.</p> <note> /// <p> <code>startTimeFilter</code> and <code>closeTimeFilter</code> are mutually exclusive. You must specify one of these in a request but not both.</p> /// </note> pub fn start_time_filter(mut self, input: crate::model::ExecutionTimeFilter) -> Self { self.inner = self.inner.start_time_filter(input); self } /// <p>If specified, only workflow executions that meet the start time criteria of the filter are counted.</p> <note> /// <p> <code>startTimeFilter</code> and <code>closeTimeFilter</code> are mutually exclusive. You must specify one of these in a request but not both.</p> /// </note> pub fn set_start_time_filter( mut self, input: std::option::Option<crate::model::ExecutionTimeFilter>, ) -> Self { self.inner = self.inner.set_start_time_filter(input); self } /// <p>If specified, only workflow executions that meet the close time criteria of the filter are counted.</p> <note> /// <p> <code>startTimeFilter</code> and <code>closeTimeFilter</code> are mutually exclusive. You must specify one of these in a request but not both.</p> /// </note> pub fn close_time_filter(mut self, input: crate::model::ExecutionTimeFilter) -> Self { self.inner = self.inner.close_time_filter(input); self } /// <p>If specified, only workflow executions that meet the close time criteria of the filter are counted.</p> <note> /// <p> <code>startTimeFilter</code> and <code>closeTimeFilter</code> are mutually exclusive. You must specify one of these in a request but not both.</p> /// </note> pub fn set_close_time_filter( mut self, input: std::option::Option<crate::model::ExecutionTimeFilter>, ) -> Self { self.inner = self.inner.set_close_time_filter(input); self } /// <p>If specified, only workflow executions matching the <code>WorkflowId</code> in the filter are counted.</p> <note> /// <p> <code>closeStatusFilter</code>, <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. 
You can specify at most one of these in a request.</p> /// </note> pub fn execution_filter(mut self, input: crate::model::WorkflowExecutionFilter) -> Self { self.inner = self.inner.execution_filter(input); self } /// <p>If specified, only workflow executions matching the <code>WorkflowId</code> in the filter are counted.</p> <note> /// <p> <code>closeStatusFilter</code>, <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> /// </note> pub fn set_execution_filter( mut self, input: std::option::Option<crate::model::WorkflowExecutionFilter>, ) -> Self { self.inner = self.inner.set_execution_filter(input); self } /// <p>If specified, indicates the type of the workflow executions to be counted.</p> <note> /// <p> <code>closeStatusFilter</code>, <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> /// </note> pub fn type_filter(mut self, input: crate::model::WorkflowTypeFilter) -> Self { self.inner = self.inner.type_filter(input); self } /// <p>If specified, indicates the type of the workflow executions to be counted.</p> <note> /// <p> <code>closeStatusFilter</code>, <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> /// </note> pub fn set_type_filter( mut self, input: std::option::Option<crate::model::WorkflowTypeFilter>, ) -> Self { self.inner = self.inner.set_type_filter(input); self } /// <p>If specified, only executions that have a tag that matches the filter are counted.</p> <note> /// <p> <code>closeStatusFilter</code>, <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> /// </note> pub fn tag_filter(mut self, input: crate::model::TagFilter) -> Self { self.inner = self.inner.tag_filter(input); self } /// <p>If specified, only executions that have a tag that matches the filter are counted.</p> <note> /// <p> <code>closeStatusFilter</code>, <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> /// </note> pub fn set_tag_filter( mut self, input: std::option::Option<crate::model::TagFilter>, ) -> Self { self.inner = self.inner.set_tag_filter(input); self } /// <p>If specified, only workflow executions that match this close status are counted. This filter has an effect only if <code>executionStatus</code> is specified as <code>CLOSED</code>.</p> <note> /// <p> <code>closeStatusFilter</code>, <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> /// </note> pub fn close_status_filter(mut self, input: crate::model::CloseStatusFilter) -> Self { self.inner = self.inner.close_status_filter(input); self } /// <p>If specified, only workflow executions that match this close status are counted. This filter has an effect only if <code>executionStatus</code> is specified as <code>CLOSED</code>.</p> <note> /// <p> <code>closeStatusFilter</code>, <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. 
You can specify at most one of these in a request.</p> /// </note> pub fn set_close_status_filter( mut self, input: std::option::Option<crate::model::CloseStatusFilter>, ) -> Self { self.inner = self.inner.set_close_status_filter(input); self } } /// Fluent builder constructing a request to `CountOpenWorkflowExecutions`. /// /// <p>Returns the number of open workflow executions within the given domain that meet the specified filtering criteria.</p> <note> /// <p>This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.</p> /// </note> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>Constrain the following parameters by using a <code>Condition</code> element with the appropriate keys.</p> /// <ul> /// <li> <p> <code>tagFilter.tag</code>: String constraint. The key is <code>swf:tagFilter.tag</code>.</p> </li> /// <li> <p> <code>typeFilter.name</code>: String constraint. The key is <code>swf:typeFilter.name</code>.</p> </li> /// <li> <p> <code>typeFilter.version</code>: String constraint. The key is <code>swf:typeFilter.version</code>.</p> </li> /// </ul> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct CountOpenWorkflowExecutions { handle: std::sync::Arc<super::Handle>, inner: crate::input::count_open_workflow_executions_input::Builder, } impl CountOpenWorkflowExecutions { /// Creates a new `CountOpenWorkflowExecutions`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::CountOpenWorkflowExecutionsOutput, aws_smithy_http::result::SdkError<crate::error::CountOpenWorkflowExecutionsError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the domain containing the workflow executions to count.</p> pub fn domain(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.domain(input.into()); self } /// <p>The name of the domain containing the workflow executions to count.</p> pub fn set_domain(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_domain(input); self } /// <p>Specifies the start time criteria that workflow executions must meet in order to be counted.</p> pub fn start_time_filter(mut self, input: crate::model::ExecutionTimeFilter) -> Self { self.inner = self.inner.start_time_filter(input); self } /// <p>Specifies the start time criteria that workflow executions must meet in order to be counted.</p> pub fn set_start_time_filter( mut self, input: std::option::Option<crate::model::ExecutionTimeFilter>, ) -> Self { self.inner = self.inner.set_start_time_filter(input); self } /// <p>Specifies the type of the workflow executions to be counted.</p> <note> /// <p> <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> /// </note> pub fn type_filter(mut self, input: crate::model::WorkflowTypeFilter) -> Self { self.inner = self.inner.type_filter(input); self } /// <p>Specifies the type of the workflow executions to be counted.</p> <note> /// <p> <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> /// </note> pub fn set_type_filter( mut self, input: std::option::Option<crate::model::WorkflowTypeFilter>, ) -> Self { self.inner = self.inner.set_type_filter(input); self } /// <p>If specified, only executions that have a tag that matches the filter are counted.</p> <note> /// <p> <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> /// </note> pub fn tag_filter(mut self, input: crate::model::TagFilter) -> Self { self.inner = self.inner.tag_filter(input); self } /// <p>If specified, only executions that have a tag that matches the filter are counted.</p> <note> /// <p> <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> /// </note> pub fn set_tag_filter( mut self, input: std::option::Option<crate::model::TagFilter>, ) -> Self { self.inner = self.inner.set_tag_filter(input); self } /// <p>If specified, only workflow executions matching the <code>WorkflowId</code> in the filter are counted.</p> <note> /// <p> <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> /// </note> pub fn execution_filter(mut self, input: crate::model::WorkflowExecutionFilter) -> Self { self.inner = self.inner.execution_filter(input); self } /// <p>If specified, only workflow executions matching the <code>WorkflowId</code> in the filter are counted.</p> <note> /// <p> <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. 
You can specify at most one of these in a request.</p> /// </note> pub fn set_execution_filter( mut self, input: std::option::Option<crate::model::WorkflowExecutionFilter>, ) -> Self { self.inner = self.inner.set_execution_filter(input); self } } /// Fluent builder constructing a request to `CountPendingActivityTasks`. /// /// <p>Returns the estimated number of activity tasks in the specified task list. The count returned is an approximation and isn't guaranteed to be exact. If you specify a task list that no activity task was ever scheduled in then <code>0</code> is returned.</p> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>Constrain the <code>taskList.name</code> parameter by using a <code>Condition</code> element with the <code>swf:taskList.name</code> key to allow the action to access only certain task lists.</p> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct CountPendingActivityTasks { handle: std::sync::Arc<super::Handle>, inner: crate::input::count_pending_activity_tasks_input::Builder, } impl CountPendingActivityTasks { /// Creates a new `CountPendingActivityTasks`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::CountPendingActivityTasksOutput, aws_smithy_http::result::SdkError<crate::error::CountPendingActivityTasksError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the domain that contains the task list.</p> pub fn domain(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.domain(input.into()); self } /// <p>The name of the domain that contains the task list.</p> pub fn set_domain(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_domain(input); self } /// <p>The name of the task list.</p> pub fn task_list(mut self, input: crate::model::TaskList) -> Self { self.inner = self.inner.task_list(input); self } /// <p>The name of the task list.</p> pub fn set_task_list(mut self, input: std::option::Option<crate::model::TaskList>) -> Self { self.inner = self.inner.set_task_list(input); self } } /// Fluent builder constructing a request to `CountPendingDecisionTasks`. /// /// <p>Returns the estimated number of decision tasks in the specified task list. The count returned is an approximation and isn't guaranteed to be exact. If you specify a task list that no decision task was ever scheduled in then <code>0</code> is returned.</p> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>Constrain the <code>taskList.name</code> parameter by using a <code>Condition</code> element with the <code>swf:taskList.name</code> key to allow the action to access only certain task lists.</p> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct CountPendingDecisionTasks { handle: std::sync::Arc<super::Handle>, inner: crate::input::count_pending_decision_tasks_input::Builder, } impl CountPendingDecisionTasks { /// Creates a new `CountPendingDecisionTasks`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::CountPendingDecisionTasksOutput, aws_smithy_http::result::SdkError<crate::error::CountPendingDecisionTasksError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the domain that contains the task list.</p> pub fn domain(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.domain(input.into()); self } /// <p>The name of the domain that contains the task list.</p> pub fn set_domain(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_domain(input); self } /// <p>The name of the task list.</p> pub fn task_list(mut self, input: crate::model::TaskList) -> Self { self.inner = self.inner.task_list(input); self } /// <p>The name of the task list.</p> pub fn set_task_list(mut self, input: std::option::Option<crate::model::TaskList>) -> Self { self.inner = self.inner.set_task_list(input); self } } /// Fluent builder constructing a request to `DeprecateActivityType`. /// /// <p>Deprecates the specified <i>activity type</i>. After an activity type has been deprecated, you cannot create new tasks of that activity type. Tasks of this type that were scheduled before the type was deprecated continue to run.</p> <note> /// <p>This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.</p> /// </note> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>Constrain the following parameters by using a <code>Condition</code> element with the appropriate keys.</p> /// <ul> /// <li> <p> <code>activityType.name</code>: String constraint. The key is <code>swf:activityType.name</code>.</p> </li> /// <li> <p> <code>activityType.version</code>: String constraint. The key is <code>swf:activityType.version</code>.</p> </li> /// </ul> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DeprecateActivityType { handle: std::sync::Arc<super::Handle>, inner: crate::input::deprecate_activity_type_input::Builder, } impl DeprecateActivityType { /// Creates a new `DeprecateActivityType`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
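///
/// A minimal usage sketch (identifiers are placeholders; the `ActivityType::builder()` pattern is assumed from the generated model types, and client construction is elided):
///
/// ```no_run
/// # use aws_sdk_swf::model::ActivityType;
/// # async fn example(client: &aws_sdk_swf::Client) -> Result<(), aws_sdk_swf::Error> {
/// client
///     .deprecate_activity_type()
///     .domain("my-domain") // placeholder domain name
///     .activity_type(ActivityType::builder().name("process-payment").version("1").build())
///     .send()
///     .await?;
/// # Ok(())
/// # }
/// ```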
pub async fn send( self, ) -> std::result::Result< crate::output::DeprecateActivityTypeOutput, aws_smithy_http::result::SdkError<crate::error::DeprecateActivityTypeError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the domain in which the activity type is registered.</p> pub fn domain(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.domain(input.into()); self } /// <p>The name of the domain in which the activity type is registered.</p> pub fn set_domain(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_domain(input); self } /// <p>The activity type to deprecate.</p> pub fn activity_type(mut self, input: crate::model::ActivityType) -> Self { self.inner = self.inner.activity_type(input); self } /// <p>The activity type to deprecate.</p> pub fn set_activity_type( mut self, input: std::option::Option<crate::model::ActivityType>, ) -> Self { self.inner = self.inner.set_activity_type(input); self } } /// Fluent builder constructing a request to `DeprecateDomain`. /// /// <p>Deprecates the specified domain. After a domain has been deprecated, it cannot be used to create new workflow executions or register new types. However, you can still use visibility actions on this domain. Deprecating a domain also deprecates all activity and workflow types registered in the domain. Executions that were started before the domain was deprecated continue to run.</p> <note> /// <p>This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.</p> /// </note> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>You cannot use an IAM policy to constrain this action's parameters.</p> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DeprecateDomain { handle: std::sync::Arc<super::Handle>, inner: crate::input::deprecate_domain_input::Builder, } impl DeprecateDomain { /// Creates a new `DeprecateDomain`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. 
Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::DeprecateDomainOutput, aws_smithy_http::result::SdkError<crate::error::DeprecateDomainError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the domain to deprecate.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.name(input.into()); self } /// <p>The name of the domain to deprecate.</p> pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_name(input); self } } /// Fluent builder constructing a request to `DeprecateWorkflowType`. /// /// <p>Deprecates the specified <i>workflow type</i>. After a workflow type has been deprecated, you cannot create new executions of that type. Executions that were started before the type was deprecated continue to run. A deprecated workflow type may still be used when calling visibility actions.</p> <note> /// <p>This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.</p> /// </note> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>Constrain the following parameters by using a <code>Condition</code> element with the appropriate keys.</p> /// <ul> /// <li> <p> <code>workflowType.name</code>: String constraint. The key is <code>swf:workflowType.name</code>.</p> </li> /// <li> <p> <code>workflowType.version</code>: String constraint. The key is <code>swf:workflowType.version</code>.</p> </li> /// </ul> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DeprecateWorkflowType { handle: std::sync::Arc<super::Handle>, inner: crate::input::deprecate_workflow_type_input::Builder, } impl DeprecateWorkflowType { /// Creates a new `DeprecateWorkflowType`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
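///
/// A minimal usage sketch (the domain and the workflow type's name/version are placeholders; the `WorkflowType::builder()` pattern is assumed from the generated model types, and client construction is elided):
///
/// ```no_run
/// # use aws_sdk_swf::model::WorkflowType;
/// # async fn example(client: &aws_sdk_swf::Client) -> Result<(), aws_sdk_swf::Error> {
/// let workflow_type = WorkflowType::builder()
///     .name("order-workflow") // placeholder type name
///     .version("1")           // placeholder type version
///     .build();
/// client
///     .deprecate_workflow_type()
///     .domain("my-domain")
///     .workflow_type(workflow_type)
///     .send()
///     .await?;
/// # Ok(())
/// # }
/// ```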
pub async fn send( self, ) -> std::result::Result< crate::output::DeprecateWorkflowTypeOutput, aws_smithy_http::result::SdkError<crate::error::DeprecateWorkflowTypeError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the domain in which the workflow type is registered.</p> pub fn domain(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.domain(input.into()); self } /// <p>The name of the domain in which the workflow type is registered.</p> pub fn set_domain(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_domain(input); self } /// <p>The workflow type to deprecate.</p> pub fn workflow_type(mut self, input: crate::model::WorkflowType) -> Self { self.inner = self.inner.workflow_type(input); self } /// <p>The workflow type to deprecate.</p> pub fn set_workflow_type( mut self, input: std::option::Option<crate::model::WorkflowType>, ) -> Self { self.inner = self.inner.set_workflow_type(input); self } } /// Fluent builder constructing a request to `DescribeActivityType`. /// /// <p>Returns information about the specified activity type. This includes configuration settings provided when the type was registered and other general information about the type.</p> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>Constrain the following parameters by using a <code>Condition</code> element with the appropriate keys.</p> /// <ul> /// <li> <p> <code>activityType.name</code>: String constraint. The key is <code>swf:activityType.name</code>.</p> </li> /// <li> <p> <code>activityType.version</code>: String constraint. The key is <code>swf:activityType.version</code>.</p> </li> /// </ul> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DescribeActivityType { handle: std::sync::Arc<super::Handle>, inner: crate::input::describe_activity_type_input::Builder, } impl DescribeActivityType { /// Creates a new `DescribeActivityType`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
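///
/// A minimal usage sketch (identifiers are placeholders; the `ActivityType::builder()` pattern is assumed from the generated model types, and client construction is elided):
///
/// ```no_run
/// # use aws_sdk_swf::model::ActivityType;
/// # async fn example(client: &aws_sdk_swf::Client) -> Result<(), aws_sdk_swf::Error> {
/// let info = client
///     .describe_activity_type()
///     .domain("my-domain") // placeholder domain name
///     .activity_type(ActivityType::builder().name("process-payment").version("1").build())
///     .send()
///     .await?;
/// println!("{:?}", info); // the output's Debug impl shows configuration and general info
/// # Ok(())
/// # }
/// ```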
pub async fn send( self, ) -> std::result::Result< crate::output::DescribeActivityTypeOutput, aws_smithy_http::result::SdkError<crate::error::DescribeActivityTypeError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the domain in which the activity type is registered.</p> pub fn domain(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.domain(input.into()); self } /// <p>The name of the domain in which the activity type is registered.</p> pub fn set_domain(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_domain(input); self } /// <p>The activity type to get information about. Activity types are identified by the <code>name</code> and <code>version</code> that were supplied when the activity was registered.</p> pub fn activity_type(mut self, input: crate::model::ActivityType) -> Self { self.inner = self.inner.activity_type(input); self } /// <p>The activity type to get information about. Activity types are identified by the <code>name</code> and <code>version</code> that were supplied when the activity was registered.</p> pub fn set_activity_type( mut self, input: std::option::Option<crate::model::ActivityType>, ) -> Self { self.inner = self.inner.set_activity_type(input); self } } /// Fluent builder constructing a request to `DescribeDomain`. /// /// <p>Returns information about the specified domain, including description and status.</p> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>You cannot use an IAM policy to constrain this action's parameters.</p> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DescribeDomain { handle: std::sync::Arc<super::Handle>, inner: crate::input::describe_domain_input::Builder, } impl DescribeDomain { /// Creates a new `DescribeDomain`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
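    ///
    /// A minimal sketch, assuming a configured [`Client`](crate::Client) and a
    /// placeholder domain name:
    ///
    /// ```no_run
    /// # use aws_sdk_swf::{Client, Error};
    /// # async fn example(client: &Client) -> Result<(), Error> {
    /// let resp = client
    ///     .describe_domain()
    ///     .name("example-domain")
    ///     .send()
    ///     .await?;
    /// // the output includes the domain's description and registration status
    /// # Ok(())
    /// # }
    /// ```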
pub async fn send( self, ) -> std::result::Result< crate::output::DescribeDomainOutput, aws_smithy_http::result::SdkError<crate::error::DescribeDomainError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the domain to describe.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.name(input.into()); self } /// <p>The name of the domain to describe.</p> pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_name(input); self } } /// Fluent builder constructing a request to `DescribeWorkflowExecution`. /// /// <p>Returns information about the specified workflow execution including its type and some statistics.</p> <note> /// <p>This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.</p> /// </note> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>You cannot use an IAM policy to constrain this action's parameters.</p> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DescribeWorkflowExecution { handle: std::sync::Arc<super::Handle>, inner: crate::input::describe_workflow_execution_input::Builder, } impl DescribeWorkflowExecution { /// Creates a new `DescribeWorkflowExecution`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::DescribeWorkflowExecutionOutput, aws_smithy_http::result::SdkError<crate::error::DescribeWorkflowExecutionError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the domain containing the workflow execution.</p> pub fn domain(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.domain(input.into()); self } /// <p>The name of the domain containing the workflow execution.</p> pub fn set_domain(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_domain(input); self } /// <p>The workflow execution to describe.</p> pub fn execution(mut self, input: crate::model::WorkflowExecution) -> Self { self.inner = self.inner.execution(input); self } /// <p>The workflow execution to describe.</p> pub fn set_execution( mut self, input: std::option::Option<crate::model::WorkflowExecution>, ) -> Self { self.inner = self.inner.set_execution(input); self } } /// Fluent builder constructing a request to `DescribeWorkflowType`. /// /// <p>Returns information about the specified <i>workflow type</i>. This includes configuration settings specified when the type was registered and other information such as creation date, current status, etc.</p> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>Constrain the following parameters by using a <code>Condition</code> element with the appropriate keys.</p> /// <ul> /// <li> <p> <code>workflowType.name</code>: String constraint. The key is <code>swf:workflowType.name</code>.</p> </li> /// <li> <p> <code>workflowType.version</code>: String constraint. The key is <code>swf:workflowType.version</code>.</p> </li> /// </ul> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DescribeWorkflowType { handle: std::sync::Arc<super::Handle>, inner: crate::input::describe_workflow_type_input::Builder, } impl DescribeWorkflowType { /// Creates a new `DescribeWorkflowType`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::DescribeWorkflowTypeOutput, aws_smithy_http::result::SdkError<crate::error::DescribeWorkflowTypeError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the domain in which this workflow type is registered.</p> pub fn domain(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.domain(input.into()); self } /// <p>The name of the domain in which this workflow type is registered.</p> pub fn set_domain(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_domain(input); self } /// <p>The workflow type to describe.</p> pub fn workflow_type(mut self, input: crate::model::WorkflowType) -> Self { self.inner = self.inner.workflow_type(input); self } /// <p>The workflow type to describe.</p> pub fn set_workflow_type( mut self, input: std::option::Option<crate::model::WorkflowType>, ) -> Self { self.inner = self.inner.set_workflow_type(input); self } } /// Fluent builder constructing a request to `GetWorkflowExecutionHistory`. /// /// <p>Returns the history of the specified workflow execution. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the <code>nextPageToken</code> returned by the initial call.</p> <note> /// <p>This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.</p> /// </note> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>You cannot use an IAM policy to constrain this action's parameters.</p> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct GetWorkflowExecutionHistory { handle: std::sync::Arc<super::Handle>, inner: crate::input::get_workflow_execution_history_input::Builder, } impl GetWorkflowExecutionHistory { /// Creates a new `GetWorkflowExecutionHistory`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::GetWorkflowExecutionHistoryOutput, aws_smithy_http::result::SdkError<crate::error::GetWorkflowExecutionHistoryError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// Create a paginator for this request /// /// Paginators are used by calling [`send().await`](crate::paginator::GetWorkflowExecutionHistoryPaginator::send) which returns a [`Stream`](tokio_stream::Stream). pub fn into_paginator(self) -> crate::paginator::GetWorkflowExecutionHistoryPaginator { crate::paginator::GetWorkflowExecutionHistoryPaginator::new(self.handle, self.inner) } /// <p>The name of the domain containing the workflow execution.</p> pub fn domain(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.domain(input.into()); self } /// <p>The name of the domain containing the workflow execution.</p> pub fn set_domain(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_domain(input); self } /// <p>Specifies the workflow execution for which to return the history.</p> pub fn execution(mut self, input: crate::model::WorkflowExecution) -> Self { self.inner = self.inner.execution(input); self } /// <p>Specifies the workflow execution for which to return the history.</p> pub fn set_execution( mut self, input: std::option::Option<crate::model::WorkflowExecution>, ) -> Self { self.inner = self.inner.set_execution(input); self } /// <p>If <code>NextPageToken</code> is returned there are more results available. The value of <code>NextPageToken</code> is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a <code>400</code> error: "<code>Specified token has exceeded its maximum lifetime</code>". </p> /// <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call. </p> pub fn next_page_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_page_token(input.into()); self } /// <p>If <code>NextPageToken</code> is returned there are more results available. The value of <code>NextPageToken</code> is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a <code>400</code> error: "<code>Specified token has exceeded its maximum lifetime</code>". </p> /// <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call. </p> pub fn set_next_page_token( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_next_page_token(input); self } /// <p>The maximum number of results that are returned per call. Use <code>nextPageToken</code> to obtain further pages of results. </p> pub fn maximum_page_size(mut self, input: i32) -> Self { self.inner = self.inner.maximum_page_size(input); self } /// <p>The maximum number of results that are returned per call. Use <code>nextPageToken</code> to obtain further pages of results. </p> pub fn set_maximum_page_size(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_maximum_page_size(input); self } /// <p>When set to <code>true</code>, returns the events in reverse order. 
By default the results are returned in ascending order of the <code>eventTimeStamp</code> of the events.</p> pub fn reverse_order(mut self, input: bool) -> Self { self.inner = self.inner.reverse_order(input); self } /// <p>When set to <code>true</code>, returns the events in reverse order. By default the results are returned in ascending order of the <code>eventTimeStamp</code> of the events.</p> pub fn set_reverse_order(mut self, input: std::option::Option<bool>) -> Self { self.inner = self.inner.set_reverse_order(input); self } } /// Fluent builder constructing a request to `ListActivityTypes`. /// /// <p>Returns information about all activities registered in the specified domain that match the specified name and registration status. The result includes information like creation date, current status of the activity, etc. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the <code>nextPageToken</code> returned by the initial call.</p> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>You cannot use an IAM policy to constrain this action's parameters.</p> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct ListActivityTypes { handle: std::sync::Arc<super::Handle>, inner: crate::input::list_activity_types_input::Builder, } impl ListActivityTypes { /// Creates a new `ListActivityTypes`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::ListActivityTypesOutput, aws_smithy_http::result::SdkError<crate::error::ListActivityTypesError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// Create a paginator for this request /// /// Paginators are used by calling [`send().await`](crate::paginator::ListActivityTypesPaginator::send) which returns a [`Stream`](tokio_stream::Stream). 
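    ///
    /// A short paging sketch, assuming a configured [`Client`](crate::Client) and
    /// that `tokio_stream` is available for `StreamExt::next`; the domain name is
    /// a placeholder:
    ///
    /// ```no_run
    /// # use aws_sdk_swf::{Client, Error};
    /// # async fn example(client: &Client) -> Result<(), Error> {
    /// use tokio_stream::StreamExt;
    /// let mut pages = client
    ///     .list_activity_types()
    ///     .domain("example-domain")
    ///     .registration_status(aws_sdk_swf::model::RegistrationStatus::Registered)
    ///     .into_paginator()
    ///     .send();
    /// while let Some(page) = pages.next().await {
    ///     let page = page?;
    ///     // each `page` is a full ListActivityTypesOutput
    /// }
    /// # Ok(())
    /// # }
    /// ```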
pub fn into_paginator(self) -> crate::paginator::ListActivityTypesPaginator { crate::paginator::ListActivityTypesPaginator::new(self.handle, self.inner) } /// <p>The name of the domain in which the activity types have been registered.</p> pub fn domain(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.domain(input.into()); self } /// <p>The name of the domain in which the activity types have been registered.</p> pub fn set_domain(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_domain(input); self } /// <p>If specified, only lists the activity types that have this name.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.name(input.into()); self } /// <p>If specified, only lists the activity types that have this name.</p> pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_name(input); self } /// <p>Specifies the registration status of the activity types to list.</p> pub fn registration_status(mut self, input: crate::model::RegistrationStatus) -> Self { self.inner = self.inner.registration_status(input); self } /// <p>Specifies the registration status of the activity types to list.</p> pub fn set_registration_status( mut self, input: std::option::Option<crate::model::RegistrationStatus>, ) -> Self { self.inner = self.inner.set_registration_status(input); self } /// <p>If <code>NextPageToken</code> is returned there are more results available. The value of <code>NextPageToken</code> is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a <code>400</code> error: "<code>Specified token has exceeded its maximum lifetime</code>". </p> /// <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call. </p> pub fn next_page_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_page_token(input.into()); self } /// <p>If <code>NextPageToken</code> is returned there are more results available. The value of <code>NextPageToken</code> is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a <code>400</code> error: "<code>Specified token has exceeded its maximum lifetime</code>". </p> /// <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call. </p> pub fn set_next_page_token( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_next_page_token(input); self } /// <p>The maximum number of results that are returned per call. Use <code>nextPageToken</code> to obtain further pages of results. </p> pub fn maximum_page_size(mut self, input: i32) -> Self { self.inner = self.inner.maximum_page_size(input); self } /// <p>The maximum number of results that are returned per call. Use <code>nextPageToken</code> to obtain further pages of results. </p> pub fn set_maximum_page_size(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_maximum_page_size(input); self } /// <p>When set to <code>true</code>, returns the results in reverse order. 
By default, the results are returned in ascending alphabetical order by <code>name</code> of the activity types.</p> pub fn reverse_order(mut self, input: bool) -> Self { self.inner = self.inner.reverse_order(input); self } /// <p>When set to <code>true</code>, returns the results in reverse order. By default, the results are returned in ascending alphabetical order by <code>name</code> of the activity types.</p> pub fn set_reverse_order(mut self, input: std::option::Option<bool>) -> Self { self.inner = self.inner.set_reverse_order(input); self } } /// Fluent builder constructing a request to `ListClosedWorkflowExecutions`. /// /// <p>Returns a list of closed workflow executions in the specified domain that meet the filtering criteria. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.</p> <note> /// <p>This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.</p> /// </note> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>Constrain the following parameters by using a <code>Condition</code> element with the appropriate keys.</p> /// <ul> /// <li> <p> <code>tagFilter.tag</code>: String constraint. The key is <code>swf:tagFilter.tag</code>.</p> </li> /// <li> <p> <code>typeFilter.name</code>: String constraint. The key is <code>swf:typeFilter.name</code>.</p> </li> /// <li> <p> <code>typeFilter.version</code>: String constraint. The key is <code>swf:typeFilter.version</code>.</p> </li> /// </ul> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct ListClosedWorkflowExecutions { handle: std::sync::Arc<super::Handle>, inner: crate::input::list_closed_workflow_executions_input::Builder, } impl ListClosedWorkflowExecutions { /// Creates a new `ListClosedWorkflowExecutions`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::ListClosedWorkflowExecutionsOutput, aws_smithy_http::result::SdkError<crate::error::ListClosedWorkflowExecutionsError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// Create a paginator for this request /// /// Paginators are used by calling [`send().await`](crate::paginator::ListClosedWorkflowExecutionsPaginator::send) which returns a [`Stream`](tokio_stream::Stream). pub fn into_paginator(self) -> crate::paginator::ListClosedWorkflowExecutionsPaginator { crate::paginator::ListClosedWorkflowExecutionsPaginator::new(self.handle, self.inner) } /// <p>The name of the domain that contains the workflow executions to list.</p> pub fn domain(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.domain(input.into()); self } /// <p>The name of the domain that contains the workflow executions to list.</p> pub fn set_domain(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_domain(input); self } /// <p>If specified, the workflow executions are included in the returned results based on whether their start times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their start times.</p> <note> /// <p> <code>startTimeFilter</code> and <code>closeTimeFilter</code> are mutually exclusive. You must specify one of these in a request but not both.</p> /// </note> pub fn start_time_filter(mut self, input: crate::model::ExecutionTimeFilter) -> Self { self.inner = self.inner.start_time_filter(input); self } /// <p>If specified, the workflow executions are included in the returned results based on whether their start times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their start times.</p> <note> /// <p> <code>startTimeFilter</code> and <code>closeTimeFilter</code> are mutually exclusive. You must specify one of these in a request but not both.</p> /// </note> pub fn set_start_time_filter( mut self, input: std::option::Option<crate::model::ExecutionTimeFilter>, ) -> Self { self.inner = self.inner.set_start_time_filter(input); self } /// <p>If specified, the workflow executions are included in the returned results based on whether their close times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their close times.</p> <note> /// <p> <code>startTimeFilter</code> and <code>closeTimeFilter</code> are mutually exclusive. You must specify one of these in a request but not both.</p> /// </note> pub fn close_time_filter(mut self, input: crate::model::ExecutionTimeFilter) -> Self { self.inner = self.inner.close_time_filter(input); self } /// <p>If specified, the workflow executions are included in the returned results based on whether their close times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their close times.</p> <note> /// <p> <code>startTimeFilter</code> and <code>closeTimeFilter</code> are mutually exclusive. 
You must specify one of these in a request but not both.</p> /// </note> pub fn set_close_time_filter( mut self, input: std::option::Option<crate::model::ExecutionTimeFilter>, ) -> Self { self.inner = self.inner.set_close_time_filter(input); self } /// <p>If specified, only workflow executions matching the workflow ID specified in the filter are returned.</p> <note> /// <p> <code>closeStatusFilter</code>, <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> /// </note> pub fn execution_filter(mut self, input: crate::model::WorkflowExecutionFilter) -> Self { self.inner = self.inner.execution_filter(input); self } /// <p>If specified, only workflow executions matching the workflow ID specified in the filter are returned.</p> <note> /// <p> <code>closeStatusFilter</code>, <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> /// </note> pub fn set_execution_filter( mut self, input: std::option::Option<crate::model::WorkflowExecutionFilter>, ) -> Self { self.inner = self.inner.set_execution_filter(input); self } /// <p>If specified, only workflow executions that match this <i>close status</i> are listed. For example, if TERMINATED is specified, then only TERMINATED workflow executions are listed.</p> <note> /// <p> <code>closeStatusFilter</code>, <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> /// </note> pub fn close_status_filter(mut self, input: crate::model::CloseStatusFilter) -> Self { self.inner = self.inner.close_status_filter(input); self } /// <p>If specified, only workflow executions that match this <i>close status</i> are listed. For example, if TERMINATED is specified, then only TERMINATED workflow executions are listed.</p> <note> /// <p> <code>closeStatusFilter</code>, <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> /// </note> pub fn set_close_status_filter( mut self, input: std::option::Option<crate::model::CloseStatusFilter>, ) -> Self { self.inner = self.inner.set_close_status_filter(input); self } /// <p>If specified, only executions of the type specified in the filter are returned.</p> <note> /// <p> <code>closeStatusFilter</code>, <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> /// </note> pub fn type_filter(mut self, input: crate::model::WorkflowTypeFilter) -> Self { self.inner = self.inner.type_filter(input); self } /// <p>If specified, only executions of the type specified in the filter are returned.</p> <note> /// <p> <code>closeStatusFilter</code>, <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> /// </note> pub fn set_type_filter( mut self, input: std::option::Option<crate::model::WorkflowTypeFilter>, ) -> Self { self.inner = self.inner.set_type_filter(input); self } /// <p>If specified, only executions that have the matching tag are listed.</p> <note> /// <p> <code>closeStatusFilter</code>, <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. 
You can specify at most one of these in a request.</p>
/// </note>
pub fn tag_filter(mut self, input: crate::model::TagFilter) -> Self {
    self.inner = self.inner.tag_filter(input);
    self
} /// <p>If specified, only executions that have the matching tag are listed.</p> <note> /// <p> <code>closeStatusFilter</code>, <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> /// </note> pub fn set_tag_filter( mut self, input: std::option::Option<crate::model::TagFilter>, ) -> Self { self.inner = self.inner.set_tag_filter(input); self } /// <p>If <code>NextPageToken</code> is returned there are more results available. The value of <code>NextPageToken</code> is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a <code>400</code> error: "<code>Specified token has exceeded its maximum lifetime</code>". </p> /// <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call. </p> pub fn next_page_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_page_token(input.into()); self } /// <p>If <code>NextPageToken</code> is returned there are more results available. The value of <code>NextPageToken</code> is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a <code>400</code> error: "<code>Specified token has exceeded its maximum lifetime</code>". </p> /// <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call. </p> pub fn set_next_page_token( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_next_page_token(input); self } /// <p>The maximum number of results that are returned per call. Use <code>nextPageToken</code> to obtain further pages of results. </p> pub fn maximum_page_size(mut self, input: i32) -> Self { self.inner = self.inner.maximum_page_size(input); self } /// <p>The maximum number of results that are returned per call. Use <code>nextPageToken</code> to obtain further pages of results. </p> pub fn set_maximum_page_size(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_maximum_page_size(input); self } /// <p>When set to <code>true</code>, returns the results in reverse order. By default the results are returned in descending order of the start or the close time of the executions.</p> pub fn reverse_order(mut self, input: bool) -> Self { self.inner = self.inner.reverse_order(input); self } /// <p>When set to <code>true</code>, returns the results in reverse order. By default the results are returned in descending order of the start or the close time of the executions.</p> pub fn set_reverse_order(mut self, input: std::option::Option<bool>) -> Self { self.inner = self.inner.set_reverse_order(input); self } } /// Fluent builder constructing a request to `ListDomains`. /// /// <p>Returns the list of domains registered in the account. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.</p> <note> /// <p>This operation is eventually consistent. 
The results are best effort and may not exactly reflect recent updates and changes.</p> /// </note> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains. The element must be set to <code>arn:aws:swf::AccountID:domain/*</code>, where <i>AccountID</i> is the account ID, with no dashes.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>You cannot use an IAM policy to constrain this action's parameters.</p> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct ListDomains { handle: std::sync::Arc<super::Handle>, inner: crate::input::list_domains_input::Builder, } impl ListDomains { /// Creates a new `ListDomains`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::ListDomainsOutput, aws_smithy_http::result::SdkError<crate::error::ListDomainsError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// Create a paginator for this request /// /// Paginators are used by calling [`send().await`](crate::paginator::ListDomainsPaginator::send) which returns a [`Stream`](tokio_stream::Stream). pub fn into_paginator(self) -> crate::paginator::ListDomainsPaginator { crate::paginator::ListDomainsPaginator::new(self.handle, self.inner) } /// <p>If <code>NextPageToken</code> is returned there are more results available. The value of <code>NextPageToken</code> is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a <code>400</code> error: "<code>Specified token has exceeded its maximum lifetime</code>". </p> /// <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call. </p> pub fn next_page_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_page_token(input.into()); self } /// <p>If <code>NextPageToken</code> is returned there are more results available. 
The value of <code>NextPageToken</code> is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a <code>400</code> error: "<code>Specified token has exceeded its maximum lifetime</code>". </p> /// <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call. </p> pub fn set_next_page_token( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_next_page_token(input); self } /// <p>Specifies the registration status of the domains to list.</p> pub fn registration_status(mut self, input: crate::model::RegistrationStatus) -> Self { self.inner = self.inner.registration_status(input); self } /// <p>Specifies the registration status of the domains to list.</p> pub fn set_registration_status( mut self, input: std::option::Option<crate::model::RegistrationStatus>, ) -> Self { self.inner = self.inner.set_registration_status(input); self } /// <p>The maximum number of results that are returned per call. Use <code>nextPageToken</code> to obtain further pages of results. </p> pub fn maximum_page_size(mut self, input: i32) -> Self { self.inner = self.inner.maximum_page_size(input); self } /// <p>The maximum number of results that are returned per call. Use <code>nextPageToken</code> to obtain further pages of results. </p> pub fn set_maximum_page_size(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_maximum_page_size(input); self } /// <p>When set to <code>true</code>, returns the results in reverse order. By default, the results are returned in ascending alphabetical order by <code>name</code> of the domains.</p> pub fn reverse_order(mut self, input: bool) -> Self { self.inner = self.inner.reverse_order(input); self } /// <p>When set to <code>true</code>, returns the results in reverse order. By default, the results are returned in ascending alphabetical order by <code>name</code> of the domains.</p> pub fn set_reverse_order(mut self, input: std::option::Option<bool>) -> Self { self.inner = self.inner.set_reverse_order(input); self } } /// Fluent builder constructing a request to `ListOpenWorkflowExecutions`. /// /// <p>Returns a list of open workflow executions in the specified domain that meet the filtering criteria. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call.</p> <note> /// <p>This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.</p> /// </note> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>Constrain the following parameters by using a <code>Condition</code> element with the appropriate keys.</p> /// <ul> /// <li> <p> <code>tagFilter.tag</code>: String constraint. The key is <code>swf:tagFilter.tag</code>.</p> </li> /// <li> <p> <code>typeFilter.name</code>: String constraint. 
The key is <code>swf:typeFilter.name</code>.</p> </li> /// <li> <p> <code>typeFilter.version</code>: String constraint. The key is <code>swf:typeFilter.version</code>.</p> </li> /// </ul> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct ListOpenWorkflowExecutions { handle: std::sync::Arc<super::Handle>, inner: crate::input::list_open_workflow_executions_input::Builder, } impl ListOpenWorkflowExecutions { /// Creates a new `ListOpenWorkflowExecutions`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::ListOpenWorkflowExecutionsOutput, aws_smithy_http::result::SdkError<crate::error::ListOpenWorkflowExecutionsError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// Create a paginator for this request /// /// Paginators are used by calling [`send().await`](crate::paginator::ListOpenWorkflowExecutionsPaginator::send) which returns a [`Stream`](tokio_stream::Stream). 
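    ///
    /// A short paging sketch, assuming a configured [`Client`](crate::Client),
    /// `tokio_stream` for `StreamExt::next`, and a placeholder start-time filter
    /// built from the Unix epoch:
    ///
    /// ```no_run
    /// # use aws_sdk_swf::{Client, Error};
    /// # async fn example(client: &Client) -> Result<(), Error> {
    /// use tokio_stream::StreamExt;
    /// let since_epoch = aws_smithy_types::DateTime::from_secs(0);
    /// let mut pages = client
    ///     .list_open_workflow_executions()
    ///     .domain("example-domain")
    ///     .start_time_filter(
    ///         aws_sdk_swf::model::ExecutionTimeFilter::builder()
    ///             .oldest_date(since_epoch)
    ///             .build(),
    ///     )
    ///     .into_paginator()
    ///     .send();
    /// while let Some(page) = pages.next().await {
    ///     let page = page?;
    ///     // each `page` is a full ListOpenWorkflowExecutionsOutput
    /// }
    /// # Ok(())
    /// # }
    /// ```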
pub fn into_paginator(self) -> crate::paginator::ListOpenWorkflowExecutionsPaginator { crate::paginator::ListOpenWorkflowExecutionsPaginator::new(self.handle, self.inner) } /// <p>The name of the domain that contains the workflow executions to list.</p> pub fn domain(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.domain(input.into()); self } /// <p>The name of the domain that contains the workflow executions to list.</p> pub fn set_domain(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_domain(input); self } /// <p>Workflow executions are included in the returned results based on whether their start times are within the range specified by this filter.</p> pub fn start_time_filter(mut self, input: crate::model::ExecutionTimeFilter) -> Self { self.inner = self.inner.start_time_filter(input); self } /// <p>Workflow executions are included in the returned results based on whether their start times are within the range specified by this filter.</p> pub fn set_start_time_filter( mut self, input: std::option::Option<crate::model::ExecutionTimeFilter>, ) -> Self { self.inner = self.inner.set_start_time_filter(input); self } /// <p>If specified, only executions of the type specified in the filter are returned.</p> <note> /// <p> <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> /// </note> pub fn type_filter(mut self, input: crate::model::WorkflowTypeFilter) -> Self { self.inner = self.inner.type_filter(input); self } /// <p>If specified, only executions of the type specified in the filter are returned.</p> <note> /// <p> <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> /// </note> pub fn set_type_filter( mut self, input: std::option::Option<crate::model::WorkflowTypeFilter>, ) -> Self { self.inner = self.inner.set_type_filter(input); self } /// <p>If specified, only executions that have the matching tag are listed.</p> <note> /// <p> <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> /// </note> pub fn tag_filter(mut self, input: crate::model::TagFilter) -> Self { self.inner = self.inner.tag_filter(input); self } /// <p>If specified, only executions that have the matching tag are listed.</p> <note> /// <p> <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> /// </note> pub fn set_tag_filter( mut self, input: std::option::Option<crate::model::TagFilter>, ) -> Self { self.inner = self.inner.set_tag_filter(input); self } /// <p>If <code>NextPageToken</code> is returned there are more results available. The value of <code>NextPageToken</code> is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a <code>400</code> error: "<code>Specified token has exceeded its maximum lifetime</code>". </p> /// <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call. 
</p> pub fn next_page_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_page_token(input.into()); self } /// <p>If <code>NextPageToken</code> is returned there are more results available. The value of <code>NextPageToken</code> is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a <code>400</code> error: "<code>Specified token has exceeded its maximum lifetime</code>". </p> /// <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call. </p> pub fn set_next_page_token( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_next_page_token(input); self } /// <p>The maximum number of results that are returned per call. Use <code>nextPageToken</code> to obtain further pages of results. </p> pub fn maximum_page_size(mut self, input: i32) -> Self { self.inner = self.inner.maximum_page_size(input); self } /// <p>The maximum number of results that are returned per call. Use <code>nextPageToken</code> to obtain further pages of results. </p> pub fn set_maximum_page_size(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_maximum_page_size(input); self } /// <p>When set to <code>true</code>, returns the results in reverse order. By default the results are returned in descending order of the start time of the executions.</p> pub fn reverse_order(mut self, input: bool) -> Self { self.inner = self.inner.reverse_order(input); self } /// <p>When set to <code>true</code>, returns the results in reverse order. By default the results are returned in descending order of the start time of the executions.</p> pub fn set_reverse_order(mut self, input: std::option::Option<bool>) -> Self { self.inner = self.inner.set_reverse_order(input); self } /// <p>If specified, only workflow executions matching the workflow ID specified in the filter are returned.</p> <note> /// <p> <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> /// </note> pub fn execution_filter(mut self, input: crate::model::WorkflowExecutionFilter) -> Self { self.inner = self.inner.execution_filter(input); self } /// <p>If specified, only workflow executions matching the workflow ID specified in the filter are returned.</p> <note> /// <p> <code>executionFilter</code>, <code>typeFilter</code> and <code>tagFilter</code> are mutually exclusive. You can specify at most one of these in a request.</p> /// </note> pub fn set_execution_filter( mut self, input: std::option::Option<crate::model::WorkflowExecutionFilter>, ) -> Self { self.inner = self.inner.set_execution_filter(input); self } } /// Fluent builder constructing a request to `ListTagsForResource`. /// /// <p>List tags for a given domain.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct ListTagsForResource { handle: std::sync::Arc<super::Handle>, inner: crate::input::list_tags_for_resource_input::Builder, } impl ListTagsForResource { /// Creates a new `ListTagsForResource`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. 
/// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::ListTagsForResourceOutput, aws_smithy_http::result::SdkError<crate::error::ListTagsForResourceError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The Amazon Resource Name (ARN) for the Amazon SWF domain.</p> pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.resource_arn(input.into()); self } /// <p>The Amazon Resource Name (ARN) for the Amazon SWF domain.</p> pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_resource_arn(input); self } } /// Fluent builder constructing a request to `ListWorkflowTypes`. /// /// <p>Returns information about workflow types in the specified domain. The results may be split into multiple pages that can be retrieved by making the call repeatedly.</p> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>You cannot use an IAM policy to constrain this action's parameters.</p> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct ListWorkflowTypes { handle: std::sync::Arc<super::Handle>, inner: crate::input::list_workflow_types_input::Builder, } impl ListWorkflowTypes { /// Creates a new `ListWorkflowTypes`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::ListWorkflowTypesOutput, aws_smithy_http::result::SdkError<crate::error::ListWorkflowTypesError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// Create a paginator for this request /// /// Paginators are used by calling [`send().await`](crate::paginator::ListWorkflowTypesPaginator::send) which returns a [`Stream`](tokio_stream::Stream). pub fn into_paginator(self) -> crate::paginator::ListWorkflowTypesPaginator { crate::paginator::ListWorkflowTypesPaginator::new(self.handle, self.inner) } /// <p>The name of the domain in which the workflow types have been registered.</p> pub fn domain(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.domain(input.into()); self } /// <p>The name of the domain in which the workflow types have been registered.</p> pub fn set_domain(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_domain(input); self } /// <p>If specified, lists the workflow type with this name.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.name(input.into()); self } /// <p>If specified, lists the workflow type with this name.</p> pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_name(input); self } /// <p>Specifies the registration status of the workflow types to list.</p> pub fn registration_status(mut self, input: crate::model::RegistrationStatus) -> Self { self.inner = self.inner.registration_status(input); self } /// <p>Specifies the registration status of the workflow types to list.</p> pub fn set_registration_status( mut self, input: std::option::Option<crate::model::RegistrationStatus>, ) -> Self { self.inner = self.inner.set_registration_status(input); self } /// <p>If <code>NextPageToken</code> is returned there are more results available. The value of <code>NextPageToken</code> is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a <code>400</code> error: "<code>Specified token has exceeded its maximum lifetime</code>". </p> /// <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call. </p> pub fn next_page_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_page_token(input.into()); self } /// <p>If <code>NextPageToken</code> is returned there are more results available. The value of <code>NextPageToken</code> is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a <code>400</code> error: "<code>Specified token has exceeded its maximum lifetime</code>". </p> /// <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call. </p> pub fn set_next_page_token( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_next_page_token(input); self } /// <p>The maximum number of results that are returned per call. Use <code>nextPageToken</code> to obtain further pages of results. 
</p> pub fn maximum_page_size(mut self, input: i32) -> Self { self.inner = self.inner.maximum_page_size(input); self } /// <p>The maximum number of results that are returned per call. Use <code>nextPageToken</code> to obtain further pages of results. </p> pub fn set_maximum_page_size(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_maximum_page_size(input); self } /// <p>When set to <code>true</code>, returns the results in reverse order. By default the results are returned in ascending alphabetical order of the <code>name</code> of the workflow types.</p> pub fn reverse_order(mut self, input: bool) -> Self { self.inner = self.inner.reverse_order(input); self } /// <p>When set to <code>true</code>, returns the results in reverse order. By default the results are returned in ascending alphabetical order of the <code>name</code> of the workflow types.</p> pub fn set_reverse_order(mut self, input: std::option::Option<bool>) -> Self { self.inner = self.inner.set_reverse_order(input); self } } /// Fluent builder constructing a request to `PollForActivityTask`. /// /// <p>Used by workers to get an <code>ActivityTask</code> from the specified activity <code>taskList</code>. This initiates a long poll, where the service holds the HTTP connection open and responds as soon as a task becomes available. The maximum time the service holds on to the request before responding is 60 seconds. If no task is available within 60 seconds, the poll returns an empty result. An empty result, in this context, means that an ActivityTask is returned, but that the value of taskToken is an empty string. If a task is returned, the worker should use its type to identify and process it correctly.</p> <important> /// <p>Workers should set their client side socket timeout to at least 70 seconds (10 seconds higher than the maximum time the service may hold the poll request).</p> /// </important> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>Constrain the <code>taskList.name</code> parameter by using a <code>Condition</code> element with the <code>swf:taskList.name</code> key to allow the action to access only certain task lists.</p> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct PollForActivityTask { handle: std::sync::Arc<super::Handle>, inner: crate::input::poll_for_activity_task_input::Builder, } impl PollForActivityTask { /// Creates a new `PollForActivityTask`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against.
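        ///
        /// A minimal worker-side polling sketch, assuming this crate is
        /// `aws_sdk_swf`; the domain, task-list, and worker names are illustrative:
        ///
        /// ```no_run
        /// # async fn example(client: &aws_sdk_swf::Client) -> Result<(), aws_sdk_swf::Error> {
        /// let task = client
        ///     .poll_for_activity_task()
        ///     .domain("example-domain")
        ///     .task_list(
        ///         aws_sdk_swf::model::TaskList::builder()
        ///             .name("example-task-list")
        ///             .build(),
        ///     )
        ///     .identity("worker-1")
        ///     .send()
        ///     .await?;
        /// // An empty `taskToken` means the 60-second long poll expired with no task.
        /// println!("{:?}", task);
        /// # Ok(())
        /// # }
        /// ```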
/// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::PollForActivityTaskOutput, aws_smithy_http::result::SdkError<crate::error::PollForActivityTaskError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the domain that contains the task lists being polled.</p> pub fn domain(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.domain(input.into()); self } /// <p>The name of the domain that contains the task lists being polled.</p> pub fn set_domain(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_domain(input); self } /// <p>Specifies the task list to poll for activity tasks.</p> /// <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). Also, it must not <i>be</i> the literal string <code>arn</code>.</p> pub fn task_list(mut self, input: crate::model::TaskList) -> Self { self.inner = self.inner.task_list(input); self } /// <p>Specifies the task list to poll for activity tasks.</p> /// <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). Also, it must not <i>be</i> the literal string <code>arn</code>.</p> pub fn set_task_list(mut self, input: std::option::Option<crate::model::TaskList>) -> Self { self.inner = self.inner.set_task_list(input); self } /// <p>Identity of the worker making the request, recorded in the <code>ActivityTaskStarted</code> event in the workflow history. This enables diagnostic tracing when problems arise. The form of this identity is user defined.</p> pub fn identity(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.identity(input.into()); self } /// <p>Identity of the worker making the request, recorded in the <code>ActivityTaskStarted</code> event in the workflow history. This enables diagnostic tracing when problems arise. The form of this identity is user defined.</p> pub fn set_identity(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_identity(input); self } } /// Fluent builder constructing a request to `PollForDecisionTask`. /// /// <p>Used by deciders to get a <code>DecisionTask</code> from the specified decision <code>taskList</code>. A decision task may be returned for any open workflow execution that is using the specified task list. The task includes a paginated view of the history of the workflow execution. The decider should use the workflow type and the history to determine how to properly handle the task.</p> /// <p>This action initiates a long poll, where the service holds the HTTP connection open and responds as soon as a task becomes available.
If no decision task is available in the specified task list before the timeout of 60 seconds expires, an empty result is returned. An empty result, in this context, means that a DecisionTask is returned, but that the value of taskToken is an empty string.</p> <important> /// <p>Deciders should set their client side socket timeout to at least 70 seconds (10 seconds higher than the timeout).</p> /// </important> <important> /// <p>Because the number of workflow history events for a single workflow execution might be very large, the result returned might be split up across a number of pages. To retrieve subsequent pages, make additional calls to <code>PollForDecisionTask</code> using the <code>nextPageToken</code> returned by the initial call. Note that you do <i>not</i> call <code>GetWorkflowExecutionHistory</code> with this <code>nextPageToken</code>. Instead, call <code>PollForDecisionTask</code> again.</p> /// </important> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>Constrain the <code>taskList.name</code> parameter by using a <code>Condition</code> element with the <code>swf:taskList.name</code> key to allow the action to access only certain task lists.</p> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct PollForDecisionTask { handle: std::sync::Arc<super::Handle>, inner: crate::input::poll_for_decision_task_input::Builder, } impl PollForDecisionTask { /// Creates a new `PollForDecisionTask`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::PollForDecisionTaskOutput, aws_smithy_http::result::SdkError<crate::error::PollForDecisionTaskError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// Create a paginator for this request /// /// Paginators are used by calling [`send().await`](crate::paginator::PollForDecisionTaskPaginator::send) which returns a [`Stream`](tokio_stream::Stream). 
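    ///
    /// A minimal sketch of collecting every page of a decision task's history,
    /// assuming this crate is `aws_sdk_swf`; the domain and task-list names are
    /// illustrative:
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_swf::Client) -> Result<(), aws_sdk_swf::Error> {
    /// use tokio_stream::StreamExt;
    /// // Each stream item is a `Result`; collecting into `Result<Vec<_>, _>`
    /// // stops at the first error.
    /// let pages = client
    ///     .poll_for_decision_task()
    ///     .domain("example-domain")
    ///     .task_list(
    ///         aws_sdk_swf::model::TaskList::builder()
    ///             .name("example-task-list")
    ///             .build(),
    ///     )
    ///     .into_paginator()
    ///     .send()
    ///     .collect::<Result<Vec<_>, _>>()
    ///     .await?;
    /// println!("received {} page(s) of history", pages.len());
    /// # Ok(())
    /// # }
    /// ```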
pub fn into_paginator(self) -> crate::paginator::PollForDecisionTaskPaginator { crate::paginator::PollForDecisionTaskPaginator::new(self.handle, self.inner) } /// <p>The name of the domain containing the task lists to poll.</p> pub fn domain(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.domain(input.into()); self } /// <p>The name of the domain containing the task lists to poll.</p> pub fn set_domain(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_domain(input); self } /// <p>Specifies the task list to poll for decision tasks.</p> /// <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). Also, it must not <i>be</i> the literal string <code>arn</code>.</p> pub fn task_list(mut self, input: crate::model::TaskList) -> Self { self.inner = self.inner.task_list(input); self } /// <p>Specifies the task list to poll for decision tasks.</p> /// <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). Also, it must not <i>be</i> the literal string <code>arn</code>.</p> pub fn set_task_list(mut self, input: std::option::Option<crate::model::TaskList>) -> Self { self.inner = self.inner.set_task_list(input); self } /// <p>Identity of the decider making the request, which is recorded in the DecisionTaskStarted event in the workflow history. This enables diagnostic tracing when problems arise. The form of this identity is user defined.</p> pub fn identity(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.identity(input.into()); self } /// <p>Identity of the decider making the request, which is recorded in the DecisionTaskStarted event in the workflow history. This enables diagnostic tracing when problems arise. The form of this identity is user defined.</p> pub fn set_identity(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_identity(input); self } /// <p>If <code>NextPageToken</code> is returned there are more results available. The value of <code>NextPageToken</code> is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a <code>400</code> error: "<code>Specified token has exceeded its maximum lifetime</code>". </p> /// <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call. </p> <note> /// <p>The <code>nextPageToken</code> returned by this action cannot be used with <code>GetWorkflowExecutionHistory</code> to get the next page. You must call <code>PollForDecisionTask</code> again (with the <code>nextPageToken</code>) to retrieve the next page of history records. Calling <code>PollForDecisionTask</code> with a <code>nextPageToken</code> doesn't return a new decision task.</p> /// </note> pub fn next_page_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_page_token(input.into()); self } /// <p>If <code>NextPageToken</code> is returned there are more results available. 
The value of <code>NextPageToken</code> is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 60 seconds. Using an expired pagination token will return a <code>400</code> error: "<code>Specified token has exceeded its maximum lifetime</code>". </p> /// <p>The configured <code>maximumPageSize</code> determines how many results can be returned in a single call. </p> <note> /// <p>The <code>nextPageToken</code> returned by this action cannot be used with <code>GetWorkflowExecutionHistory</code> to get the next page. You must call <code>PollForDecisionTask</code> again (with the <code>nextPageToken</code>) to retrieve the next page of history records. Calling <code>PollForDecisionTask</code> with a <code>nextPageToken</code> doesn't return a new decision task.</p> /// </note> pub fn set_next_page_token( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_next_page_token(input); self } /// <p>The maximum number of results that are returned per call. Use <code>nextPageToken</code> to obtain further pages of results. </p> /// <p>This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.</p> pub fn maximum_page_size(mut self, input: i32) -> Self { self.inner = self.inner.maximum_page_size(input); self } /// <p>The maximum number of results that are returned per call. Use <code>nextPageToken</code> to obtain further pages of results. </p> /// <p>This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum.</p> pub fn set_maximum_page_size(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_maximum_page_size(input); self } /// <p>When set to <code>true</code>, returns the events in reverse order. By default the results are returned in ascending order of the <code>eventTimestamp</code> of the events.</p> pub fn reverse_order(mut self, input: bool) -> Self { self.inner = self.inner.reverse_order(input); self } /// <p>When set to <code>true</code>, returns the events in reverse order. By default the results are returned in ascending order of the <code>eventTimestamp</code> of the events.</p> pub fn set_reverse_order(mut self, input: std::option::Option<bool>) -> Self { self.inner = self.inner.set_reverse_order(input); self } } /// Fluent builder constructing a request to `RecordActivityTaskHeartbeat`. /// /// <p>Used by activity workers to report to the service that the <code>ActivityTask</code> represented by the specified <code>taskToken</code> is still making progress. The worker can also specify details of the progress, for example percent complete, using the <code>details</code> parameter. This action can also be used by the worker as a mechanism to check if cancellation is being requested for the activity task. If a cancellation is being attempted for the specified task, then the boolean <code>cancelRequested</code> flag returned by the service is set to <code>true</code>.</p> /// <p>This action resets the <code>taskHeartbeatTimeout</code> clock. The <code>taskHeartbeatTimeout</code> is specified in <code>RegisterActivityType</code>.</p> /// <p>This action doesn't in itself create an event in the workflow execution history. 
However, if the task times out, the workflow execution history contains an <code>ActivityTaskTimedOut</code> event that contains the information from the last heartbeat generated by the activity worker.</p> <note> /// <p>The <code>taskStartToCloseTimeout</code> of an activity type is the maximum duration of an activity task, regardless of the number of <code>RecordActivityTaskHeartbeat</code> requests received. The <code>taskStartToCloseTimeout</code> is also specified in <code>RegisterActivityType</code>.</p> /// </note> <note> /// <p>This operation is only useful for long-lived activities to report liveliness of the task and to determine if a cancellation is being attempted.</p> /// </note> <important> /// <p>If the <code>cancelRequested</code> flag returns <code>true</code>, a cancellation is being attempted. If the worker can cancel the activity, it should respond with <code>RespondActivityTaskCanceled</code>. Otherwise, it should ignore the cancellation request.</p> /// </important> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>You cannot use an IAM policy to constrain this action's parameters.</p> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct RecordActivityTaskHeartbeat { handle: std::sync::Arc<super::Handle>, inner: crate::input::record_activity_task_heartbeat_input::Builder, } impl RecordActivityTaskHeartbeat { /// Creates a new `RecordActivityTaskHeartbeat`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::RecordActivityTaskHeartbeatOutput, aws_smithy_http::result::SdkError<crate::error::RecordActivityTaskHeartbeatError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>taskToken</code> of the <code>ActivityTask</code>.</p> <important> /// <p> <code>taskToken</code> is generated by the service and should be treated as an opaque value. If the task is passed to another process, its <code>taskToken</code> must also be passed.
This enables it to provide its progress and respond with results. </p> /// </important> pub fn task_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.task_token(input.into()); self } /// <p>The <code>taskToken</code> of the <code>ActivityTask</code>.</p> <important> /// <p> <code>taskToken</code> is generated by the service and should be treated as an opaque value. If the task is passed to another process, its <code>taskToken</code> must also be passed. This enables it to provide its progress and respond with results. </p> /// </important> pub fn set_task_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_task_token(input); self } /// <p>If specified, contains details about the progress of the task.</p> pub fn details(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.details(input.into()); self } /// <p>If specified, contains details about the progress of the task.</p> pub fn set_details(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_details(input); self } } /// Fluent builder constructing a request to `RegisterActivityType`. /// /// <p>Registers a new <i>activity type</i> along with its configuration settings in the specified domain.</p> <important> /// <p>A <code>TypeAlreadyExists</code> fault is returned if the type already exists in the domain. You cannot change any configuration settings of the type after its registration, and it must be registered as a new version.</p> /// </important> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>Constrain the following parameters by using a <code>Condition</code> element with the appropriate keys.</p> /// <ul> /// <li> <p> <code>defaultTaskList.name</code>: String constraint. The key is <code>swf:defaultTaskList.name</code>.</p> </li> /// <li> <p> <code>name</code>: String constraint. The key is <code>swf:name</code>.</p> </li> /// <li> <p> <code>version</code>: String constraint. The key is <code>swf:version</code>.</p> </li> /// </ul> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct RegisterActivityType { handle: std::sync::Arc<super::Handle>, inner: crate::input::register_activity_type_input::Builder, } impl RegisterActivityType { /// Creates a new `RegisterActivityType`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. 
Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::RegisterActivityTypeOutput, aws_smithy_http::result::SdkError<crate::error::RegisterActivityTypeError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the domain in which this activity is to be registered.</p> pub fn domain(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.domain(input.into()); self } /// <p>The name of the domain in which this activity is to be registered.</p> pub fn set_domain(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_domain(input); self } /// <p>The name of the activity type within the domain.</p> /// <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). Also, it must not <i>be</i> the literal string <code>arn</code>.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.name(input.into()); self } /// <p>The name of the activity type within the domain.</p> /// <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). Also, it must not <i>be</i> the literal string <code>arn</code>.</p> pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_name(input); self } /// <p>The version of the activity type.</p> <note> /// <p>The activity type consists of the name and version, the combination of which must be unique within the domain.</p> /// </note> /// <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). Also, it must not <i>be</i> the literal string <code>arn</code>.</p> pub fn version(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.version(input.into()); self } /// <p>The version of the activity type.</p> <note> /// <p>The activity type consists of the name and version, the combination of which must be unique within the domain.</p> /// </note> /// <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). 
Also, it must not <i>be</i> the literal string <code>arn</code>.</p> pub fn set_version(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_version(input); self } /// <p>A textual description of the activity type.</p> pub fn description(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.description(input.into()); self } /// <p>A textual description of the activity type.</p> pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_description(input); self } /// <p>If set, specifies the default maximum duration that a worker can take to process tasks of this activity type. This default can be overridden when scheduling an activity task using the <code>ScheduleActivityTask</code> <code>Decision</code>.</p> /// <p>The duration is specified in seconds, an integer greater than or equal to <code>0</code>. You can use <code>NONE</code> to specify unlimited duration.</p> pub fn default_task_start_to_close_timeout( mut self, input: impl Into<std::string::String>, ) -> Self { self.inner = self.inner.default_task_start_to_close_timeout(input.into()); self } /// <p>If set, specifies the default maximum duration that a worker can take to process tasks of this activity type. This default can be overridden when scheduling an activity task using the <code>ScheduleActivityTask</code> <code>Decision</code>.</p> /// <p>The duration is specified in seconds, an integer greater than or equal to <code>0</code>. You can use <code>NONE</code> to specify unlimited duration.</p> pub fn set_default_task_start_to_close_timeout( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_default_task_start_to_close_timeout(input); self } /// <p>If set, specifies the default maximum time before which a worker processing a task of this type must report progress by calling <code>RecordActivityTaskHeartbeat</code>. If the timeout is exceeded, the activity task is automatically timed out. This default can be overridden when scheduling an activity task using the <code>ScheduleActivityTask</code> <code>Decision</code>. If the activity worker subsequently attempts to record a heartbeat or returns a result, the activity worker receives an <code>UnknownResource</code> fault. In this case, Amazon SWF no longer considers the activity task to be valid; the activity worker should clean up the activity task.</p> /// <p>The duration is specified in seconds, an integer greater than or equal to <code>0</code>. You can use <code>NONE</code> to specify unlimited duration.</p> pub fn default_task_heartbeat_timeout( mut self, input: impl Into<std::string::String>, ) -> Self { self.inner = self.inner.default_task_heartbeat_timeout(input.into()); self } /// <p>If set, specifies the default maximum time before which a worker processing a task of this type must report progress by calling <code>RecordActivityTaskHeartbeat</code>. If the timeout is exceeded, the activity task is automatically timed out. This default can be overridden when scheduling an activity task using the <code>ScheduleActivityTask</code> <code>Decision</code>. If the activity worker subsequently attempts to record a heartbeat or returns a result, the activity worker receives an <code>UnknownResource</code> fault. 
In this case, Amazon SWF no longer considers the activity task to be valid; the activity worker should clean up the activity task.</p> /// <p>The duration is specified in seconds, an integer greater than or equal to <code>0</code>. You can use <code>NONE</code> to specify unlimited duration.</p> pub fn set_default_task_heartbeat_timeout( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_default_task_heartbeat_timeout(input); self } /// <p>If set, specifies the default task list to use for scheduling tasks of this activity type. This default task list is used if a task list isn't provided when a task is scheduled through the <code>ScheduleActivityTask</code> <code>Decision</code>.</p> pub fn default_task_list(mut self, input: crate::model::TaskList) -> Self { self.inner = self.inner.default_task_list(input); self } /// <p>If set, specifies the default task list to use for scheduling tasks of this activity type. This default task list is used if a task list isn't provided when a task is scheduled through the <code>ScheduleActivityTask</code> <code>Decision</code>.</p> pub fn set_default_task_list( mut self, input: std::option::Option<crate::model::TaskList>, ) -> Self { self.inner = self.inner.set_default_task_list(input); self } /// <p>The default task priority to assign to the activity type. If not assigned, then <code>0</code> is used. Valid values are integers that range from Java's <code>Integer.MIN_VALUE</code> (-2147483648) to <code>Integer.MAX_VALUE</code> (2147483647). Higher numbers indicate higher priority.</p> /// <p>For more information about setting task priority, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html">Setting Task Priority</a> in the <i>Amazon SWF Developer Guide</i>.</p> pub fn default_task_priority(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.default_task_priority(input.into()); self } /// <p>The default task priority to assign to the activity type. If not assigned, then <code>0</code> is used. Valid values are integers that range from Java's <code>Integer.MIN_VALUE</code> (-2147483648) to <code>Integer.MAX_VALUE</code> (2147483647). Higher numbers indicate higher priority.</p> /// <p>For more information about setting task priority, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html">Setting Task Priority</a> in the <i>Amazon SWF Developer Guide</i>.</p> pub fn set_default_task_priority( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_default_task_priority(input); self } /// <p>If set, specifies the default maximum duration that a task of this activity type can wait before being assigned to a worker. This default can be overridden when scheduling an activity task using the <code>ScheduleActivityTask</code> <code>Decision</code>.</p> /// <p>The duration is specified in seconds, an integer greater than or equal to <code>0</code>. You can use <code>NONE</code> to specify unlimited duration.</p> pub fn default_task_schedule_to_start_timeout( mut self, input: impl Into<std::string::String>, ) -> Self { self.inner = self .inner .default_task_schedule_to_start_timeout(input.into()); self } /// <p>If set, specifies the default maximum duration that a task of this activity type can wait before being assigned to a worker.
This default can be overridden when scheduling an activity task using the <code>ScheduleActivityTask</code> <code>Decision</code>.</p> /// <p>The duration is specified in seconds, an integer greater than or equal to <code>0</code>. You can use <code>NONE</code> to specify unlimited duration.</p> pub fn set_default_task_schedule_to_start_timeout( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_default_task_schedule_to_start_timeout(input); self } /// <p>If set, specifies the default maximum duration for a task of this activity type. This default can be overridden when scheduling an activity task using the <code>ScheduleActivityTask</code> <code>Decision</code>.</p> /// <p>The duration is specified in seconds, an integer greater than or equal to <code>0</code>. You can use <code>NONE</code> to specify unlimited duration.</p> pub fn default_task_schedule_to_close_timeout( mut self, input: impl Into<std::string::String>, ) -> Self { self.inner = self .inner .default_task_schedule_to_close_timeout(input.into()); self } /// <p>If set, specifies the default maximum duration for a task of this activity type. This default can be overridden when scheduling an activity task using the <code>ScheduleActivityTask</code> <code>Decision</code>.</p> /// <p>The duration is specified in seconds, an integer greater than or equal to <code>0</code>. You can use <code>NONE</code> to specify unlimited duration.</p> pub fn set_default_task_schedule_to_close_timeout( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_default_task_schedule_to_close_timeout(input); self } } /// Fluent builder constructing a request to `RegisterDomain`. /// /// <p>Registers a new domain.</p> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>You cannot use an IAM policy to control domain access for this action. The name of the domain being registered is available as the resource of this action.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>You cannot use an IAM policy to constrain this action's parameters.</p> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct RegisterDomain { handle: std::sync::Arc<super::Handle>, inner: crate::input::register_domain_input::Builder, } impl RegisterDomain { /// Creates a new `RegisterDomain`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
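        ///
        /// A minimal registration sketch, assuming this crate is `aws_sdk_swf`;
        /// the domain name and retention period are illustrative:
        ///
        /// ```no_run
        /// # async fn example(client: &aws_sdk_swf::Client) -> Result<(), aws_sdk_swf::Error> {
        /// client
        ///     .register_domain()
        ///     .name("example-domain")
        ///     .description("An example domain")
        ///     // Keep closed-execution records and histories for 30 days.
        ///     .workflow_execution_retention_period_in_days("30")
        ///     .send()
        ///     .await?;
        /// # Ok(())
        /// # }
        /// ```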
pub async fn send( self, ) -> std::result::Result< crate::output::RegisterDomainOutput, aws_smithy_http::result::SdkError<crate::error::RegisterDomainError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>Name of the domain to register. The name must be unique in the region that the domain is registered in.</p> /// <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). Also, it must not <i>be</i> the literal string <code>arn</code>.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.name(input.into()); self } /// <p>Name of the domain to register. The name must be unique in the region that the domain is registered in.</p> /// <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). Also, it must not <i>be</i> the literal string <code>arn</code>.</p> pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_name(input); self } /// <p>A text description of the domain.</p> pub fn description(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.description(input.into()); self } /// <p>A text description of the domain.</p> pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_description(input); self } /// <p>The duration (in days) that records and histories of workflow executions on the domain should be kept by the service. After the retention period, the workflow execution isn't available in the results of visibility calls.</p> /// <p>If you pass the value <code>NONE</code> or <code>0</code> (zero), then the workflow execution history isn't retained. As soon as the workflow execution completes, the execution record and its history are deleted.</p> /// <p>The maximum workflow execution retention period is 90 days. For more information about Amazon SWF service limits, see: <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-limits.html">Amazon SWF Service Limits</a> in the <i>Amazon SWF Developer Guide</i>.</p> pub fn workflow_execution_retention_period_in_days( mut self, input: impl Into<std::string::String>, ) -> Self { self.inner = self .inner .workflow_execution_retention_period_in_days(input.into()); self } /// <p>The duration (in days) that records and histories of workflow executions on the domain should be kept by the service. After the retention period, the workflow execution isn't available in the results of visibility calls.</p> /// <p>If you pass the value <code>NONE</code> or <code>0</code> (zero), then the workflow execution history isn't retained. As soon as the workflow execution completes, the execution record and its history are deleted.</p> /// <p>The maximum workflow execution retention period is 90 days. 
For more information about Amazon SWF service limits, see: <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-limits.html">Amazon SWF Service Limits</a> in the <i>Amazon SWF Developer Guide</i>.</p> pub fn set_workflow_execution_retention_period_in_days( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self .inner .set_workflow_execution_retention_period_in_days(input); self } /// Appends an item to `tags`. /// /// To override the contents of this collection use [`set_tags`](Self::set_tags). /// /// <p>Tags to be added when registering a domain.</p> /// <p>Tags may only contain unicode letters, digits, whitespace, or these symbols: <code>_ . : / = + - @</code>.</p> pub fn tags(mut self, input: crate::model::ResourceTag) -> Self { self.inner = self.inner.tags(input); self } /// <p>Tags to be added when registering a domain.</p> /// <p>Tags may only contain unicode letters, digits, whitespace, or these symbols: <code>_ . : / = + - @</code>.</p> pub fn set_tags( mut self, input: std::option::Option<std::vec::Vec<crate::model::ResourceTag>>, ) -> Self { self.inner = self.inner.set_tags(input); self } } /// Fluent builder constructing a request to `RegisterWorkflowType`. /// /// <p>Registers a new <i>workflow type</i> and its configuration settings in the specified domain.</p> /// <p>The retention period for the workflow history is set by the <code>RegisterDomain</code> action.</p> <important> /// <p>If the type already exists, then a <code>TypeAlreadyExists</code> fault is returned. You cannot change the configuration settings of a workflow type once it is registered and it must be registered as a new version.</p> /// </important> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>Constrain the following parameters by using a <code>Condition</code> element with the appropriate keys.</p> /// <ul> /// <li> <p> <code>defaultTaskList.name</code>: String constraint. The key is <code>swf:defaultTaskList.name</code>.</p> </li> /// <li> <p> <code>name</code>: String constraint. The key is <code>swf:name</code>.</p> </li> /// <li> <p> <code>version</code>: String constraint. The key is <code>swf:version</code>.</p> </li> /// </ul> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct RegisterWorkflowType { handle: std::sync::Arc<super::Handle>, inner: crate::input::register_workflow_type_input::Builder, } impl RegisterWorkflowType { /// Creates a new `RegisterWorkflowType`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. 
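        ///
        /// A minimal registration sketch, assuming this crate is `aws_sdk_swf`;
        /// all names, versions, and timeouts are illustrative:
        ///
        /// ```no_run
        /// # async fn example(client: &aws_sdk_swf::Client) -> Result<(), aws_sdk_swf::Error> {
        /// client
        ///     .register_workflow_type()
        ///     .domain("example-domain")
        ///     .name("example-workflow")
        ///     .version("1.0")
        ///     // Decision tasks may take up to 60 seconds each ...
        ///     .default_task_start_to_close_timeout("60")
        ///     // ... and a whole execution up to one hour (NONE is not allowed here).
        ///     .default_execution_start_to_close_timeout("3600")
        ///     .send()
        ///     .await?;
        /// # Ok(())
        /// # }
        /// ```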
/// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::RegisterWorkflowTypeOutput, aws_smithy_http::result::SdkError<crate::error::RegisterWorkflowTypeError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the domain in which to register the workflow type.</p> pub fn domain(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.domain(input.into()); self } /// <p>The name of the domain in which to register the workflow type.</p> pub fn set_domain(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_domain(input); self } /// <p>The name of the workflow type.</p> /// <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). Also, it must not <i>be</i> the literal string <code>arn</code>.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.name(input.into()); self } /// <p>The name of the workflow type.</p> /// <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). Also, it must not <i>be</i> the literal string <code>arn</code>.</p> pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_name(input); self } /// <p>The version of the workflow type.</p> <note> /// <p>The workflow type consists of the name and version, the combination of which must be unique within the domain. To get a list of all currently registered workflow types, use the <code>ListWorkflowTypes</code> action.</p> /// </note> /// <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). Also, it must not <i>be</i> the literal string <code>arn</code>.</p> pub fn version(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.version(input.into()); self } /// <p>The version of the workflow type.</p> <note> /// <p>The workflow type consists of the name and version, the combination of which must be unique within the domain. To get a list of all currently registered workflow types, use the <code>ListWorkflowTypes</code> action.</p> /// </note> /// <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). 
Also, it must not <i>be</i> the literal string <code>arn</code>.</p> pub fn set_version(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_version(input); self } /// <p>Textual description of the workflow type.</p> pub fn description(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.description(input.into()); self } /// <p>Textual description of the workflow type.</p> pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_description(input); self } /// <p>If set, specifies the default maximum duration of decision tasks for this workflow type. This default can be overridden when starting a workflow execution using the <code>StartWorkflowExecution</code> action or the <code>StartChildWorkflowExecution</code> <code>Decision</code>.</p> /// <p>The duration is specified in seconds, an integer greater than or equal to <code>0</code>. You can use <code>NONE</code> to specify unlimited duration.</p> pub fn default_task_start_to_close_timeout( mut self, input: impl Into<std::string::String>, ) -> Self { self.inner = self.inner.default_task_start_to_close_timeout(input.into()); self } /// <p>If set, specifies the default maximum duration of decision tasks for this workflow type. This default can be overridden when starting a workflow execution using the <code>StartWorkflowExecution</code> action or the <code>StartChildWorkflowExecution</code> <code>Decision</code>.</p> /// <p>The duration is specified in seconds, an integer greater than or equal to <code>0</code>. You can use <code>NONE</code> to specify unlimited duration.</p> pub fn set_default_task_start_to_close_timeout( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_default_task_start_to_close_timeout(input); self } /// <p>If set, specifies the default maximum duration for executions of this workflow type. You can override this default when starting an execution through the <code>StartWorkflowExecution</code> Action or <code>StartChildWorkflowExecution</code> <code>Decision</code>.</p> /// <p>The duration is specified in seconds; an integer greater than or equal to 0. Unlike some of the other timeout parameters in Amazon SWF, you cannot specify a value of "NONE" for <code>defaultExecutionStartToCloseTimeout</code>; there is a one-year max limit on the time that a workflow execution can run. Exceeding this limit always causes the workflow execution to time out.</p> pub fn default_execution_start_to_close_timeout( mut self, input: impl Into<std::string::String>, ) -> Self { self.inner = self .inner .default_execution_start_to_close_timeout(input.into()); self } /// <p>If set, specifies the default maximum duration for executions of this workflow type. You can override this default when starting an execution through the <code>StartWorkflowExecution</code> Action or <code>StartChildWorkflowExecution</code> <code>Decision</code>.</p> /// <p>The duration is specified in seconds; an integer greater than or equal to 0. Unlike some of the other timeout parameters in Amazon SWF, you cannot specify a value of "NONE" for <code>defaultExecutionStartToCloseTimeout</code>; there is a one-year max limit on the time that a workflow execution can run. 
Exceeding this limit always causes the workflow execution to time out.</p> pub fn set_default_execution_start_to_close_timeout( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self .inner .set_default_execution_start_to_close_timeout(input); self } /// <p>If set, specifies the default task list to use for scheduling decision tasks for executions of this workflow type. This default is used only if a task list isn't provided when starting the execution through the <code>StartWorkflowExecution</code> Action or <code>StartChildWorkflowExecution</code> <code>Decision</code>.</p> pub fn default_task_list(mut self, input: crate::model::TaskList) -> Self { self.inner = self.inner.default_task_list(input); self } /// <p>If set, specifies the default task list to use for scheduling decision tasks for executions of this workflow type. This default is used only if a task list isn't provided when starting the execution through the <code>StartWorkflowExecution</code> Action or <code>StartChildWorkflowExecution</code> <code>Decision</code>.</p> pub fn set_default_task_list( mut self, input: std::option::Option<crate::model::TaskList>, ) -> Self { self.inner = self.inner.set_default_task_list(input); self } /// <p>The default task priority to assign to the workflow type. If not assigned, then <code>0</code> is used. Valid values are integers that range from Java's <code>Integer.MIN_VALUE</code> (-2147483648) to <code>Integer.MAX_VALUE</code> (2147483647). Higher numbers indicate higher priority.</p> /// <p>For more information about setting task priority, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html">Setting Task Priority</a> in the <i>Amazon SWF Developer Guide</i>.</p> pub fn default_task_priority(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.default_task_priority(input.into()); self } /// <p>The default task priority to assign to the workflow type. If not assigned, then <code>0</code> is used. Valid values are integers that range from Java's <code>Integer.MIN_VALUE</code> (-2147483648) to <code>Integer.MAX_VALUE</code> (2147483647). Higher numbers indicate higher priority.</p> /// <p>For more information about setting task priority, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html">Setting Task Priority</a> in the <i>Amazon SWF Developer Guide</i>.</p> pub fn set_default_task_priority( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_default_task_priority(input); self } /// <p>If set, specifies the default policy to use for the child workflow executions when a workflow execution of this type is terminated, by calling the <code>TerminateWorkflowExecution</code> action explicitly or due to an expired timeout. This default can be overridden when starting a workflow execution using the <code>StartWorkflowExecution</code> action or the <code>StartChildWorkflowExecution</code> <code>Decision</code>.</p> /// <p>The supported child policies are:</p> /// <ul> /// <li> <p> <code>TERMINATE</code> – The child executions are terminated.</p> </li> /// <li> <p> <code>REQUEST_CANCEL</code> – A request to cancel is attempted for each child execution by recording a <code>WorkflowExecutionCancelRequested</code> event in its history. 
It is up to the decider to take appropriate actions when it receives an execution history with this event.</p> </li> /// <li> <p> <code>ABANDON</code> – No action is taken. The child executions continue to run.</p> </li> /// </ul> pub fn default_child_policy(mut self, input: crate::model::ChildPolicy) -> Self { self.inner = self.inner.default_child_policy(input); self } /// <p>If set, specifies the default policy to use for the child workflow executions when a workflow execution of this type is terminated, by calling the <code>TerminateWorkflowExecution</code> action explicitly or due to an expired timeout. This default can be overridden when starting a workflow execution using the <code>StartWorkflowExecution</code> action or the <code>StartChildWorkflowExecution</code> <code>Decision</code>.</p> /// <p>The supported child policies are:</p> /// <ul> /// <li> <p> <code>TERMINATE</code> – The child executions are terminated.</p> </li> /// <li> <p> <code>REQUEST_CANCEL</code> – A request to cancel is attempted for each child execution by recording a <code>WorkflowExecutionCancelRequested</code> event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.</p> </li> /// <li> <p> <code>ABANDON</code> – No action is taken. The child executions continue to run.</p> </li> /// </ul> pub fn set_default_child_policy( mut self, input: std::option::Option<crate::model::ChildPolicy>, ) -> Self { self.inner = self.inner.set_default_child_policy(input); self } /// <p>The default IAM role attached to this workflow type.</p> <note> /// <p>Executions of this workflow type need IAM roles to invoke Lambda functions. If you don't specify an IAM role when you start this workflow type, the default Lambda role is attached to the execution. For more information, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html">https://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html</a> in the <i>Amazon SWF Developer Guide</i>.</p> /// </note> pub fn default_lambda_role(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.default_lambda_role(input.into()); self } /// <p>The default IAM role attached to this workflow type.</p> <note> /// <p>Executions of this workflow type need IAM roles to invoke Lambda functions. If you don't specify an IAM role when you start this workflow type, the default Lambda role is attached to the execution. For more information, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html">https://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html</a> in the <i>Amazon SWF Developer Guide</i>.</p> /// </note> pub fn set_default_lambda_role( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_default_lambda_role(input); self } } /// Fluent builder constructing a request to `RequestCancelWorkflowExecution`. /// /// <p>Records a <code>WorkflowExecutionCancelRequested</code> event in the currently running workflow execution identified by the given domain, workflowId, and runId. This logically requests the cancellation of the workflow execution as a whole. 
It is up to the decider to take appropriate actions when it receives an execution history with this event.</p> <note> /// <p>If the runId isn't specified, the <code>WorkflowExecutionCancelRequested</code> event is recorded in the history of the current open workflow execution with the specified workflowId in the domain.</p> /// </note> <note> /// <p>Because this action allows the workflow to properly clean up and gracefully close, it should be used instead of <code>TerminateWorkflowExecution</code> when possible.</p> /// </note> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>You cannot use an IAM policy to constrain this action's parameters.</p> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct RequestCancelWorkflowExecution { handle: std::sync::Arc<super::Handle>, inner: crate::input::request_cancel_workflow_execution_input::Builder, } impl RequestCancelWorkflowExecution { /// Creates a new `RequestCancelWorkflowExecution`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::RequestCancelWorkflowExecutionOutput, aws_smithy_http::result::SdkError<crate::error::RequestCancelWorkflowExecutionError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
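            // Editorial sketch of the flow here: `build()` above turned the fluent
            // builder's fields into the operation input (any assembly problem has
            // already surfaced as `SdkError::ConstructionFailure`); `make_operation`
            // next pairs that input with the shared client configuration, and
            // `call` finally dispatches it over the wire.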
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the domain containing the workflow execution to cancel.</p> pub fn domain(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.domain(input.into()); self } /// <p>The name of the domain containing the workflow execution to cancel.</p> pub fn set_domain(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_domain(input); self } /// <p>The workflowId of the workflow execution to cancel.</p> pub fn workflow_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.workflow_id(input.into()); self } /// <p>The workflowId of the workflow execution to cancel.</p> pub fn set_workflow_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_workflow_id(input); self } /// <p>The runId of the workflow execution to cancel.</p> pub fn run_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.run_id(input.into()); self } /// <p>The runId of the workflow execution to cancel.</p> pub fn set_run_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_run_id(input); self } } /// Fluent builder constructing a request to `RespondActivityTaskCanceled`. /// /// <p>Used by workers to tell the service that the <code>ActivityTask</code> identified by the <code>taskToken</code> was successfully canceled. Additional <code>details</code> can be provided using the <code>details</code> argument.</p> /// <p>These <code>details</code> (if provided) appear in the <code>ActivityTaskCanceled</code> event added to the workflow history.</p> <important> /// <p>Only use this operation if the <code>canceled</code> flag of a <code>RecordActivityTaskHeartbeat</code> request returns <code>true</code> and if the activity can be safely undone or abandoned.</p> /// </important> /// <p>A task is considered open from the time that it is scheduled until it is closed. Therefore a task is reported as open while a worker is processing it. A task is closed after it has been specified in a call to <code>RespondActivityTaskCompleted</code>, RespondActivityTaskCanceled, <code>RespondActivityTaskFailed</code>, or the task has <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types">timed out</a>.</p> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>You cannot use an IAM policy to constrain this action's parameters.</p> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. 
For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct RespondActivityTaskCanceled { handle: std::sync::Arc<super::Handle>, inner: crate::input::respond_activity_task_canceled_input::Builder, } impl RespondActivityTaskCanceled { /// Creates a new `RespondActivityTaskCanceled`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::RespondActivityTaskCanceledOutput, aws_smithy_http::result::SdkError<crate::error::RespondActivityTaskCanceledError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>taskToken</code> of the <code>ActivityTask</code>.</p> <important> /// <p> <code>taskToken</code> is generated by the service and should be treated as an opaque value. If the task is passed to another process, its <code>taskToken</code> must also be passed. This enables it to provide its progress and respond with results.</p> /// </important> pub fn task_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.task_token(input.into()); self } /// <p>The <code>taskToken</code> of the <code>ActivityTask</code>.</p> <important> /// <p> <code>taskToken</code> is generated by the service and should be treated as an opaque value. If the task is passed to another process, its <code>taskToken</code> must also be passed. This enables it to provide its progress and respond with results.</p> /// </important> pub fn set_task_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_task_token(input); self } /// <p> Information about the cancellation.</p> pub fn details(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.details(input.into()); self } /// <p> Information about the cancellation.</p> pub fn set_details(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_details(input); self } } /// Fluent builder constructing a request to `RespondActivityTaskCompleted`. /// /// <p>Used by workers to tell the service that the <code>ActivityTask</code> identified by the <code>taskToken</code> completed successfully with a <code>result</code> (if provided). The <code>result</code> appears in the <code>ActivityTaskCompleted</code> event in the workflow history.</p> <important> /// <p>If the requested task doesn't complete successfully, use <code>RespondActivityTaskFailed</code> instead. 
If the worker finds that the task is canceled through the <code>canceled</code> flag returned by <code>RecordActivityTaskHeartbeat</code>, it should cancel the task, clean up and then call <code>RespondActivityTaskCanceled</code>.</p> /// </important> /// <p>A task is considered open from the time that it is scheduled until it is closed. Therefore a task is reported as open while a worker is processing it. A task is closed after it has been specified in a call to RespondActivityTaskCompleted, <code>RespondActivityTaskCanceled</code>, <code>RespondActivityTaskFailed</code>, or the task has <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types">timed out</a>.</p> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>You cannot use an IAM policy to constrain this action's parameters.</p> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct RespondActivityTaskCompleted { handle: std::sync::Arc<super::Handle>, inner: crate::input::respond_activity_task_completed_input::Builder, } impl RespondActivityTaskCompleted { /// Creates a new `RespondActivityTaskCompleted`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::RespondActivityTaskCompletedOutput, aws_smithy_http::result::SdkError<crate::error::RespondActivityTaskCompletedError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>taskToken</code> of the <code>ActivityTask</code>.</p> <important> /// <p> <code>taskToken</code> is generated by the service and should be treated as an opaque value. If the task is passed to another process, its <code>taskToken</code> must also be passed. 
This enables it to provide its progress and respond with results.</p> /// </important> pub fn task_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.task_token(input.into()); self } /// <p>The <code>taskToken</code> of the <code>ActivityTask</code>.</p> <important> /// <p> <code>taskToken</code> is generated by the service and should be treated as an opaque value. If the task is passed to another process, its <code>taskToken</code> must also be passed. This enables it to provide its progress and respond with results.</p> /// </important> pub fn set_task_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_task_token(input); self } /// <p>The result of the activity task. It is a free form string that is implementation specific.</p> pub fn result(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.result(input.into()); self } /// <p>The result of the activity task. It is a free form string that is implementation specific.</p> pub fn set_result(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_result(input); self } } /// Fluent builder constructing a request to `RespondActivityTaskFailed`. /// /// <p>Used by workers to tell the service that the <code>ActivityTask</code> identified by the <code>taskToken</code> has failed with <code>reason</code> (if specified). The <code>reason</code> and <code>details</code> appear in the <code>ActivityTaskFailed</code> event added to the workflow history.</p> /// <p>A task is considered open from the time that it is scheduled until it is closed. Therefore a task is reported as open while a worker is processing it. A task is closed after it has been specified in a call to <code>RespondActivityTaskCompleted</code>, <code>RespondActivityTaskCanceled</code>, RespondActivityTaskFailed, or the task has <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types">timed out</a>.</p> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>You cannot use an IAM policy to constrain this action's parameters.</p> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct RespondActivityTaskFailed { handle: std::sync::Arc<super::Handle>, inner: crate::input::respond_activity_task_failed_input::Builder, } impl RespondActivityTaskFailed { /// Creates a new `RespondActivityTaskFailed`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. 
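    ///
    /// A minimal usage sketch (the client value and the token/reason strings
    /// below are illustrative assumptions, not values defined by this crate):
    ///
    /// ```no_run
    /// # async fn report_failure(client: &aws_sdk_swf::Client) -> Result<(), aws_sdk_swf::Error> {
    /// client
    ///     .respond_activity_task_failed()
    ///     .task_token("opaque-task-token") // token delivered with the polled activity task
    ///     .reason("downstream-unavailable") // short diagnostic summary
    ///     .details("connection to the billing service timed out") // free-form detail
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```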
/// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::RespondActivityTaskFailedOutput, aws_smithy_http::result::SdkError<crate::error::RespondActivityTaskFailedError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>taskToken</code> of the <code>ActivityTask</code>.</p> <important> /// <p> <code>taskToken</code> is generated by the service and should be treated as an opaque value. If the task is passed to another process, its <code>taskToken</code> must also be passed. This enables it to provide its progress and respond with results.</p> /// </important> pub fn task_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.task_token(input.into()); self } /// <p>The <code>taskToken</code> of the <code>ActivityTask</code>.</p> <important> /// <p> <code>taskToken</code> is generated by the service and should be treated as an opaque value. If the task is passed to another process, its <code>taskToken</code> must also be passed. This enables it to provide its progress and respond with results.</p> /// </important> pub fn set_task_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_task_token(input); self } /// <p>Description of the error that may assist in diagnostics.</p> pub fn reason(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.reason(input.into()); self } /// <p>Description of the error that may assist in diagnostics.</p> pub fn set_reason(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_reason(input); self } /// <p> Detailed information about the failure.</p> pub fn details(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.details(input.into()); self } /// <p> Detailed information about the failure.</p> pub fn set_details(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_details(input); self } } /// Fluent builder constructing a request to `RespondDecisionTaskCompleted`. /// /// <p>Used by deciders to tell the service that the <code>DecisionTask</code> identified by the <code>taskToken</code> has successfully completed. The <code>decisions</code> argument specifies the list of decisions made while processing the task.</p> /// <p>A <code>DecisionTaskCompleted</code> event is added to the workflow history. The <code>executionContext</code> specified is attached to the event in the workflow execution history.</p> /// <p> <b>Access Control</b> </p> /// <p>If an IAM policy grants permission to use <code>RespondDecisionTaskCompleted</code>, it can express permissions for the list of decisions in the <code>decisions</code> parameter. Each of the decisions has one or more parameters, much like a regular API call. 
To allow for policies to be as readable as possible, you can express permissions on decisions as if they were actual API calls, including applying conditions to some parameters. For more information, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct RespondDecisionTaskCompleted { handle: std::sync::Arc<super::Handle>, inner: crate::input::respond_decision_task_completed_input::Builder, } impl RespondDecisionTaskCompleted { /// Creates a new `RespondDecisionTaskCompleted`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::RespondDecisionTaskCompletedOutput, aws_smithy_http::result::SdkError<crate::error::RespondDecisionTaskCompletedError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The <code>taskToken</code> from the <code>DecisionTask</code>.</p> <important> /// <p> <code>taskToken</code> is generated by the service and should be treated as an opaque value. If the task is passed to another process, its <code>taskToken</code> must also be passed. This enables it to provide its progress and respond with results.</p> /// </important> pub fn task_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.task_token(input.into()); self } /// <p>The <code>taskToken</code> from the <code>DecisionTask</code>.</p> <important> /// <p> <code>taskToken</code> is generated by the service and should be treated as an opaque value. If the task is passed to another process, its <code>taskToken</code> must also be passed. This enables it to provide its progress and respond with results.</p> /// </important> pub fn set_task_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_task_token(input); self } /// Appends an item to `decisions`. /// /// To override the contents of this collection use [`set_decisions`](Self::set_decisions). /// /// <p>The list of decisions (possibly empty) made by the decider while processing this decision task. See the docs for the <code>Decision</code> structure for details.</p> pub fn decisions(mut self, input: crate::model::Decision) -> Self { self.inner = self.inner.decisions(input); self } /// <p>The list of decisions (possibly empty) made by the decider while processing this decision task. 
See the docs for the <code>Decision</code> structure for details.</p> pub fn set_decisions( mut self, input: std::option::Option<std::vec::Vec<crate::model::Decision>>, ) -> Self { self.inner = self.inner.set_decisions(input); self } /// <p>User defined context to add to workflow execution.</p> pub fn execution_context(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.execution_context(input.into()); self } /// <p>User defined context to add to workflow execution.</p> pub fn set_execution_context( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_execution_context(input); self } } /// Fluent builder constructing a request to `SignalWorkflowExecution`. /// /// <p>Records a <code>WorkflowExecutionSignaled</code> event in the workflow execution history and creates a decision task for the workflow execution identified by the given domain, workflowId and runId. The event is recorded with the specified user defined signalName and input (if provided).</p> <note> /// <p>If a runId isn't specified, then the <code>WorkflowExecutionSignaled</code> event is recorded in the history of the current open workflow with the matching workflowId in the domain.</p> /// </note> <note> /// <p>If the specified workflow execution isn't open, this method fails with <code>UnknownResource</code>.</p> /// </note> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>You cannot use an IAM policy to constrain this action's parameters.</p> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct SignalWorkflowExecution { handle: std::sync::Arc<super::Handle>, inner: crate::input::signal_workflow_execution_input::Builder, } impl SignalWorkflowExecution { /// Creates a new `SignalWorkflowExecution`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::SignalWorkflowExecutionOutput, aws_smithy_http::result::SdkError<crate::error::SignalWorkflowExecutionError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
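            // Editorial note: failures to assemble the input (for example, fields the
            // operation needs that were never set on the fluent builder) are reported
            // here as a `ConstructionFailure`, before anything is signed or sent.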
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the domain containing the workflow execution to signal.</p> pub fn domain(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.domain(input.into()); self } /// <p>The name of the domain containing the workflow execution to signal.</p> pub fn set_domain(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_domain(input); self } /// <p>The workflowId of the workflow execution to signal.</p> pub fn workflow_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.workflow_id(input.into()); self } /// <p>The workflowId of the workflow execution to signal.</p> pub fn set_workflow_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_workflow_id(input); self } /// <p>The runId of the workflow execution to signal.</p> pub fn run_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.run_id(input.into()); self } /// <p>The runId of the workflow execution to signal.</p> pub fn set_run_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_run_id(input); self } /// <p>The name of the signal. This name must be meaningful to the target workflow.</p> pub fn signal_name(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.signal_name(input.into()); self } /// <p>The name of the signal. This name must be meaningful to the target workflow.</p> pub fn set_signal_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_signal_name(input); self } /// <p>Data to attach to the <code>WorkflowExecutionSignaled</code> event in the target workflow execution's history.</p> pub fn input(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.input(input.into()); self } /// <p>Data to attach to the <code>WorkflowExecutionSignaled</code> event in the target workflow execution's history.</p> pub fn set_input(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_input(input); self } } /// Fluent builder constructing a request to `StartWorkflowExecution`. 
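///
/// A minimal start sketch (the client value, domain, workflow type, and IDs
/// below are illustrative assumptions):
///
/// ```no_run
/// # async fn start(client: &aws_sdk_swf::Client) -> Result<(), aws_sdk_swf::Error> {
/// let started = client
///     .start_workflow_execution()
///     .domain("example-domain")
///     .workflow_id("order-12345")
///     .workflow_type(
///         aws_sdk_swf::model::WorkflowType::builder()
///             .name("ProcessOrder")
///             .version("1.0")
///             .build(),
///     )
///     .input("{\"orderId\":12345}")
///     .send()
///     .await?;
/// // The output carries the runId of the newly started execution.
/// println!("{:?}", started);
/// # Ok(())
/// # }
/// ```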
/// /// <p>Starts an execution of the workflow type in the specified domain using the provided <code>workflowId</code> and input data.</p> /// <p>This action returns the newly started workflow execution.</p> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>Constrain the following parameters by using a <code>Condition</code> element with the appropriate keys.</p> /// <ul> /// <li> <p> <code>tagList.member.0</code>: The key is <code>swf:tagList.member.0</code>.</p> </li> /// <li> <p> <code>tagList.member.1</code>: The key is <code>swf:tagList.member.1</code>.</p> </li> /// <li> <p> <code>tagList.member.2</code>: The key is <code>swf:tagList.member.2</code>.</p> </li> /// <li> <p> <code>tagList.member.3</code>: The key is <code>swf:tagList.member.3</code>.</p> </li> /// <li> <p> <code>tagList.member.4</code>: The key is <code>swf:tagList.member.4</code>.</p> </li> /// <li> <p> <code>taskList</code>: String constraint. The key is <code>swf:taskList.name</code>.</p> </li> /// <li> <p> <code>workflowType.name</code>: String constraint. The key is <code>swf:workflowType.name</code>.</p> </li> /// <li> <p> <code>workflowType.version</code>: String constraint. The key is <code>swf:workflowType.version</code>.</p> </li> /// </ul> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct StartWorkflowExecution { handle: std::sync::Arc<super::Handle>, inner: crate::input::start_workflow_execution_input::Builder, } impl StartWorkflowExecution { /// Creates a new `StartWorkflowExecution`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::StartWorkflowExecutionOutput, aws_smithy_http::result::SdkError<crate::error::StartWorkflowExecutionError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the domain in which the workflow execution is created.</p> pub fn domain(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.domain(input.into()); self } /// <p>The name of the domain in which the workflow execution is created.</p> pub fn set_domain(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_domain(input); self } /// <p>The user defined identifier associated with the workflow execution. You can use this to associate a custom identifier with the workflow execution. You may specify the same identifier if a workflow execution is logically a <i>restart</i> of a previous execution. You cannot have two open workflow executions with the same <code>workflowId</code> at the same time within the same domain.</p> /// <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). Also, it must not <i>be</i> the literal string <code>arn</code>.</p> pub fn workflow_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.workflow_id(input.into()); self } /// <p>The user defined identifier associated with the workflow execution. You can use this to associate a custom identifier with the workflow execution. You may specify the same identifier if a workflow execution is logically a <i>restart</i> of a previous execution. You cannot have two open workflow executions with the same <code>workflowId</code> at the same time within the same domain.</p> /// <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). Also, it must not <i>be</i> the literal string <code>arn</code>.</p> pub fn set_workflow_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_workflow_id(input); self } /// <p>The type of the workflow to start.</p> pub fn workflow_type(mut self, input: crate::model::WorkflowType) -> Self { self.inner = self.inner.workflow_type(input); self } /// <p>The type of the workflow to start.</p> pub fn set_workflow_type( mut self, input: std::option::Option<crate::model::WorkflowType>, ) -> Self { self.inner = self.inner.set_workflow_type(input); self } /// <p>The task list to use for the decision tasks generated for this workflow execution. This overrides the <code>defaultTaskList</code> specified when registering the workflow type.</p> <note> /// <p>A task list for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default task list was specified at registration time then a fault is returned.</p> /// </note> /// <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). 
Also, it must not <i>be</i> the literal string <code>arn</code>.</p> pub fn task_list(mut self, input: crate::model::TaskList) -> Self { self.inner = self.inner.task_list(input); self } /// <p>The task list to use for the decision tasks generated for this workflow execution. This overrides the <code>defaultTaskList</code> specified when registering the workflow type.</p> <note> /// <p>A task list for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default task list was specified at registration time then a fault is returned.</p> /// </note> /// <p>The specified string must not start or end with whitespace. It must not contain a <code>:</code> (colon), <code>/</code> (slash), <code>|</code> (vertical bar), or any control characters (<code>\u0000-\u001f</code> | <code>\u007f-\u009f</code>). Also, it must not <i>be</i> the literal string <code>arn</code>.</p> pub fn set_task_list(mut self, input: std::option::Option<crate::model::TaskList>) -> Self { self.inner = self.inner.set_task_list(input); self } /// <p>The task priority to use for this workflow execution. This overrides any default priority that was assigned when the workflow type was registered. If not set, then the default task priority for the workflow type is used. Valid values are integers that range from Java's <code>Integer.MIN_VALUE</code> (-2147483648) to <code>Integer.MAX_VALUE</code> (2147483647). Higher numbers indicate higher priority.</p> /// <p>For more information about setting task priority, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html">Setting Task Priority</a> in the <i>Amazon SWF Developer Guide</i>.</p> pub fn task_priority(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.task_priority(input.into()); self } /// <p>The task priority to use for this workflow execution. This overrides any default priority that was assigned when the workflow type was registered. If not set, then the default task priority for the workflow type is used. Valid values are integers that range from Java's <code>Integer.MIN_VALUE</code> (-2147483648) to <code>Integer.MAX_VALUE</code> (2147483647). Higher numbers indicate higher priority.</p> /// <p>For more information about setting task priority, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/programming-priority.html">Setting Task Priority</a> in the <i>Amazon SWF Developer Guide</i>.</p> pub fn set_task_priority( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_task_priority(input); self } /// <p>The input for the workflow execution. This is a free form string which should be meaningful to the workflow you are starting. This <code>input</code> is made available to the new workflow execution in the <code>WorkflowExecutionStarted</code> history event.</p> pub fn input(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.input(input.into()); self } /// <p>The input for the workflow execution. This is a free form string which should be meaningful to the workflow you are starting. This <code>input</code> is made available to the new workflow execution in the <code>WorkflowExecutionStarted</code> history event.</p> pub fn set_input(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_input(input); self } /// <p>The total duration for this workflow execution. 
This overrides the <code>defaultExecutionStartToCloseTimeout</code> specified when registering the workflow type.</p>
    /// <p>The duration is specified in seconds; an integer greater than or equal to <code>0</code>. Exceeding this limit causes the workflow execution to time out. Unlike some of the other timeout parameters in Amazon SWF, you cannot specify a value of "NONE" for this timeout; there is a one-year max limit on the time that a workflow execution can run.</p> <note>
    /// <p>An execution start-to-close timeout must be specified either through this parameter or as a default when the workflow type is registered. If neither this parameter nor a default execution start-to-close timeout is specified, a fault is returned.</p>
    /// </note>
    pub fn execution_start_to_close_timeout(
        mut self,
        input: impl Into<std::string::String>,
    ) -> Self {
        self.inner = self.inner.execution_start_to_close_timeout(input.into());
        self
    }
    /// <p>The total duration for this workflow execution. This overrides the <code>defaultExecutionStartToCloseTimeout</code> specified when registering the workflow type.</p>
    /// <p>The duration is specified in seconds; an integer greater than or equal to <code>0</code>. Exceeding this limit causes the workflow execution to time out. Unlike some of the other timeout parameters in Amazon SWF, you cannot specify a value of "NONE" for this timeout; there is a one-year max limit on the time that a workflow execution can run.</p> <note>
    /// <p>An execution start-to-close timeout must be specified either through this parameter or as a default when the workflow type is registered. If neither this parameter nor a default execution start-to-close timeout is specified, a fault is returned.</p>
    /// </note>
    pub fn set_execution_start_to_close_timeout(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_execution_start_to_close_timeout(input);
        self
    }
    /// Appends an item to `tagList`.
    ///
    /// To override the contents of this collection use [`set_tag_list`](Self::set_tag_list).
    ///
    /// <p>The list of tags to associate with the workflow execution. You can specify a maximum of 5 tags. You can list workflow executions with a specific tag by calling <code>ListOpenWorkflowExecutions</code> or <code>ListClosedWorkflowExecutions</code> and specifying a <code>TagFilter</code>.</p>
    pub fn tag_list(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.tag_list(input.into());
        self
    }
    /// <p>The list of tags to associate with the workflow execution. You can specify a maximum of 5 tags. You can list workflow executions with a specific tag by calling <code>ListOpenWorkflowExecutions</code> or <code>ListClosedWorkflowExecutions</code> and specifying a <code>TagFilter</code>.</p>
    pub fn set_tag_list(
        mut self,
        input: std::option::Option<std::vec::Vec<std::string::String>>,
    ) -> Self {
        self.inner = self.inner.set_tag_list(input);
        self
    }
    /// <p>Specifies the maximum duration of decision tasks for this workflow execution. This parameter overrides the <code>defaultTaskStartToCloseTimeout</code> specified when registering the workflow type using <code>RegisterWorkflowType</code>.</p>
    /// <p>The duration is specified in seconds, an integer greater than or equal to <code>0</code>. You can use <code>NONE</code> to specify unlimited duration.</p> <note>
    /// <p>A task start-to-close timeout for this workflow execution must be specified either as a default for the workflow type or through this parameter.
If neither this parameter is set nor a default task start-to-close timeout was specified at registration time then a fault is returned.</p>
    /// </note>
    pub fn task_start_to_close_timeout(
        mut self,
        input: impl Into<std::string::String>,
    ) -> Self {
        self.inner = self.inner.task_start_to_close_timeout(input.into());
        self
    }
    /// <p>Specifies the maximum duration of decision tasks for this workflow execution. This parameter overrides the <code>defaultTaskStartToCloseTimeout</code> specified when registering the workflow type using <code>RegisterWorkflowType</code>.</p>
    /// <p>The duration is specified in seconds, an integer greater than or equal to <code>0</code>. You can use <code>NONE</code> to specify unlimited duration.</p> <note>
    /// <p>A task start-to-close timeout for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default task start-to-close timeout was specified at registration time then a fault is returned.</p>
    /// </note>
    pub fn set_task_start_to_close_timeout(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_task_start_to_close_timeout(input);
        self
    }
    /// <p>If set, specifies the policy to use for the child workflow executions of this workflow execution if it is terminated, by calling the <code>TerminateWorkflowExecution</code> action explicitly or due to an expired timeout. This policy overrides the default child policy specified when registering the workflow type using <code>RegisterWorkflowType</code>.</p>
    /// <p>The supported child policies are:</p>
    /// <ul>
    /// <li> <p> <code>TERMINATE</code> – The child executions are terminated.</p> </li>
    /// <li> <p> <code>REQUEST_CANCEL</code> – A request to cancel is attempted for each child execution by recording a <code>WorkflowExecutionCancelRequested</code> event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.</p> </li>
    /// <li> <p> <code>ABANDON</code> – No action is taken. The child executions continue to run.</p> </li>
    /// </ul> <note>
    /// <p>A child policy for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default child policy was specified at registration time then a fault is returned.</p>
    /// </note>
    pub fn child_policy(mut self, input: crate::model::ChildPolicy) -> Self {
        self.inner = self.inner.child_policy(input);
        self
    }
    /// <p>If set, specifies the policy to use for the child workflow executions of this workflow execution if it is terminated, by calling the <code>TerminateWorkflowExecution</code> action explicitly or due to an expired timeout. This policy overrides the default child policy specified when registering the workflow type using <code>RegisterWorkflowType</code>.</p>
    /// <p>The supported child policies are:</p>
    /// <ul>
    /// <li> <p> <code>TERMINATE</code> – The child executions are terminated.</p> </li>
    /// <li> <p> <code>REQUEST_CANCEL</code> – A request to cancel is attempted for each child execution by recording a <code>WorkflowExecutionCancelRequested</code> event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.</p> </li>
    /// <li> <p> <code>ABANDON</code> – No action is taken.
The child executions continue to run.</p> </li>
    /// </ul> <note>
    /// <p>A child policy for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default child policy was specified at registration time then a fault is returned.</p>
    /// </note>
    pub fn set_child_policy(
        mut self,
        input: std::option::Option<crate::model::ChildPolicy>,
    ) -> Self {
        self.inner = self.inner.set_child_policy(input);
        self
    }
    /// <p>The IAM role to attach to this workflow execution.</p> <note>
    /// <p>Executions of this workflow type need IAM roles to invoke Lambda functions. If you don't attach an IAM role, any attempt to schedule a Lambda task fails. This results in a <code>ScheduleLambdaFunctionFailed</code> history event. For more information, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html">https://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html</a> in the <i>Amazon SWF Developer Guide</i>.</p>
    /// </note>
    pub fn lambda_role(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.lambda_role(input.into());
        self
    }
    /// <p>The IAM role to attach to this workflow execution.</p> <note>
    /// <p>Executions of this workflow type need IAM roles to invoke Lambda functions. If you don't attach an IAM role, any attempt to schedule a Lambda task fails. This results in a <code>ScheduleLambdaFunctionFailed</code> history event. For more information, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html">https://docs.aws.amazon.com/amazonswf/latest/developerguide/lambda-task.html</a> in the <i>Amazon SWF Developer Guide</i>.</p>
    /// </note>
    pub fn set_lambda_role(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_lambda_role(input);
        self
    }
}
/// Fluent builder constructing a request to `TagResource`.
///
/// <p>Add a tag to an Amazon SWF domain.</p> <note>
/// <p>Amazon SWF supports a maximum of 50 tags per resource.</p>
/// </note>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct TagResource {
    handle: std::sync::Arc<super::Handle>,
    inner: crate::input::tag_resource_input::Builder,
}
impl TagResource {
    /// Creates a new `TagResource`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::TagResourceOutput,
        aws_smithy_http::result::SdkError<crate::error::TagResourceError>,
    > {
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
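            // Editorial note: per the `send` docs above, retryable failures are retried
            // twice by default; that retry policy lives in the configured smithy client
            // that `call` dispatches through below.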
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>The Amazon Resource Name (ARN) for the Amazon SWF domain.</p>
    pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.resource_arn(input.into());
        self
    }
    /// <p>The Amazon Resource Name (ARN) for the Amazon SWF domain.</p>
    pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_resource_arn(input);
        self
    }
    /// Appends an item to `tags`.
    ///
    /// To override the contents of this collection use [`set_tags`](Self::set_tags).
    ///
    /// <p>The list of tags to add to a domain. </p>
    /// <p>Tags may only contain unicode letters, digits, whitespace, or these symbols: <code>_ . : / = + - @</code>.</p>
    pub fn tags(mut self, input: crate::model::ResourceTag) -> Self {
        self.inner = self.inner.tags(input);
        self
    }
    /// <p>The list of tags to add to a domain. </p>
    /// <p>Tags may only contain unicode letters, digits, whitespace, or these symbols: <code>_ . : / = + - @</code>.</p>
    pub fn set_tags(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::ResourceTag>>,
    ) -> Self {
        self.inner = self.inner.set_tags(input);
        self
    }
}
/// Fluent builder constructing a request to `TerminateWorkflowExecution`.
///
/// <p>Records a <code>WorkflowExecutionTerminated</code> event and forces closure of the workflow execution identified by the given domain, runId, and workflowId. The child policy, registered with the workflow type or specified when starting this execution, is applied to any open child workflow executions of this workflow execution.</p> <important>
/// <p>If the identified workflow execution was in progress, it is terminated immediately.</p>
/// </important> <note>
/// <p>If a runId isn't specified, then the <code>WorkflowExecutionTerminated</code> event is recorded in the history of the current open workflow with the matching workflowId in the domain.</p>
/// </note> <note>
/// <p>You should consider using the <code>RequestCancelWorkflowExecution</code> action instead because it allows the workflow to gracefully close while <code>TerminateWorkflowExecution</code> doesn't.</p>
/// </note>
/// <p> <b>Access Control</b> </p>
/// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p>
/// <ul>
/// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li>
/// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li>
/// <li> <p>You cannot use an IAM policy to constrain this action's parameters.</p> </li>
/// </ul>
/// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct TerminateWorkflowExecution {
    handle: std::sync::Arc<super::Handle>,
    inner: crate::input::terminate_workflow_execution_input::Builder,
}
impl TerminateWorkflowExecution {
    /// Creates a new `TerminateWorkflowExecution`.
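    ///
    /// This constructor is crate-internal; by the naming convention this file
    /// follows, user code obtains the builder from the fluent client (an assumed
    /// `terminate_workflow_execution()` method) rather than calling `new` directly.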
pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::TerminateWorkflowExecutionOutput, aws_smithy_http::result::SdkError<crate::error::TerminateWorkflowExecutionError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The domain of the workflow execution to terminate.</p> pub fn domain(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.domain(input.into()); self } /// <p>The domain of the workflow execution to terminate.</p> pub fn set_domain(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_domain(input); self } /// <p>The workflowId of the workflow execution to terminate.</p> pub fn workflow_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.workflow_id(input.into()); self } /// <p>The workflowId of the workflow execution to terminate.</p> pub fn set_workflow_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_workflow_id(input); self } /// <p>The runId of the workflow execution to terminate.</p> pub fn run_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.run_id(input.into()); self } /// <p>The runId of the workflow execution to terminate.</p> pub fn set_run_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_run_id(input); self } /// <p> A descriptive reason for terminating the workflow execution.</p> pub fn reason(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.reason(input.into()); self } /// <p> A descriptive reason for terminating the workflow execution.</p> pub fn set_reason(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_reason(input); self } /// <p> Details for terminating the workflow execution.</p> pub fn details(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.details(input.into()); self } /// <p> Details for terminating the workflow execution.</p> pub fn set_details(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_details(input); self } /// <p>If set, specifies the policy to use for the child workflow executions of the workflow execution being terminated. This policy overrides the child policy specified for the workflow execution at registration time or when starting the execution.</p> /// <p>The supported child policies are:</p> /// <ul> /// <li> <p> <code>TERMINATE</code> – The child executions are terminated.</p> </li> /// <li> <p> <code>REQUEST_CANCEL</code> – A request to cancel is attempted for each child execution by recording a <code>WorkflowExecutionCancelRequested</code> event in its history. 
It is up to the decider to take appropriate actions when it receives an execution history with this event.</p> </li> /// <li> <p> <code>ABANDON</code> – No action is taken. The child executions continue to run.</p> </li> /// </ul> <note> /// <p>A child policy for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default child policy was specified at registration time then a fault is returned.</p> /// </note> pub fn child_policy(mut self, input: crate::model::ChildPolicy) -> Self { self.inner = self.inner.child_policy(input); self } /// <p>If set, specifies the policy to use for the child workflow executions of the workflow execution being terminated. This policy overrides the child policy specified for the workflow execution at registration time or when starting the execution.</p> /// <p>The supported child policies are:</p> /// <ul> /// <li> <p> <code>TERMINATE</code> – The child executions are terminated.</p> </li> /// <li> <p> <code>REQUEST_CANCEL</code> – A request to cancel is attempted for each child execution by recording a <code>WorkflowExecutionCancelRequested</code> event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.</p> </li> /// <li> <p> <code>ABANDON</code> – No action is taken. The child executions continue to run.</p> </li> /// </ul> <note> /// <p>A child policy for this workflow execution must be specified either as a default for the workflow type or through this parameter. If neither this parameter is set nor a default child policy was specified at registration time then a fault is returned.</p> /// </note> pub fn set_child_policy( mut self, input: std::option::Option<crate::model::ChildPolicy>, ) -> Self { self.inner = self.inner.set_child_policy(input); self } } /// Fluent builder constructing a request to `UndeprecateActivityType`. /// /// <p>Undeprecates a previously deprecated <i>activity type</i>. After an activity type has been undeprecated, you can create new tasks of that activity type.</p> <note> /// <p>This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.</p> /// </note> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>Constrain the following parameters by using a <code>Condition</code> element with the appropriate keys.</p> /// <ul> /// <li> <p> <code>activityType.name</code>: String constraint. The key is <code>swf:activityType.name</code>.</p> </li> /// <li> <p> <code>activityType.version</code>: String constraint. The key is <code>swf:activityType.version</code>.</p> </li> /// </ul> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. 
For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct UndeprecateActivityType { handle: std::sync::Arc<super::Handle>, inner: crate::input::undeprecate_activity_type_input::Builder, } impl UndeprecateActivityType { /// Creates a new `UndeprecateActivityType`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::UndeprecateActivityTypeOutput, aws_smithy_http::result::SdkError<crate::error::UndeprecateActivityTypeError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the domain of the deprecated activity type.</p> pub fn domain(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.domain(input.into()); self } /// <p>The name of the domain of the deprecated activity type.</p> pub fn set_domain(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_domain(input); self } /// <p>The activity type to undeprecate.</p> pub fn activity_type(mut self, input: crate::model::ActivityType) -> Self { self.inner = self.inner.activity_type(input); self } /// <p>The activity type to undeprecate.</p> pub fn set_activity_type( mut self, input: std::option::Option<crate::model::ActivityType>, ) -> Self { self.inner = self.inner.set_activity_type(input); self } } /// Fluent builder constructing a request to `UndeprecateDomain`. /// /// <p>Undeprecates a previously deprecated domain. After a domain has been undeprecated it can be used to create new workflow executions or register new types.</p> <note> /// <p>This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.</p> /// </note> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>You cannot use an IAM policy to constrain this action's parameters.</p> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. 
For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct UndeprecateDomain { handle: std::sync::Arc<super::Handle>, inner: crate::input::undeprecate_domain_input::Builder, } impl UndeprecateDomain { /// Creates a new `UndeprecateDomain`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::UndeprecateDomainOutput, aws_smithy_http::result::SdkError<crate::error::UndeprecateDomainError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the domain of the deprecated workflow type.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.name(input.into()); self } /// <p>The name of the domain of the deprecated workflow type.</p> pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_name(input); self } } /// Fluent builder constructing a request to `UndeprecateWorkflowType`. /// /// <p>Undeprecates a previously deprecated <i>workflow type</i>. After a workflow type has been undeprecated, you can create new executions of that type. </p> <note> /// <p>This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes.</p> /// </note> /// <p> <b>Access Control</b> </p> /// <p>You can use IAM policies to control this action's access to Amazon SWF resources as follows:</p> /// <ul> /// <li> <p>Use a <code>Resource</code> element with the domain name to limit the action to only specified domains.</p> </li> /// <li> <p>Use an <code>Action</code> element to allow or deny permission to call this action.</p> </li> /// <li> <p>Constrain the following parameters by using a <code>Condition</code> element with the appropriate keys.</p> /// <ul> /// <li> <p> <code>workflowType.name</code>: String constraint. The key is <code>swf:workflowType.name</code>.</p> </li> /// <li> <p> <code>workflowType.version</code>: String constraint. The key is <code>swf:workflowType.version</code>.</p> </li> /// </ul> </li> /// </ul> /// <p>If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's <code>cause</code> parameter is set to <code>OPERATION_NOT_PERMITTED</code>. 
For details and example IAM policies, see <a href="https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html">Using IAM to Manage Access to Amazon SWF Workflows</a> in the <i>Amazon SWF Developer Guide</i>.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct UndeprecateWorkflowType { handle: std::sync::Arc<super::Handle>, inner: crate::input::undeprecate_workflow_type_input::Builder, } impl UndeprecateWorkflowType { /// Creates a new `UndeprecateWorkflowType`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::UndeprecateWorkflowTypeOutput, aws_smithy_http::result::SdkError<crate::error::UndeprecateWorkflowTypeError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the domain of the deprecated workflow type.</p> pub fn domain(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.domain(input.into()); self } /// <p>The name of the domain of the deprecated workflow type.</p> pub fn set_domain(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_domain(input); self } /// <p>The name of the domain of the deprecated workflow type.</p> pub fn workflow_type(mut self, input: crate::model::WorkflowType) -> Self { self.inner = self.inner.workflow_type(input); self } /// <p>The name of the domain of the deprecated workflow type.</p> pub fn set_workflow_type( mut self, input: std::option::Option<crate::model::WorkflowType>, ) -> Self { self.inner = self.inner.set_workflow_type(input); self } } /// Fluent builder constructing a request to `UntagResource`. /// /// <p>Remove a tag from a Amazon SWF domain.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct UntagResource { handle: std::sync::Arc<super::Handle>, inner: crate::input::untag_resource_input::Builder, } impl UntagResource { /// Creates a new `UntagResource`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::UntagResourceOutput, aws_smithy_http::result::SdkError<crate::error::UntagResourceError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The Amazon Resource Name (ARN) for the Amazon SWF domain.</p> pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.resource_arn(input.into()); self } /// <p>The Amazon Resource Name (ARN) for the Amazon SWF domain.</p> pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_resource_arn(input); self } /// Appends an item to `tagKeys`. /// /// To override the contents of this collection use [`set_tag_keys`](Self::set_tag_keys). /// /// <p>The list of tags to remove from the Amazon SWF domain.</p> pub fn tag_keys(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.tag_keys(input.into()); self } /// <p>The list of tags to remove from the Amazon SWF domain.</p> pub fn set_tag_keys( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.inner = self.inner.set_tag_keys(input); self } } } impl Client { /// Creates a client with the given service config and connector override. pub fn from_conf_conn<C, E>(conf: crate::Config, conn: C) -> Self where C: aws_smithy_client::bounds::SmithyConnector<Error = E> + Send + 'static, E: Into<aws_smithy_http::result::ConnectorError>, { let retry_config = conf.retry_config.as_ref().cloned().unwrap_or_default(); let timeout_config = conf.timeout_config.as_ref().cloned().unwrap_or_default(); let sleep_impl = conf.sleep_impl.clone(); let mut builder = aws_smithy_client::Builder::new() .connector(aws_smithy_client::erase::DynConnector::new(conn)) .middleware(aws_smithy_client::erase::DynMiddleware::new( crate::middleware::DefaultMiddleware::new(), )); builder.set_retry_config(retry_config.into()); builder.set_timeout_config(timeout_config); if let Some(sleep_impl) = sleep_impl { builder.set_sleep_impl(Some(sleep_impl)); } let client = builder.build(); Self { handle: std::sync::Arc::new(Handle { client, conf }), } } /// Creates a new client from a shared config. #[cfg(any(feature = "rustls", feature = "native-tls"))] pub fn new(sdk_config: &aws_types::sdk_config::SdkConfig) -> Self { Self::from_conf(sdk_config.into()) } /// Creates a new client from the service [`Config`](crate::Config). #[cfg(any(feature = "rustls", feature = "native-tls"))] pub fn from_conf(conf: crate::Config) -> Self { let retry_config = conf.retry_config.as_ref().cloned().unwrap_or_default(); let timeout_config = conf.timeout_config.as_ref().cloned().unwrap_or_default(); let sleep_impl = conf.sleep_impl.clone(); let mut builder = aws_smithy_client::Builder::dyn_https().middleware( aws_smithy_client::erase::DynMiddleware::new( crate::middleware::DefaultMiddleware::new(), ), ); builder.set_retry_config(retry_config.into()); builder.set_timeout_config(timeout_config); // the builder maintains a try-state. To avoid suppressing the warning when sleep is unset, // only set it if we actually have a sleep impl. if let Some(sleep_impl) = sleep_impl { builder.set_sleep_impl(Some(sleep_impl)); } let client = builder.build(); Self { handle: std::sync::Arc::new(Handle { client, conf }), } } }
/// </note> pub fn tag_filter(mut self, input: crate::model::TagFilter) -> Self { self.inner = self.inner.tag_filter(input); self
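The fluent builders above pair each SWF operation with a build()/send() step. Purely as a point of comparison, the same TerminateWorkflowExecution call can be issued from Python through boto3 (a different AWS SDK, not part of this generated client); the region, domain, and workflow ID below are placeholders.

import boto3

# Placeholder identifiers; substitute real values from your account.
swf = boto3.client("swf", region_name="us-east-1")
swf.terminate_workflow_execution(
    domain="example-domain",
    workflowId="example-workflow-id",
    reason="manual cleanup",
    childPolicy="TERMINATE",  # same child policies as documented above
)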
config.js
/**
 * The default configuration for the ParticleField component
 *
 * Any option passed in via props will overwrite the default config
 */
export default {
  showCube: true,
  dimension: '3D',
  velocity: 2,
  lines: {
    colorMode: 'rainbow',
    color: '#351CCB',
    transparency: 0.9,
    limitConnections: true,
    maxConnections: 20,
    minDistance: 150,
    visible: true
  },
  particles: {
    colorMode: 'rainbow',
    color: '#3FB568',
    transparency: 0.9,
    shape: 'square',
    boundingBox: 'canvas',
    count: 500,
    minSize: 10,
    maxSize: 75,
    visible: true
  },
  cameraControls: {
    enabled: true,
    enableDamping: true,
    dampingFactor: 0.2,
    enableZoom: true,
    autoRotate: true,
    autoRotateSpeed: 0.3,
    resetCameraFlag: false
  }
};
action.service.ts
import {Injectable} from '@angular/core'; import {Observer, Observable} from 'rxjs'; @Injectable() export class ActionService<T> { private actionObservers: {[actionName: string]: Observer<{element: T, data: any}>} = {}; private actionObservable: {[actionName: string]: Observable<{element: T, data: any}>} = {}; public registerAction(action: string): Observable<{element: T, data: any}> { if (action in this.actionObservable) { return this.actionObservable[action]; } this.actionObservable[action] = Observable.create((function (observer) { this.actionObservers[action] = observer; }).bind(this));
public emitAction(actionName: string, element: T, data?: any) { if (!(actionName in this.actionObservers)) { throw new Error('action: `' + actionName + '\' was not registered'); } this.actionObservers[actionName].next({element: element, data: data}); return false; } }
return this.actionObservable[action]; }
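The service keeps one observable per action name and pushes {element, data} pairs into it. A rough Python sketch of the same register/emit idea, with plain callbacks standing in for RxJS observables (all names here are illustrative, not part of the Angular service):

class ActionService:
    def __init__(self):
        self._observers = {}

    def register_action(self, action, callback):
        # One action name can fan out to several subscribers.
        self._observers.setdefault(action, []).append(callback)

    def emit_action(self, action, element, data=None):
        # Mirrors emitAction(): emitting an unregistered action is an error.
        if action not in self._observers:
            raise KeyError("action %r was not registered" % action)
        for cb in self._observers[action]:
            cb({"element": element, "data": data})

svc = ActionService()
svc.register_action("select", print)
svc.emit_action("select", element="row-1", data={"id": 1})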
included_dimension_props.py
""" OpenAPI definition No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 The version of the OpenAPI document: v0 Contact: [email protected] Generated by: https://openapi-generator.tech """ import re # noqa: F401 import sys # noqa: F401 from gooddata_afm_client.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, OpenApiModel ) from gooddata_afm_client.exceptions import ApiAttributeError class IncludedDimensionProps(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = { } validations = { } @cached_property def additional_properties_type():
_nullable = True @cached_property def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ return { 'dimension_attributes_values': ({str: ([str],)},), # noqa: E501 } @cached_property def discriminator(): return None attribute_map = { 'dimension_attributes_values': 'dimensionAttributesValues', # noqa: E501 } read_only_vars = { } _composed_schemas = {} @classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, dimension_attributes_values, *args, **kwargs): # noqa: E501 """IncludedDimensionProps - a model defined in OpenAPI Args: dimension_attributes_values ({str: ([str],)}): Allows to customize for which attribute values the grand total will be computed. If the values for particular attribute are not specified then the totals for all values are computed. Note that this also covers the case of individual metrics (treated as values of the \"measureGroup\" pseudo attribute). Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.dimension_attributes_values = dimension_attributes_values for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. 
continue setattr(self, var_name, var_value) return self required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', ]) @convert_js_args_to_python_args def __init__(self, dimension_attributes_values, *args, **kwargs): # noqa: E501 """IncludedDimensionProps - a model defined in OpenAPI Args: dimension_attributes_values ({str: ([str],)}): Allows to customize for which attribute values the grand total will be computed. If the values for particular attribute are not specified then the totals for all values are computed. Note that this also covers the case of individual metrics (treated as values of the \"measureGroup\" pseudo attribute). Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.dimension_attributes_values = dimension_attributes_values for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value) if var_name in self.read_only_vars: raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " f"class with read only attributes.")
""" This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
index.js
export * from './style' export * from './styles/index'
export * from './th' export * from './variant' export * from './breakpoints' export { getBreakpoints } from './media' export { merge } from './util'
HydrusExceptions.py
import collections.abc import os class HydrusException( Exception ): def __str__( self ): if isinstance( self.args, collections.abc.Iterable ):
for arg in self.args: try: s.append( str( arg ) ) except: s.append( repr( arg ) ) else: s = [ repr( self.args ) ] return os.linesep.join( s ) class CantRenderWithCVException( HydrusException ): pass class DataMissing( HydrusException ): pass class DBException( HydrusException ): def __init__( self, e, first_line, db_traceback ): self.db_e = e HydrusException.__init__( self, first_line, db_traceback ) class DBAccessException( HydrusException ): pass class DBCredentialsException( HydrusException ): pass class FileMissingException( HydrusException ): pass class DirectoryMissingException( HydrusException ): pass class SerialisationException( HydrusException ): pass class NameException( HydrusException ): pass class ShutdownException( HydrusException ): pass class QtDeadWindowException(HydrusException): pass class VetoException( HydrusException ): pass class CancelledException( VetoException ): pass class UnsupportedFileException( VetoException ): pass class DamagedOrUnusualFileException( UnsupportedFileException ): pass class FileSizeException( UnsupportedFileException ): pass class DecompressionBombException( FileSizeException ): pass class TagSizeException( VetoException ): pass class ParseException( HydrusException ): pass class StringConvertException( ParseException ): pass class StringMatchException( ParseException ): pass class StringSplitterException( ParseException ): pass class URLClassException( ParseException ): pass class GUGException( ParseException ): pass class NetworkException( HydrusException ): pass class NetworkInfrastructureException( NetworkException ): pass class ConnectionException( NetworkInfrastructureException ): pass class FirewallException( NetworkInfrastructureException ): pass class RouterException( NetworkInfrastructureException ): pass class CloudFlareException( NetworkInfrastructureException ): pass class BandwidthException( NetworkInfrastructureException ): pass class ServerException( NetworkInfrastructureException ): pass class ServerBusyException( NetworkInfrastructureException ): pass class StreamTimeoutException( NetworkException ): pass class NetworkVersionException( NetworkException ): pass class NoContentException( NetworkException ): pass class NotFoundException( NetworkException ): pass class NotModifiedException( NetworkException ): pass class BadRequestException( NetworkException ): pass class ConflictException( NetworkException ): pass class MissingCredentialsException( NetworkException ): pass class DoesNotSupportCORSException( NetworkException ): pass class InsufficientCredentialsException( NetworkException ): pass class RedirectionException( NetworkException ): pass class SessionException( NetworkException ): pass class WrongServiceTypeException( NetworkException ): pass class ValidationException( NetworkException ): pass class ShouldReattemptNetworkException( NetworkException ): pass
s = []
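Because every exception here derives from HydrusException, a handler can catch whole families through a base class, and __str__ joins the constructor arguments one per line. A small illustration, assuming it runs in the same module as the classes above:

try:
    raise NotFoundException(404, 'resource missing')
except NetworkException as e:   # caught via the shared base class
    print(str(e))               # prints '404' and 'resource missing' on separate lines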
util.go
// Copyright 2016 Albert Nigmatzianov. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package util import ( "os" "path/filepath" "strconv" "strings" ) const ( secondsInMinute = 60 minutesInHour = 60 ) func ParseDuration(secs int) (seconds, minutes, hours int) { seconds = secs if seconds >= secondsInMinute { minutes = seconds / secondsInMinute seconds -= minutes * secondsInMinute } if minutes >= minutesInHour { hours = minutes / minutesInHour minutes -= hours * minutesInHour } return } func DurationString(seconds, minutes, hours int) (duration string) { duration = formatNumber(minutes) + ":" + formatNumber(seconds) if hours > 0 { duration = formatNumber(hours) + ":" + duration } return } func formatNumber(num int) (formatted string) { if num < 10
formatted += strconv.Itoa(num) return } func SanitizePath(path string) string { if strings.HasPrefix(path, "~") { path = strings.Replace(path, "~", os.Getenv("HOME"), 1) } return filepath.Clean(path) }
{ formatted += "0" }
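For clarity, the same split-and-format arithmetic expressed in Python (an illustrative mirror, not part of this Go package):

def parse_duration(secs):
    # Same arithmetic as ParseDuration above: split total seconds into
    # seconds, minutes, hours.
    minutes, seconds = divmod(secs, 60)
    hours, minutes = divmod(minutes, 60)
    return seconds, minutes, hours

def duration_string(seconds, minutes, hours):
    # Mirrors DurationString/formatNumber: zero-pad single digits and only
    # include the hours component when it is non-zero.
    duration = "%02d:%02d" % (minutes, seconds)
    if hours > 0:
        duration = "%02d:%s" % (hours, duration)
    return duration

print(duration_string(*parse_duration(3725)))  # -> 01:02:05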
sc2autosave.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""sc2autosave is a utility for reorganizing and renaming Starcraft II files.

Overview
==============

sc2autosave provides a simple mechanism for renaming replay files as they are
copied or moved from a source directory to a destination directory. In between
runs the state is stored in the sc2autosave.dat file saved to the destination
folder. In this way, multiple destination folders with different organizations
and formats can be maintained independently.

General Operation
-------------------

When first run for a given destination directory, sc2autosave scans for all
files since the epoch. Each subsequent run scans only for new files since the
previous scan time. This behavior can be modified on a run-by-run basis with
the --since DATETIME option.

By default the source directory is scanned recursively. The --depth DEPTH
option can limit and/or eliminate this recursion.

Files identified as new are then copied to the destination directory. The
--move option can override this behavior. The default behavior is a good idea
because it ensures that there is a backup copy and allows for several
different file structures to be constructed with different sc2autosave
configurations for easy replay navigation. You might keep your replay files
redundantly stored sorted by format, by map, and by matchup for easy lookup
later on.

While normally run as a batch process, the --period SECONDS option can be
used to run sc2autosave as a background process, scanning the directory for
changes every SECONDS seconds. This is useful for creating background
processes on operating system start up.

Renaming Replays
--------------------

The --rename option allows you to specify a renaming format string. The
string is constructed the pythonic (3.0) way with {:field} indicating the
substitution of a field. The forward slash (/) is a special character here
which terminates a folder name and allows for organization into
subdirectories. All other string characters form the template into which the
fields are inserted.

Fields related to dates and times (:date, :datetime, :length fields) can be
formatted through their respective directives (--date, --datetime, --length)
according to python date formatting conventions.

Additionally, the player display format can be refined with the
--player-format FORMAT directive which is interpreted similarly to the
--rename FORMAT directive detailed above.

Once content has been defined to your tastes you may wish to get specific
about the ordering of the teams and players on those teams in the replay
name. The --team-order-by and --player-order-by directives can be used for
this purpose. A common preference is to favor specific players (like yourself
and friends) and their teams in the ordering by placing them first in the
listing. The --favored PLAYER1 [PLAYER2] directive supports this preference.

Filtering Replays
---------------------

Once a replay has been scanned and parsed you have an opportunity to filter
it for inclusion in the destination directory. This is useful when
constructing various different types of replay packs for distribution and
review. Replays are small and Battle.net has a terrible filesystem based
replay locator; why not make your life easier with a little duplication.

    --filter-players PLAYER [PLAYER ...]
    --filter-matchup MATCHUP [MATCHUP ...]
    --filter-map NAME [NAME ...]
    --filter-length LOW HIGH
    --filter-date START END

Example Configurations
------------------------

This first basic configuration sets up a background process to copy new
replays without renaming to a 'Saved' subdirectory every 10 seconds. The
depth 0 option keeps the script from looking into the 'Saved' subdirectory.

    sc2autosave \
        --source ~/My\ Documents/Starcraft\ II/Accounts/.../Multiplayer \
        --dest ~/My\ Documents/Starcraft\ II/Accounts/.../Multiplayer/Saved \
        --period 10 \
        --depth 0

This next configuration runs in batch mode using the default renaming format.

    sc2autosave \
        --source ~/My\ Documents/Starcraft\ II/Accounts/.../Multiplayer \
        --dest ~/My\ Documents/Starcraft\ II/Accounts/.../Multiplayer/Saved \
        --rename

    (ZvP) Lost Temple: ShadesofGray(Z) vs Trisfall(P).SC2Replay
    (ZZvPP) Shattered Temple: ShadesofGray(Z), Remedy(Z) vs ProfProbe(P), Trisfall(P).SC2Replay

Here is a heavily customized format that organizes replays into
subdirectories by replay format and favors ShadesofGray in the player and
team orderings.

    sc2autosave \
        --source ~/My\ Documents/Starcraft\ II/Accounts/.../Multiplayer \
        --dest ~/My\ Documents/Starcraft\ II/Accounts/.../Multiplayer/Saved \
        --rename "{:format}/{:matchup} on {:map}: {:teams}" \
        --player-format "{:name}({:play_race})" \
        --team-order-by number \
        --player-order-by name \
        --favored ShadesofGray

    1v1/ZvP on Lost Temple: ShadesofGray(Z) vs Trisfall(P).SC2Replay
    2v2/ZZvPP on Shattered Temple: ShadesofGray(Z), Remedy(Z) vs ProfProbe(P), Trisfall(P).SC2Replay

Next is another customized format which organizes replays by matchup. It uses
strict player and team ordering by number with no exceptions and formats game
length to show both minutes and seconds.

    sc2autosave \
        --source ~/My\ Documents/Starcraft\ II/Accounts/.../Multiplayer \
        --dest ~/My\ Documents/Starcraft\ II/Accounts/.../Multiplayer/Saved \
        --rename "{:matchup}/({:length}) {:map}: {:teams}" \
        --player-format "{:name}({:play_race})" \
        --team-order-by number \
        --player-order-by number \
        --length "%M:%S"

    PvZ/(20:14) Lost Temple: Trisfall(P) vs ShadesofGray(Z).SC2Replay
    ZZvPP/(35:40) Shattered Temple: Remedy(Z), ShadesofGray(Z) vs Trisfall(P), ProfProbe(P).SC2Replay

Complete Reference Guide
---------------------------

--source SOURCE_FOLDER
    The source folder to scan for replays. Uses recursive scan by default.

--dest DESTINATION_FOLDER
    The destination folder to place replays into.

--depth DEPTH
    Allows recursion to be limited and/or disabled (with DEPTH=0).

--period SECONDS
    Puts sc2autosave into continuous mode, scanning the directory for new
    files every SECONDS seconds.

--rename FORMAT
    :map - Inserts the map name.
    :date - Inserts a string formatted datetime object using --date-format.
    :length - Inserts a string formatted time object using --length-format.
    :teams - Inserts a comma separated player list. Teams are separated with
        a ' vs ' string. Format the player with --player-format.
    :format - Inserts the map format (1v1, 2v2, 3v3, etc)
    :matchup - Inserts the matchup (ZvZ, PTvTZ, etc). The matchup is in team
        order with races ordered alphabetically; not by player! This makes
        matchups more consistent and useful for sorting.

--length-format FORMAT
--player-format FORMAT
--date-format FORMAT
--team-order-by FIELD
--player-order-by FIELD
--favored NAME [NAME,...]

POST-Parse filtering vs preparse filtering?
POST-Parse, how to do it?!?!?!?!
""" import argparse import cPickle import os import shutil import sys import time import sc2reader try: raw_input # Python 2 except NameError: raw_input = input # Python 3 def run(args): # Reset wipes the destination clean so we can start over. if args.reset: reset(args) # Set up validates the destination and source directories. # It also loads the previous state or creates one as necessary. state = setup(args) # We break out of this loop in batch mode and on KeyboardInterrupt while True: # The file scan uses the arguments and the state to filter down to # only new (since the last sync time) files. for path in scan(args, state): try: # Read the file and expose useful aspects for renaming/filtering replay = sc2reader.load_replay(path, load_level=2) except KeyboardInterrupt: raise except: # Failure to parse file_name = os.path.basename(path) directory = make_directory(args, ("parse_error",)) new_path = os.path.join(directory, file_name) source_path = path[len(args.source) :] args.log.write("Error parsing replay: {0}".format(source_path)) if not args.dryrun: args.action.run(path, new_path) # Skip to the next replay continue aspects = generate_aspects(args, replay) # Use the filter args to select files based on replay attributes if filter_out_replay(args, replay): continue # Apply the aspects to the rename formatting. #'/' is a special character for creation of subdirectories. # TODO: Handle duplicate replay names, its possible.. path_parts = args.rename.format(**aspects).split("/") filename = path_parts.pop() + ".SC2Replay" # Construct the directory and file paths; create needed directories directory = make_directory(args, path_parts) new_path = os.path.join(directory, filename) # Find the source relative to the source directory for reporting dest_path = new_path[len(args.dest) :] source_path = path[len(args.source) :] # Log the action and run it if we are live msg = "{0}:\n\tSource: {1}\n\tDest: {2}\n" args.log.write(msg.format(args.action.type, source_path, dest_path)) if not args.dryrun: args.action.run(path, new_path) # After every batch completes, save the state and flush the log # TODO: modify the state to include a list of remaining files args.log.flush() save_state(state, args) # We only run once in batch mode! if args.mode == "BATCH": break # Since new replays come in fairly infrequently, reduce system load # by sleeping for an acceptable response time before the next scan. time.sleep(args.period) args.log.write("Batch Completed") def filter_out_replay(args, replay): player_names = set([player.name for player in replay.players]) filter_out_player = not set(args.filter_player) & player_names if args.filter_rule == "ALLOW": return filter_out_player else: return not filter_out_player # We need to create these compare functions at runtime because the ordering # hinges on the --favored PLAYER options passed in from the command line. 
def create_compare_funcs(args): favored_set = set(name.lower() for name in args.favored) def player_compare(player1, player2): # Normalize the player names and generate our key metrics player1_name = player1.name.lower() player2_name = player2.name.lower() player1_favored = player1_name in favored_set player2_favored = player2_name in favored_set # The favored player always comes first in the ordering if player1_favored and not player2_favored: return -1 elif player2_favored and not player1_favored: return 1 # The most favored person will always be listed first elif player1_favored and player2_favored: player1_index = args.favored.index(player1_name) player2_index = args.favored.index(player2_name) return player1_index - player2_index # If neither is favored, we'll order by number for now # TODO: Allow command line specification of other orderings (maybe?) else: return player1.pid - player2.pid def team_compare(team1, team2): # Normalize the team name lists and generate our key metrics team1_names = set(p.name.lower() for p in team1.players) team2_names = set(p.name.lower() for p in team2.players) team1_favored = team1_names & favored_set team2_favored = team2_names & favored_set # The team with the favored players will always be listed first if team1_favored and not team2_favored: return -1 elif team2_favored and not team1_favored: return 1 # The team with the most favored person will always come first elif team1_favored and team2_favored: team1_best = sorted(args.favored.index(n) for n in team1_favored) team2_best = sorted(args.favored.index(n) for n in team2_favored) return team1_best[-1] - team2_best[-1] # If neither is favored, we'll order by number for now # TODO: Allow command line specification of other orderings (maybe?) else: return team1.number - team2.number return team_compare, player_compare def generate_aspects(args, replay): teams = sorted(replay.teams, args.team_compare) matchups, team_strings = list(), list() for team in teams: team.players = sorted(team.players, args.player_compare) composition = sorted(p.play_race[0].upper() for p in team.players) matchups.append("".join(composition)) string = ", ".join(p.format(args.player_format) for p in team.players) team_strings.append(string) return sc2reader.utils.AttributeDict( result=teams[0].result, length=replay.length, map=replay.map, type=replay.type, date=replay.date.strftime(args.date_format), matchup="v".join(matchups), teams=" vs ".join(team_strings), ) def
(args, path_parts): directory = args.dest for part in path_parts: directory = os.path.join(directory, part) if not os.path.exists(directory): args.log.write("Creating subfolder: {0}\n".format(directory)) if not args.dryrun: os.mkdir(directory) elif not os.path.isdir(directory): exit("Cannot create subfolder. Path is occupied: {0}", directory) return directory def scan(args, state): args.log.write("SCANNING: {0}\n".format(args.source)) files = sc2reader.utils.get_files( path=args.source, regex=args.exclude_files, allow=False, exclude=args.exclude_dirs, depth=args.depth, followlinks=args.follow_links, ) return filter(lambda f: os.path.getctime(f) > state.last_sync, files) def exit(msg, *args, **kwargs): sys.exit(msg.format(*args, **kwargs) + "\n\nScript Aborted.") def reset(args): if not os.path.exists(args.dest): exit("Cannot reset, destination does not exist: {0}", args.dest) elif not os.path.isdir(args.dest): exit("Cannot reset, destination must be directory: {0}", args.dest) print( "About to reset directory: {0}\nAll files and subdirectories will be removed.".format( args.dest ) ) choice = raw_input("Proceed anyway? (y/n) ") if choice.lower() == "y": args.log.write("Removing old directory: {0}\n".format(args.dest)) if not args.dryrun: print(args.dest) shutil.rmtree(args.dest) else: sys.exit("Script Aborted") def setup(args): args.team_compare, args.player_compare = create_compare_funcs(args) args.action = sc2reader.utils.AttributeDict( type=args.action, run=shutil.copy if args.action == "COPY" else shutil.move ) if not os.path.exists(args.source): msg = "Source does not exist: {0}.\n\nScript Aborted." sys.exit(msg.format(args.source)) elif not os.path.isdir(args.source): msg = "Source is not a directory: {0}.\n\nScript Aborted." sys.exit(msg.format(args.source)) if not os.path.exists(args.dest): if not args.dryrun: os.mkdir(args.dest) else: args.log.write("Creating destination: {0}\n".format(args.dest)) elif not os.path.isdir(args.dest): sys.exit("Destination must be a directory.\n\nScript Aborted") data_file = os.path.join(args.dest, "sc2autosave.dat") args.log.write("Loading state from file: {0}\n".format(data_file)) if os.path.isfile(data_file) and not args.reset: with open(data_file) as file: return cPickle.load(file) else: return sc2reader.utils.AttributeDict(last_sync=0) def save_state(state, args): state.last_sync = time.time() data_file = os.path.join(args.dest, "sc2autosave.dat") if not args.dryrun: with open(data_file, "w") as file: cPickle.dump(state, file) else: args.log.write("Writing state to file: {0}\n".format(data_file)) def main(): parser = argparse.ArgumentParser( description="Automatically copy new replays to directory", fromfile_prefix_chars="@", formatter_class=sc2reader.scripts.utils.Formatter.new(max_help_position=35), epilog="And that's all folks", ) required = parser.add_argument_group("Required Arguments") required.add_argument("source", type=str, help="The source directory to poll") required.add_argument("dest", type=str, help="The destination directory to copy to") general = parser.add_argument_group("General Options") general.add_argument( "--mode", dest="mode", type=str, choices=["BATCH", "CYCLE"], default="BATCH", help="The operating mode for the organizer", ) general.add_argument( "--action", dest="action", choices=["COPY", "MOVE"], default="COPY", type=str, help="Have the organizer move your files instead of copying", ) general.add_argument( "--period", dest="period", type=int, default=0, help="The period of time to wait between scans.", ) 
    general.add_argument(
        "--log",
        dest="log",
        metavar="LOGFILE",
        type=argparse.FileType("w"),
        default=sys.stdout,
        help="Destination file for log information",
    )
    general.add_argument(
        "--dryrun",
        dest="dryrun",
        action="store_true",
        help="Don't do anything. Only simulate the output",
    )
    general.add_argument(
        "--reset",
        dest="reset",
        action="store_true",
        default=False,
        help="Wipe the destination directory clean and start over.",
    )

    fileargs = parser.add_argument_group("File Options")
    fileargs.add_argument(
        "--depth",
        dest="depth",
        type=int,
        default=-1,
        help="Maximum recursion depth. -1 (default) is unlimited.",
    )
    fileargs.add_argument(
        "--exclude-dirs",
        dest="exclude_dirs",
        type=str,
        metavar="NAME",
        nargs="+",
        default=[],
        help="A list of directory names to exclude during recursion",
    )
    fileargs.add_argument(
        "--exclude-files",
        dest="exclude_files",
        type=str,
        metavar="REGEX",
        default="",
        help="An expression to match excluded files",
    )
    fileargs.add_argument(
        "--follow-links",
        dest="follow_links",
        action="store_true",
        default=False,
        help="Enable following of symbolic links while scanning",
    )

    renaming = parser.add_argument_group("Renaming Options")
    renaming.add_argument(
        "--rename",
        dest="rename",
        type=str,
        metavar="FORMAT",
        nargs="?",
        default="{length} {type} on {map}",
        help="""\
The renaming format string can have the following values:

    * {length} - The length of the replay ([H:]MM:SS)
    * {type} - The type of the replay (1v1,2v2,4v4,etc)
    * {map} - The map that was played on.
    * {match} - Race matchup in team order, alphabetically by race.
    * {date} - The date the replay was played on
    * {teams} - The player line up
""",
    )
    renaming.add_argument(
        "--length-format",
        dest="length_format",
        type=str,
        metavar="FORMAT",
        default="%M.%S",
        help="The length format string. See the python time module for details",
    )
    renaming.add_argument(
        "--player-format",
        dest="player_format",
        type=str,
        metavar="FORMAT",
        default="{name} ({play_race})",
        help="The player format string used to render the :teams content item.",
    )
    renaming.add_argument(
        "--date-format",
        dest="date_format",
        type=str,
        metavar="FORMAT",
        default="%m-%d-%Y",
        help="The date format string used to render the :date content item.",
    )
    """
    renaming.add_argument('--team-order-by',
        dest='team_order', type=str, metavar='FIELD', default='NUMBER',
        help='The field by which teams are ordered.')
    renaming.add_argument('--player-order-by',
        dest='player_order', type=str, metavar='FIELD', default='NAME',
        help='The field by which players are ordered on teams.')
    """
    renaming.add_argument(
        "--favored",
        dest="favored",
        type=str,
        default=[],
        metavar="NAME",
        nargs="+",
        help="A list of the players to favor in ordering teams and players",
    )

    filterargs = parser.add_argument_group("Filtering Options")
    filterargs.add_argument(
        "--filter-rule",
        dest="filter_rule",
        choices=["ALLOW", "DENY"],
        help="The filters can either be used as a white list or a black list",
    )
    filterargs.add_argument(
        "--filter-player",
        metavar="NAME",
        dest="filter_player",
        nargs="+",
        type=str,
        default=[],
        help="A list of players to filter on",
    )

    try:
        run(parser.parse_args())
    except KeyboardInterrupt:
        print("\n\nScript Interrupted. Process Aborting")


if __name__ == "__main__":
    main()
make_directory
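run() expands the --rename string with str.format over the aspects produced by generate_aspects() and splits subdirectories on '/'. A hand-built illustration of that expansion (the aspect values below are made up):

# Hypothetical aspects dict, mirroring the keys generate_aspects() returns
# (type, matchup, map, teams, date, length, result).
aspects = {"type": "1v1", "matchup": "ZvP", "map": "Lost Temple",
           "teams": "ShadesofGray(Z) vs Trisfall(P)"}
path_parts = "{type}/{matchup} on {map}: {teams}".format(**aspects).split("/")
filename = path_parts.pop() + ".SC2Replay"
print(path_parts, filename)
# -> ['1v1'] 'ZvP on Lost Temple: ShadesofGray(Z) vs Trisfall(P).SC2Replay'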
gilded_rose.py
""" An implementation for the gilded rose kata as I understand it. https://github.com/NotMyself/GildedRose """ from collections import namedtuple import unittest as ut from ruleta import Rule, ActionSet from ruleta.combinators import ALSO import re ItemRecord = namedtuple("ItemRecord",["name", "quality", "quality_change", "sellin" ] ) def print_through(label, condition): def print_through_(input_): val=condition(input_) print(label, val) return val return print_through_ def set_quality_change(val): return lambda item_record: item_record._replace(quality_change=val) def sellby_date_passed(item_record): return item_record.sellin <=0 def multiply_quality_change(val): return lambda item_record: item_record._replace(quality_change = item_record.quality_change*val ) def does_item_degrade (item_record): return item_record.quality_change <0 def is_item_conjured(item_record ): return bool(re.match("conjured", item_record.name)) def is_aged_brie(item_record): return item_record.name == "Aged Brie" def is_sulfuras(item_record): return item_record.name == "Sulfuras" def is_backstage_passes(item_record): return item_record.name == "Backstage passes" def days_until_sellby(condition): return lambda item_record: condition(item_record.sellin) def leq(val): return lambda input_ : input_ <= val def geq(val): return lambda input_ : input_ >= val double_degradation = Rule(does_item_degrade, multiply_quality_change(2)) def set_quality(val): return lambda item_record: item_record._replace(quality=val) def do_nothing(item_record): return item_record def compare_quality(condition ): return lambda item_record : condition(item_record.quality) # Rulesets """ The rules as written: ` All items have a SellIn value which denotes the number of days we have to sell the item All items have a Quality value which denotes how valuable the item is At the end of each day our system lowers both values for every item Once the sell by date has passed, Quality degrades twice as fast The Quality of an item is never negative "Aged Brie" actually increases in Quality the older it gets The Quality of an item is never more than 50 "Sulfuras", being a legendary item, never has to be sold or decreases in Quality "Backstage passes", like aged brie, increases in Quality as it's SellIn value approaches; Quality increases by 2 when there are 10 days or less and by 3 when there are 5 days or less but Quality drops to 0 after the concert ' Just for clarification, an item can never have its Quality increase above 50, however "Sulfuras" is a legendary item and as such its Quality is 80 and it never alters. """ """ The Rules as I understand them The basic rules for the quality of all items are: every day the quality degrades by 1 if the sellby date has passed the degradiation is doubled also if the item is conjured the degradiation is doubled also/again but when the item is "Sulfuras" then it quality never changes but also when the item is "Aged Brie" then the quality increases by every day by 1. but also when the item is "Backstage Passes" the the quality changes according to the following rules: the quality increases by 1 every day but if the sell by date is 10 days or less away, the quality increases by 2 each day but if the sell by date is even 5 days or less away the quality increases by 3 each day instead if the sellby date has passed, the quality is zero and never changes
independent of the above rules
    if the quality of an item would be below zero it is zero instead.
    but if the item's quality would be above 50 it is 50 instead.
    but if the item is "Sulfuras" the quality is always 80
"""

basic_degradation_rules = ActionSet(set_quality_change(-1))\
                            .also(Rule(sellby_date_passed, double_degradation))\
                            .also(Rule(is_item_conjured, double_degradation))

backstage_pass_rules = ActionSet(set_quality_change(+1))\
                        .but(Rule(days_until_sellby(leq(10)), set_quality_change(+2)))\
                        .but(Rule(days_until_sellby(leq(5)), set_quality_change(+3)))\
                        .but(Rule(sellby_date_passed, ALSO(set_quality(0), set_quality_change(0))))

extended_degradation_rules = ActionSet(basic_degradation_rules)\
                                .but(Rule(is_aged_brie, set_quality_change(+1)))\
                                .but(Rule(is_sulfuras, set_quality_change(0)))\
                                .but(Rule(is_backstage_passes, backstage_pass_rules))

bracketing_rules = ActionSet(do_nothing)\
                    .but(Rule(compare_quality(leq(0)), set_quality(0)))\
                    .but(ActionSet(Rule(compare_quality(geq(50)), set_quality(50))))\
                    .but(Rule(is_sulfuras, set_quality(80)))


class GildedRose:
    def __init__(self, items):
        self._items = items

    def update_quality(self):
        for i in range(0, len(self._items)):
            self._items[i] = self._update_item(self._items[i])

    def _update_item(self, item):
        item_record = extended_degradation_rules(
            ItemRecord(item.name, item.quality, 0, item.sellin))
        item_record = bracketing_rules(
            item_record._replace(quality=item_record.quality + item_record.quality_change))
        return Item(item_record.name, max(item_record.sellin - 1, 0), item_record.quality)


class Item:
    def __init__(self, name, sellin, quality):
        self.name = name
        self.sellin = sellin
        self.quality = quality

    def __repr__(self):
        return "%s, %s, %s" % (self.name, self.sellin, self.quality)


class TestGildedRose(ut.TestCase):
    def test_standard_item(self):
        gilded_rose = GildedRose([Item("a Sword", 100, 5)])
        gilded_rose.update_quality()
        self.assertEqual(["a Sword, 99, 4"], list(map(repr, gilded_rose._items)))

    def test_conjured_item(self):
        gilded_rose = GildedRose([Item("conjured Sword", 100, 5)])
        gilded_rose.update_quality()
        self.assertEqual(["conjured Sword, 99, 3"], list(map(repr, gilded_rose._items)))

    def test_minimum_quality(self):
        gilded_rose = GildedRose([Item("a Sword", 100, 0)])
        gilded_rose.update_quality()
        self.assertEqual(["a Sword, 99, 0"], list(map(repr, gilded_rose._items)))

    def test_backstage_passes_10_days(self):
        gilded_rose = GildedRose([Item("Backstage passes", 10, 5)])
        gilded_rose.update_quality()
        self.assertEqual(["Backstage passes, 9, 7"], list(map(repr, gilded_rose._items)))

    def test_backstage_passes_5_days(self):
        gilded_rose = GildedRose([Item("Backstage passes", 5, 5)])
        gilded_rose.update_quality()
        self.assertEqual(["Backstage passes, 4, 8"], list(map(repr, gilded_rose._items)))

    def test_backstage_passes_0_days(self):
        gilded_rose = GildedRose([Item("Backstage passes", 0, 5)])
        gilded_rose.update_quality()
        self.assertEqual(["Backstage passes, 0, 0"], list(map(repr, gilded_rose._items)))


if __name__ == "__main__":
    ut.main()
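A quick interactive check of the rules above: "Aged Brie" gains quality while its sellin counts down, and the bracketing rules leave the value untouched while it stays within 0-50.

shop = GildedRose([Item("Aged Brie", 2, 0)])
shop.update_quality()
print(shop._items)   # -> [Aged Brie, 1, 1]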
_plugin_wrapping.py
# Copyright 2015 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import logging import threading import grpc from grpc import _common from grpc._cython import cygrpc class _AuthMetadataContext( collections.namedtuple('AuthMetadataContext', ( 'service_url', 'method_name',)), grpc.AuthMetadataContext): pass class _CallbackState(object): def __init__(self): self.lock = threading.Lock() self.called = False self.exception = None class _AuthMetadataPluginCallback(grpc.AuthMetadataPluginCallback): def __init__(self, state, callback): self._state = state self._callback = callback def __call__(self, metadata, error): with self._state.lock: if self._state.exception is None: if self._state.called: raise RuntimeError( 'AuthMetadataPluginCallback invoked more than once!') else: self._state.called = True else: raise RuntimeError( 'AuthMetadataPluginCallback raised exception "{}"!'.format( self._state.exception)) if error is None: self._callback(metadata, cygrpc.StatusCode.ok, None) else: self._callback(None, cygrpc.StatusCode.internal, _common.encode(str(error))) class _Plugin(object): def
(self, metadata_plugin): self._metadata_plugin = metadata_plugin def __call__(self, service_url, method_name, callback): context = _AuthMetadataContext( _common.decode(service_url), _common.decode(method_name)) callback_state = _CallbackState() try: self._metadata_plugin( context, _AuthMetadataPluginCallback(callback_state, callback)) except Exception as exception: # pylint: disable=broad-except logging.exception( 'AuthMetadataPluginCallback "%s" raised exception!', self._metadata_plugin) with callback_state.lock: callback_state.exception = exception if callback_state.called: return callback(None, cygrpc.StatusCode.internal, _common.encode(str(exception))) def metadata_plugin_call_credentials(metadata_plugin, name): if name is None: try: effective_name = metadata_plugin.__name__ except AttributeError: effective_name = metadata_plugin.__class__.__name__ else: effective_name = name return grpc.CallCredentials( cygrpc.MetadataPluginCallCredentials( _Plugin(metadata_plugin), _common.encode(effective_name)))
__init__
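# Sketch of how this internal module is typically reached, via the public grpc
# API (the TokenAuth class and header value here are illustrative, not part of
# this file):
#
#   class TokenAuth(grpc.AuthMetadataPlugin):
#       def __call__(self, context, callback):
#           callback((('authorization', 'Bearer my-token'),), None)
#
#   call_credentials = grpc.metadata_call_credentials(TokenAuth(), name='token_auth')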
performance.py
import ops.cmd import util.ip DATA_TYPES = ['all', 'browser', 'cache', 'expensive', 'icmp', 'ip', 'jobobject', 'jobobjectdetails', 'logicaldisk', 'memory', 'networkinterface', 'objects', 'pagingfile', 'physicaldisk', 'process', 'processor', 'system', 'tcp', 'telephony', 'terminalservices', 'thread', 'udp'] class PerformanceCommand(ops.cmd.DszCommand, ): def __init__(self, plugin='performance', data=None, objectNumber=None, initialBufferSize=None, bare=False, target=None, **optdict): self.data = data self.objectNumber = objectNumber
        ops.cmd.DszCommand.__init__(self, plugin, **optdict)

    def _getInitialBufferSize(self):
        return self._opt_initial

    def _setInitialBufferSize(self, bufferSize):
        assert ((bufferSize is None) or ((type(bufferSize) is int) and (bufferSize > 0))), 'bufferSize must be an integer greater than zero; or None to clear this option.'
        self._opt_initial = bufferSize
    initialBufferSize = property(_getInitialBufferSize, _setInitialBufferSize)

    def _getObjectNumber(self):
        return self._opt_objectNumber

    def _setObjectNumber(self, objectNumber):
        assert ((objectNumber is None) or ((type(objectNumber) is int) and (objectNumber >= 0))), 'Object number must be a positive integer or zero; or None to clear this option.'
        self._opt_objectNumber = objectNumber
    objectNumber = property(_getObjectNumber, _setObjectNumber)

    def _getData(self):
        return self._opt_data

    def _setData(self, data):
        assert ((type(data) is str) or (type(data) is unicode) or (data is None)), 'Data must be a string value or None to clear this option.'
        assert ((data is None) or (data.lower() in DATA_TYPES)), 'Data must be one of the valid data type queries.'
        self._opt_data = data
    data = property(_getData, _setData)

    def _getBare(self):
        return self._opt_bare

    def _setBare(self, bare):
        assert (type(bare) is bool), 'Bare must be Boolean.'
        self._opt_bare = bare
    bare = property(_getBare, _setBare)

    def _getTarget(self):
        return self._opt_target

    def _setTarget(self, target):
        assert ((type(target) is str) or (type(target) is unicode) or (target is None)), 'Target must be a string representation or None to clear.'
        assert ((target is None) or util.ip.validate(target)), 'Target address must be a valid IPv4 or IPv6 address.'
        self._opt_target = target
    target = property(_getTarget, _setTarget)

    def validateInput(self):
        # Exactly one of data/objectNumber must be set.
        if ((self.data is not None) and (self.objectNumber is not None)):
            return False
        if ((self.data is None) and (self.objectNumber is None)):
            return False
        return True

    def __str__(self):
        cmdstr = u''
        for prefix in self.prefixes:
            cmdstr += ('%s ' % prefix)
        cmdstr += (self.plugin + ' ')
        if self.initialBufferSize:
            cmdstr += ('-initial %s ' % self.initialBufferSize)
        if self.objectNumber:
            cmdstr += ('-objectnum %s ' % self.objectNumber)
        if self.data:
            cmdstr += ('-data %s ' % self.data)
        if self.bare:
            cmdstr += '-bare '
        if self.target:
            cmdstr += ('-target %s ' % self.target)
        return ops.utf8(cmdstr)

ops.cmd.command_classes['performance'] = PerformanceCommand
self.initialBufferSize = initialBufferSize self.bare = bare self.target = target
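# Illustrative usage (a sketch; the surrounding ops framework is assumed to be
# importable, and the target address is an example):
#
#   cmd = PerformanceCommand(data='memory', target='192.168.1.10')
#   cmd.validateInput()   # True: exactly one of data/objectNumber is set
#   str(cmd)              # 'performance -data memory -target 192.168.1.10 '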
index.js
import * as Interrupt from './interrupts'; export {default as CPU} from './CPU'; export {Interrupt};
package.py
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * import sys class
(CMakePackage, CudaPackage):
    """High-performance linear algebra library for manycore systems,
    with a focus on sparse solution of linear systems."""

    homepage = "https://ginkgo-project.github.io/"
    git = "https://github.com/ginkgo-project/ginkgo.git"

    maintainers = ['tcojean', 'hartwiganzt']

    version('develop', branch='develop')
    version('master', branch='master')
    version('1.3.0', commit='4678668c66f634169def81620a85c9a20b7cec78')  # v1.3.0
    version('1.2.0', commit='b4be2be961fd5db45c3d02b5e004d73550722e31')  # v1.2.0
    version('1.1.1', commit='08d2c5200d3c78015ac8a4fd488bafe1e4240cf5')  # v1.1.1
    version('1.1.0', commit='b9bec8225442b3eb2a85a870efa112ab767a17fb')  # v1.1.0
    version('1.0.0', commit='45244641e0c2b19ba33aecd25153c0bddbcbe1a0')  # v1.0.0

    variant('shared', default=True, description='Build shared libraries')
    variant('full_optimizations', default=False,
            description='Compile with all optimizations')
    variant('openmp', default=sys.platform != 'darwin',
            description='Build with OpenMP')
    variant('develtools', default=False,
            description='Compile with develtools enabled')
    variant('build_type', default='Release',
            description='The build type to build',
            values=('Debug', 'Release'))
    variant('hip', default=False, description='Compile Ginkgo with HIP support')

    depends_on('[email protected]:', type='build')
    depends_on('cuda@9:', when='+cuda')

    depends_on('hip', when='+hip')
    depends_on('hipsparse', type="link", when='+hip')
    depends_on('hipblas', type="link", when='+hip')
    depends_on('rocrand', type="link", when='@develop+hip')
    depends_on('rocthrust', type="build", when='+hip')

    # Somehow, these dependencies are not propagated by the HIP stack?
    depends_on('rocm-device-libs', type="link", when='+hip')
    depends_on('comgr', type="link", when='+hip')

    conflicts('%gcc@:5.2.9')
    conflicts("+hip", when="@:1.1.1")
    # The HIP packages from spack don't seem to work well with the CUDA
    # backend for now, so disable HIP with the CUDA backend.
    conflicts("+cuda", when="+hip")

    def cmake_args(self):
        # Check that the correct C++ standard is available
        if self.spec.satisfies('@:1.2.0'):
            try:
                self.compiler.cxx11_flag
            except UnsupportedCompilerFlag:
                raise InstallError('Ginkgo requires a C++11-compliant C++ compiler')
        else:
            try:
                self.compiler.cxx14_flag
            except UnsupportedCompilerFlag:
                raise InstallError('Ginkgo requires a C++14-compliant C++ compiler')

        spec = self.spec
        args = [
            '-DGINKGO_BUILD_CUDA=%s' % ('ON' if '+cuda' in spec else 'OFF'),
            '-DGINKGO_BUILD_OMP=%s' % ('ON' if '+openmp' in spec else 'OFF'),
            '-DBUILD_SHARED_LIBS=%s' % ('ON' if '+shared' in spec else 'OFF'),
            '-DGINKGO_JACOBI_FULL_OPTIMIZATIONS=%s' % (
                'ON' if '+full_optimizations' in spec else 'OFF'),
            '-DGINKGO_DEVEL_TOOLS=%s' % (
                'ON' if '+develtools' in spec else 'OFF'),
            '-DGINKGO_BUILD_HIP=%s' % ('ON' if '+hip' in spec else 'OFF'),
            # As we are not exposing benchmarks, examples, tests nor doc
            # as part of the installation, disable building them altogether.
            '-DGINKGO_BUILD_BENCHMARKS=OFF',
            '-DGINKGO_BUILD_DOC=OFF',
            '-DGINKGO_BUILD_EXAMPLES=OFF',
            '-DGINKGO_BUILD_TESTS=OFF'
        ]

        if '+hip' in spec:
            args.append('-DHIP_PATH={0}'.format(spec['hip'].prefix))
            args.append('-DHIP_CLANG_PATH={0}/bin'.format(spec['llvm-amdgpu'].prefix))
            args.append('-DHIP_CLANG_INCLUDE_PATH={0}/include'.format(spec['llvm-amdgpu'].prefix))
            args.append('-DHIPSPARSE_PATH={0}'.format(spec['hipsparse'].prefix))
            args.append('-DHIPBLAS_PATH={0}'.format(spec['hipblas'].prefix))
        return args
Ginkgo
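# Example installs exercising the variants declared above (standard Spack CLI;
# spec syntax only, no custom flags):
#
#   spack install ginkgo@1.3.0 +openmp ~cuda
#   spack install ginkgo@develop +hip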
iit.js
var banner = require('./banner') var iit = module.exports = banner.submodule('iit') .fullName('Illinois Institute of Technology') .location('Chicago IL 60647') .timezone('America/New_York') .uses('banner') .configure({
, dummy: true }) .rootUrl('my102.iit.edu') .pagePaths({ termList: "/banr/bwckschd.p_disp_dyn_sched" , term: "/banr/bwckgens.p_proc_term_date" , listing: "/banr/bwckschd.p_get_crse_unsec" , details: "/banr/bwckschd.p_disp_detail_sched" }) .debug(true)
seperateDepartments: true , seatsListedWithSections: false , sectionDetailsOnCrawl: false
period.ts
import { Period, User } from '@prisma/client'; import request from 'supertest'; import { encodeToken } from '~/app/utils/auth'; import App from '~/App';
interface GeneratePeriodParams { name?: string; } interface CreatePeriodParams extends GeneratePeriodParams { adminUser: User; } export function generatePeriod(params?: GeneratePeriodParams) { return { name: 'Manhã', ...params, }; } export async function createPeriod(params: CreatePeriodParams) { const { adminUser } = params; const periodData = generatePeriod(params); const adminToken = encodeToken(adminUser); const response = await request(App) .post('/periods') .send(periodData) .set({ authorization: `Bearer ${adminToken}`, }); return response.body as Period; }
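// Illustrative use inside a Jest test (the adminUser fixture is assumed to be
// created elsewhere in the test setup):
//
//   const period = await createPeriod({ adminUser, name: 'Noite' });
//   expect(period.name).toBe('Noite');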
sudoku_skeleton.py
import copy ########################################### This function reads in block from file def grid_from_file(file_name): lst=[] f=open(file_name) for line in f: lst2=[] for i in line: if i == "x": lst2.append(i) elif i.isdigit(): lst2.append(int(i))
    return lst

##################################################
def subgrid_values(grid, row, col):
    val = []
    # get dimension of inner box
    n = int(len(grid)**(0.5))
    # get starting row and starting col
    r = (row//n)*n
    c = (col//n)*n
    for i in range(r, r+n):
        for j in range(c, c+n):
            val.append(grid[i][j])
    return val  # This returns a list of values in the subgrid

def column_values(grid, col):
    val = []
    n = len(grid)
    for i in range(n):
        val.append(grid[i][col])
    return val  # This returns a list of values in the column

def row_values(grid, row):
    val = []
    n = len(grid)
    for i in range(n):
        val.append(grid[row][i])
    return val  # This returns a list of values in the row

#################################################
def valid_entry(grid, num, r, c):
    check1 = num not in column_values(grid, c)
    check2 = num not in row_values(grid, r)
    check3 = num not in subgrid_values(grid, r, c)
    if check1 and check2 and check3:
        return True
    return False

###################################################
def grids_augmented_in_row(grid, num, r):
    # Write code here
    pass

#################################################### This should recursively use the function above to place num in every row of the grid
def grids_augmented_with_number(grid, num):
    # Write code here
    pass

#################################################### This should recursively use the function above to complete the sudoku puzzle.
def solve_sudoku(file_name):
    # Write code here
    pass

#################################################### this makes a function that chooses a sudoku block based on a number
def block_selector(number):
    if number == 1:
        return "gridA.txt"
    elif number == 2:
        return "gridB.txt"
    elif number == 3:
        return "gridC.txt"
    elif number == 4:
        return "gridD.txt"
    elif number == 5:
        return "gridE.txt"
    elif number == 6:
        return "gridF.txt"
    else:
        return None

#################################################### this solves the chosen block; change the number variable to swap blocks
number = 6
print(solve_sudoku(block_selector(number)))
lst.append(lst2)
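# One possible completion of the stubs above (a sketch, not the official
# solution; it relies only on the helpers defined in this skeleton):
#
#   def grids_augmented_in_row(grid, num, r):
#       if num in row_values(grid, r):
#           return [grid]
#       results = []
#       for c in range(len(grid)):
#           if grid[r][c] == "x" and valid_entry(grid, num, r, c):
#               new_grid = copy.deepcopy(grid)
#               new_grid[r][c] = num
#               results.append(new_grid)
#       return results
#
#   def grids_augmented_with_number(grid, num):
#       grids = [grid]
#       for r in range(len(grid)):
#           grids = [g2 for g in grids for g2 in grids_augmented_in_row(g, num, r)]
#       return grids
#
#   def solve_sudoku(file_name):
#       grids = [grid_from_file(file_name)]
#       for num in range(1, len(grids[0]) + 1):
#           grids = [g2 for g in grids for g2 in grids_augmented_with_number(g, num)]
#       return grids[0] if grids else None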
remote-load-invoices.ts
import { HttpClient, HttpStatusCode } from '@/data/protocols/http' import { LoadInvoices } from '@/domain/usecases' import { ServerError, UnauthorizedError } from '@/presentation/errors' export class RemoteLoadInvoices implements LoadInvoices { constructor ( private readonly url: string, private readonly httpClient: HttpClient<RemoteLoadInvoices.Model[]>
async loadAll (): Promise<RemoteLoadInvoices.Model[]> { const httpResponse = await this.httpClient.request({ url: this.url, method: 'get' }) const remoteInvoices = httpResponse.body || [] switch (httpResponse.statusCode) { case HttpStatusCode.ok: return remoteInvoices case HttpStatusCode.unauthorized: throw new UnauthorizedError() default: throw new ServerError() } } } export namespace RemoteLoadInvoices { export type Model = { id: string invoiceNo: number invoiceDate: number invoiceValue: number description: string } }
) {}
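// Sketch of wiring this use case (AxiosHttpClient is an assumed HttpClient
// implementation from the data layer; the URL is illustrative):
//
//   const loadInvoices = new RemoteLoadInvoices('/api/invoices', new AxiosHttpClient())
//   const invoices = await loadInvoices.loadAll()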
templates.go
/* Copyright 2019 The Crossplane Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package templates import ( "strings" ctrl "sigs.k8s.io/controller-runtime" "github.com/crossplane/crossplane-runtime/pkg/logging" "github.com/crossplane/crossplane/apis/stacks/v1alpha1" ) // SetupStackDefinitions adds a controller that reconciles StackDefinitions. func SetupStackDefinitions(mgr ctrl.Manager, l logging.Logger) error { name := "stacks/" + strings.ToLower(v1alpha1.StackDefinitionGroupKind) return ctrl.NewControllerManagedBy(mgr). Named(name). For(&v1alpha1.StackDefinition{}).
Complete(NewStackDefinitionReconciler(mgr.GetClient(), l.WithValues("controller", "stackconfiguration"))) }
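// Illustrative wiring from a controller manager's setup code (mgr and logger
// construction elided; assumes a standard controller-runtime manager):
//
//	if err := templates.SetupStackDefinitions(mgr, logger); err != nil {
//		os.Exit(1)
//	}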
PlotNFTUnconfirmedCard.tsx
import React, { useEffect } from 'react'; import styled from 'styled-components'; import { Trans } from '@lingui/macro'; import { Flex, Link, Loading } from '@ecostake/core'; import { Box, Card, CardContent, Typography } from '@material-ui/core'; import type UnconfirmedPlotNFT from '../../types/UnconfirmedPlotNFT'; import useTransaction from '../../hooks/useTransaction'; import PlotNFTState from '../../constants/PlotNFTState'; import useUnconfirmedPlotNFTs from '../../hooks/useUnconfirmedPlotNFTs'; const StyledCard = styled(Card)` display: flex;
`; const StyledCardContent = styled(CardContent)` display: flex; flex-direction: column; flex-grow: 1; `; type Props = { unconfirmedPlotNFT: UnconfirmedPlotNFT; }; export default function PlotNFTUnconfirmedCard(props: Props) { const { unconfirmedPlotNFT: { transactionId, state, poolUrl }, } = props; const { remove } = useUnconfirmedPlotNFTs(); const [transaction] = useTransaction(transactionId); useEffect(() => { if (transaction?.confirmed) { remove(transaction.name); } }, [transaction?.confirmed]); return ( <StyledCard> <StyledCardContent> <Flex flexDirection="column" gap={4} flexGrow={1}> <Box> <Typography variant="h6" align="center"> {state === PlotNFTState.SELF_POOLING ? ( <Trans>Creating Plot NFT for Self Pooling</Trans> ) : ( <Trans>Creating Plot NFT and Joining the Pool</Trans> )} </Typography> {state === PlotNFTState.FARMING_TO_POOL && ( <Flex alignItems="center" gap={1} justifyContent="center"> <Typography variant="body2" color="textSecondary"> <Trans>Pool:</Trans> </Typography> <Link target="_blank" href={poolUrl}> {poolUrl} </Link> </Flex> )} </Box> <Flex flexGrow={1} alignItems="center" justifyContent="center" flexDirection="column" gap={2} > <Loading /> <Typography variant="body2" align="center"> <Trans>Waiting for the transaction to be confirmed</Trans> </Typography> </Flex> </Flex> </StyledCardContent> </StyledCard> ); }
flex-direction: column; height: 100%; min-height: 388px;
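// Illustrative usage (a sketch; `unconfirmedNFT` would come from the
// useUnconfirmedPlotNFTs hook used above):
//
//   <PlotNFTUnconfirmedCard unconfirmedPlotNFT={unconfirmedNFT} />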
writesetfield.py
# Copyright 1997 - 2018 by IXIA Keysight # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from ixnetwork_restpy.base import Base from ixnetwork_restpy.files import Files class WriteSetField(Base): """The WriteSetField class encapsulates a required writeSetField node in the ixnetwork hierarchy. An instance of the class can be obtained by accessing the WriteSetField property from a parent instance. The internal properties list will contain one and only one set of properties which is populated when the property is accessed. """ _SDM_NAME = 'writeSetField' def __init__(self, parent): super(WriteSetField, self).__init__(parent) @property def ArpDestinationHardwareAddress(self): """If selected, Write Set Field for ARP Destination Hardware Address is supported. Returns: bool """ return self._get_attribute('arpDestinationHardwareAddress') @ArpDestinationHardwareAddress.setter def ArpDestinationHardwareAddress(self, value): self._set_attribute('arpDestinationHardwareAddress', value) @property def ArpDestinationIpv4Address(self): """If selected, Write Set Field for ARP Destination IPv4 Address is supported. Returns: bool """ return self._get_attribute('arpDestinationIpv4Address') @ArpDestinationIpv4Address.setter def ArpDestinationIpv4Address(self, value): self._set_attribute('arpDestinationIpv4Address', value) @property def ArpOpcode(self): """If selected, Write Set Field for ARP Opcode is supported. Returns: bool """ return self._get_attribute('arpOpcode') @ArpOpcode.setter def ArpOpcode(self, value): self._set_attribute('arpOpcode', value) @property def ArpSourceHardwareAddress(self): """If selected, Write Set Field for ARP Source Hardware Address is supported. Returns: bool """ return self._get_attribute('arpSourceHardwareAddress') @ArpSourceHardwareAddress.setter def ArpSourceHardwareAddress(self, value): self._set_attribute('arpSourceHardwareAddress', value) @property def ArpSourceIpv4Address(self): """If selected, Write Set Field for ARP Source IPv4 Address is supported. Returns: bool """ return self._get_attribute('arpSourceIpv4Address') @ArpSourceIpv4Address.setter def ArpSourceIpv4Address(self, value): self._set_attribute('arpSourceIpv4Address', value) @property def EthernetDestination(self): """If selected, Write Set Field for Ethernet Destination is supported. 
Returns: bool """ return self._get_attribute('ethernetDestination') @EthernetDestination.setter def EthernetDestination(self, value): self._set_attribute('ethernetDestination', value) @property def EthernetSource(self): """If selected, Write Set Field for Ethernet Source is supported. Returns: bool """ return self._get_attribute('ethernetSource') @EthernetSource.setter def EthernetSource(self, value): self._set_attribute('ethernetSource', value) @property def EthernetType(self): """If selected, Write Set Field for Ethernet Type is supported. Returns: bool """ return self._get_attribute('ethernetType') @EthernetType.setter def EthernetType(self, value): self._set_attribute('ethernetType', value) @property def IcmpCode(self): """If selected, Write Set Field for ICMP Code is supported. Returns: bool """ return self._get_attribute('icmpCode') @IcmpCode.setter def IcmpCode(self, value): self._set_attribute('icmpCode', value) @property def IcmpType(self): """If selected, Write Set Field for ICMP Type is supported. Returns: bool """ return self._get_attribute('icmpType') @IcmpType.setter def IcmpType(self, value): self._set_attribute('icmpType', value) @property def Icmpv6Code(self): """If selected, Write Set Field for ICMPv6 Code is supported. Returns: bool """ return self._get_attribute('icmpv6Code') @Icmpv6Code.setter def Icmpv6Code(self, value): self._set_attribute('icmpv6Code', value) @property def Icmpv6Type(self): """If selected, Write Set Field for ICMPv6 Type is supported. Returns: bool """ return self._get_attribute('icmpv6Type') @Icmpv6Type.setter def Icmpv6Type(self, value): self._set_attribute('icmpv6Type', value) @property def IpDscp(self): """If selected, Write Set Field for IP DSCP is supported. Returns: bool """ return self._get_attribute('ipDscp') @IpDscp.setter def IpDscp(self, value): self._set_attribute('ipDscp', value) @property def IpEcn(self): """If selected, Write Set Field for IP ECN is supported. Returns: bool """ return self._get_attribute('ipEcn') @IpEcn.setter def IpEcn(self, value): self._set_attribute('ipEcn', value) @property def IpProtocol(self): """If selected, Write Set Field for IP Protocol is supported. Returns: bool """ return self._get_attribute('ipProtocol') @IpProtocol.setter def IpProtocol(self, value): self._set_attribute('ipProtocol', value) @property def Ipv4Destination(self): """If selected, Write Set Field for IPv4 Destination is supported. Returns: bool """ return self._get_attribute('ipv4Destination') @Ipv4Destination.setter def Ipv4Destination(self, value): self._set_attribute('ipv4Destination', value) @property def Ipv4Source(self): """If selected, Write Set Field for IPv4 Source is supported. Returns: bool """ return self._get_attribute('ipv4Source') @Ipv4Source.setter def Ipv4Source(self, value): self._set_attribute('ipv4Source', value) @property def Ipv6Destination(self): """If selected, Write Set Field for IPv6 Destination is supported. Returns: bool """ return self._get_attribute('ipv6Destination') @Ipv6Destination.setter def Ipv6Destination(self, value): self._set_attribute('ipv6Destination', value) @property def Ipv6ExtHeader(self): """If selected, Write Set Field for IPv6 Ext Header is supported. Returns: bool """ return self._get_attribute('ipv6ExtHeader') @Ipv6ExtHeader.setter def Ipv6ExtHeader(self, value): self._set_attribute('ipv6ExtHeader', value) @property def Ipv6FlowLabel(self): """If selected, Write Set Field for IPv6 Flow Label is supported. 
Returns: bool """ return self._get_attribute('ipv6FlowLabel') @Ipv6FlowLabel.setter def Ipv6FlowLabel(self, value): self._set_attribute('ipv6FlowLabel', value) @property def Ipv6NdSll(self): """If selected, Write Set Field for IPv6 ND SLL is supported. Returns: bool """ return self._get_attribute('ipv6NdSll') @Ipv6NdSll.setter def Ipv6NdSll(self, value): self._set_attribute('ipv6NdSll', value) @property def Ipv6NdTarget(self): """If selected, Write Set Field for IPv6 ND Target is supported. Returns: bool """ return self._get_attribute('ipv6NdTarget') @Ipv6NdTarget.setter def Ipv6NdTarget(self, value): self._set_attribute('ipv6NdTarget', value) @property def Ipv6NdTll(self): """If selected, Write Set Field for IPv6 ND TLL is supported. Returns: bool """ return self._get_attribute('ipv6NdTll') @Ipv6NdTll.setter def Ipv6NdTll(self, value): self._set_attribute('ipv6NdTll', value) @property def Ipv6Source(self): """If selected, Write Set Field for IPv6 Source is supported. Returns: bool """ return self._get_attribute('ipv6Source') @Ipv6Source.setter def Ipv6Source(self, value): self._set_attribute('ipv6Source', value) @property def MplsBos(self): """If selected, Write Set Field for MPLS BoS is supported. Returns: bool """ return self._get_attribute('mplsBos') @MplsBos.setter def MplsBos(self, value): self._set_attribute('mplsBos', value) @property def MplsLabel(self): """If selected, Write Set Field for MPLS Label is supported.
return self._get_attribute('mplsLabel') @MplsLabel.setter def MplsLabel(self, value): self._set_attribute('mplsLabel', value) @property def MplsTc(self): """If selected, Write Set Field for MPLS TC is supported. Returns: bool """ return self._get_attribute('mplsTc') @MplsTc.setter def MplsTc(self, value): self._set_attribute('mplsTc', value) @property def PbbIsid(self): """If selected, Write Set Field for PBB ISID is supported. Returns: bool """ return self._get_attribute('pbbIsid') @PbbIsid.setter def PbbIsid(self, value): self._set_attribute('pbbIsid', value) @property def SctpDestination(self): """If selected, Write Set Field for SCTP Destination is supported. Returns: bool """ return self._get_attribute('sctpDestination') @SctpDestination.setter def SctpDestination(self, value): self._set_attribute('sctpDestination', value) @property def SctpSource(self): """If selected, Write Set Field for SCTP Source is supported. Returns: bool """ return self._get_attribute('sctpSource') @SctpSource.setter def SctpSource(self, value): self._set_attribute('sctpSource', value) @property def TcpDestination(self): """If selected, Write Set Field for TCP Destination is supported. Returns: bool """ return self._get_attribute('tcpDestination') @TcpDestination.setter def TcpDestination(self, value): self._set_attribute('tcpDestination', value) @property def TcpSource(self): """If selected, Write Set Field for TCP Source is supported. Returns: bool """ return self._get_attribute('tcpSource') @TcpSource.setter def TcpSource(self, value): self._set_attribute('tcpSource', value) @property def TunnelId(self): """If selected, Write Set Field for Tunnel ID is supported. Returns: bool """ return self._get_attribute('tunnelId') @TunnelId.setter def TunnelId(self, value): self._set_attribute('tunnelId', value) @property def UdpDestination(self): """If selected, Write Set Field for UDP Destination is supported. Returns: bool """ return self._get_attribute('udpDestination') @UdpDestination.setter def UdpDestination(self, value): self._set_attribute('udpDestination', value) @property def UdpSource(self): """If selected, Write Set Field for UDP Source is supported. Returns: bool """ return self._get_attribute('udpSource') @UdpSource.setter def UdpSource(self, value): self._set_attribute('udpSource', value) @property def VlanId(self): """If selected, Write Set Field for VLAN ID is supported. Returns: bool """ return self._get_attribute('vlanId') @VlanId.setter def VlanId(self, value): self._set_attribute('vlanId', value) @property def VlanPriority(self): """If selected, Write Set Field for VLAN Priority is supported. Returns: bool """ return self._get_attribute('vlanPriority') @VlanPriority.setter def VlanPriority(self, value): self._set_attribute('vlanPriority', value)
Returns: bool """
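# Illustrative read/write of these capability flags (a sketch; how the parent
# object is obtained from a live session is elided, and `switch_tables` is a
# hypothetical handle):
#
#   write_set_field = switch_tables.WriteSetField
#   write_set_field.EthernetSource = True
#   write_set_field.VlanId = True
#   print(write_set_field.TcpDestination)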
fmt.rs
use criterion::{criterion_group, criterion_main, Criterion}; use std::fmt::{Display, Write}; use twilight_mention::fmt::Mention; use twilight_model::id::{ marker::{ChannelMarker, EmojiMarker, RoleMarker, UserMarker}, Id, }; fn format_id<T: Display>(input: &mut String, formatter: &T) { input.write_fmt(format_args!("{formatter}")).unwrap(); } fn criterion_benchmark(c: &mut Criterion) { c.bench_function("format channel id", |b| { let mut string = String::new(); let formatter = Id::<ChannelMarker>::new(999_999_999_999_999_999).mention(); b.iter(|| format_id(&mut string, &formatter)) }); c.bench_function("format emoji id", |b| {
let formatter = Id::<EmojiMarker>::new(999_999_999_999_999_999).mention(); b.iter(|| format_id(&mut string, &formatter)) }); c.bench_function("format role id", |b| { let mut string = String::new(); let formatter = Id::<RoleMarker>::new(999_999_999_999_999_999).mention(); b.iter(|| format_id(&mut string, &formatter)) }); c.bench_function("format user id", |b| { let mut string = String::new(); let formatter = Id::<UserMarker>::new(999_999_999_999_999_999).mention(); b.iter(|| format_id(&mut string, &formatter)) }); } criterion_group!(benches, criterion_benchmark); criterion_main!(benches);
let mut string = String::new();
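// For reference, the Display output being benchmarked (Discord mention syntax
// as rendered by twilight-mention; the exact strings shown are an assumption):
//   user:    <@999999999999999999>
//   role:    <@&999999999999999999>
//   channel: <#999999999999999999>
// Run the benchmarks with `cargo bench`.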
test.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from parlai.utils.testing import AutoTeacherTest class TestDefaultTeacher(AutoTeacherTest): task = "squad" class TestIndexTeacher(AutoTeacherTest): task = "squad:index" class TestOpensquadTeacher(AutoTeacherTest):
class TestFulldocTeacher(AutoTeacherTest): task = "squad:fulldoc" class TestSentenceTeacher(AutoTeacherTest): task = "squad:sentence" class TestFulldocsentenceTeacher(AutoTeacherTest): task = "squad:fulldocsentence"
task = "squad:opensquad"
plugin_mapper.go
// Code generated by mockery v1.0.0. DO NOT EDIT. package mocks import endorsement "github.com/paul-lee-attorney/fabric-2.1-gm/core/handlers/endorsement/api" import endorser "github.com/paul-lee-attorney/fabric-2.1-gm/core/endorser" import mock "github.com/stretchr/testify/mock" // PluginMapper is an autogenerated mock type for the PluginMapper type type PluginMapper struct { mock.Mock } // PluginFactoryByName provides a mock function with given fields: name func (_m *PluginMapper) PluginFactoryByName(name endorser.PluginName) endorsement.PluginFactory { ret := _m.Called(name) var r0 endorsement.PluginFactory if rf, ok := ret.Get(0).(func(endorser.PluginName) endorsement.PluginFactory); ok { r0 = rf(name) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(endorsement.PluginFactory) } } return r0
}
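// Typical testify setup for this mock (a sketch; `factory` is any test double
// satisfying endorsement.PluginFactory and `t` is the *testing.T in scope):
//
//	m := &mocks.PluginMapper{}
//	m.On("PluginFactoryByName", endorser.PluginName("escc")).Return(factory)
//	_ = m.PluginFactoryByName(endorser.PluginName("escc"))
//	m.AssertExpectations(t)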
validator-builder.ts
import { IsDefined, IsOptional } from 'class-validator';
export interface Validating { optional(): this; } // eslint-disable-next-line @typescript-eslint/explicit-module-boundary-types export function ValidatorBuilder<B extends Constructor<DecoratorBuilder>>(Base: B) { return class ValidatorMixin extends Base implements Validating { /* Mark property as optional. */ public optional(): this { this.add(IsOptional()); return this; } /* Mark property as required. */ public required(): this { this.add(IsDefined()); return this; } public maybeRequired(): this { if (this.options.optional) { return this.optional(); } return this.required(); } }; }
import { Constructor, DecoratorBuilder } from './base-builder';
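// Sketch of composing the mixin (the subclass name and options shape are
// illustrative; DecoratorBuilder's constructor is assumed to accept options
// with an `optional` flag, as read by maybeRequired() above):
//
//   class PropertyBuilder extends ValidatorBuilder(DecoratorBuilder) {}
//   new PropertyBuilder({ optional: true }).maybeRequired(); // adds IsOptional()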
summaries.py
import os import torch from torchvision.utils import make_grid from tensorboardX import SummaryWriter from dataloaders.utils import decode_seg_map_sequence class TensorboardSummary(object): def
(self, directory): self.directory = directory def create_summary(self): writer = SummaryWriter(log_dir=os.path.join(self.directory)) return writer def visualize_image(self, writer, dataset, image, target, output, global_step): grid_image = make_grid(image[:3].clone().cpu().data, 3, normalize=True) writer.add_image('Image', grid_image, global_step) grid_image = make_grid(decode_seg_map_sequence(torch.max(output[:3], 1)[1].detach().cpu().numpy(), dataset=dataset), 3, normalize=False, range=(0, 255)) writer.add_image('Predicted label', grid_image, global_step) grid_image = make_grid(decode_seg_map_sequence(torch.squeeze(target[:3], 1).detach().cpu().numpy(), dataset=dataset), 3, normalize=False, range=(0, 255)) writer.add_image('Groundtruth label', grid_image, global_step)
__init__
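# Illustrative training-loop usage (directory and dataset name are examples):
#
#   summary = TensorboardSummary('run/pascal/experiment_0')
#   writer = summary.create_summary()
#   ...
#   summary.visualize_image(writer, 'pascal', image, target, output, global_step)
#   writer.close()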
xy_axes.js
/** * DevExtreme (esm/viz/axes/xy_axes.js) * Version: 21.2.5 * Build date: Mon Jan 17 2022 * * Copyright (c) 2012 - 2022 Developer Express Inc. ALL RIGHTS RESERVED * Read about DevExtreme licensing here: https://js.devexpress.com/Licensing/ */ import { Range } from "../translators/range"; import formatHelper from "../../format_helper"; import dateUtils from "../../core/utils/date"; import { extend } from "../../core/utils/extend"; import { generateDateBreaks } from "./datetime_breaks"; import { noop } from "../../core/utils/common"; import { getLog, patchFontOptions, getCosAndSin } from "../core/utils"; import { isDefined } from "../../core/utils/type"; import constants from "./axes_constants"; var getNextDateUnit = dateUtils.getNextDateUnit; var correctDateWithUnitBeginning = dateUtils.correctDateWithUnitBeginning; var _math = Math; var _max = _math.max; var TOP = constants.top; var BOTTOM = constants.bottom; var LEFT = constants.left; var RIGHT = constants.right; var CENTER = constants.center; var SCALE_BREAK_OFFSET = 3; var RANGE_RATIO = .3; var WAVED_LINE_CENTER = 2; var WAVED_LINE_TOP = 0; var WAVED_LINE_BOTTOM = 4; var WAVED_LINE_LENGTH = 24; var TICKS_CORRECTIONS = { left: -1, top: -1, right: 0, bottom: 0, center: -.5 }; function prepareDatesDifferences(datesDifferences, tickInterval) { var dateUnitInterval; var i; if ("week" === tickInterval) { tickInterval = "day" } if ("quarter" === tickInterval) { tickInterval = "month" } if (datesDifferences[tickInterval]) { for (i = 0; i < dateUtils.dateUnitIntervals.length; i++) { dateUnitInterval = dateUtils.dateUnitIntervals[i]; if (datesDifferences[dateUnitInterval]) { datesDifferences[dateUnitInterval] = false; datesDifferences.count-- } if (dateUnitInterval === tickInterval) { break } } } } function sortingBreaks(breaks) { return breaks.sort((function(a, b) { return a.from - b.from })) } function getMarkerDates(min, max, markerInterval) { var origMin = min; var dates; min = correctDateWithUnitBeginning(min, markerInterval); max = correctDateWithUnitBeginning(max, markerInterval); dates = dateUtils.getSequenceByInterval(min, max, markerInterval); if (dates.length && origMin > dates[0]) { dates = dates.slice(1) } return dates } function getStripHorizontalAlignmentPosition(alignment) { var position = "start"; if ("center" === alignment) { position = "center" } if ("right" === alignment) { position = "end" } return position } function getStripVerticalAlignmentPosition(alignment) { var position = "start"; if ("center" === alignment) { position = "center" } if ("bottom" === alignment) { position = "end" } return position } function getMarkerInterval(tickInterval) { var markerInterval = getNextDateUnit(tickInterval); if ("quarter" === markerInterval) { markerInterval = getNextDateUnit(markerInterval) } return markerInterval } function getMarkerFormat(curDate, prevDate, tickInterval, markerInterval) { var format = markerInterval; var datesDifferences = prevDate && dateUtils.getDatesDifferences(prevDate, curDate); if (prevDate && "year" !== tickInterval) { prepareDatesDifferences(datesDifferences, tickInterval); format = formatHelper.getDateFormatByDifferences(datesDifferences) } return format } function getMaxSide(act, boxes) { return boxes.reduce((function(prevValue, box) { return _max(prevValue, act(box)) }), 0) } function getDistanceByAngle(bBox, rotationAngle) { rotationAngle = _math.abs(rotationAngle); rotationAngle = rotationAngle % 180 >= 90 ? 
90 - rotationAngle % 90 : rotationAngle % 90; var a = rotationAngle * (_math.PI / 180); if (a >= _math.atan(bBox.height / bBox.width)) { return bBox.height / _math.abs(_math.sin(a)) } else { return bBox.width } } function getMaxConstantLinePadding(constantLines) { return constantLines.reduce((function(padding, options) { return _max(padding, options.paddingTopBottom) }), 0) } function getConstantLineLabelMarginForVerticalAlignment(constantLines, alignment, labelHeight) { return constantLines.some((function(options) { return options.label.verticalAlignment === alignment })) && labelHeight || 0 } function getLeftMargin(bBox) { return _math.abs(bBox.x) || 0 } function getRightMargin(bBox) { return _math.abs(bBox.width - _math.abs(bBox.x)) || 0 } function generateRangesOnPoints(points, edgePoints, getRange) { var i; var length; var maxRange = null; var ranges = []; var curValue; var prevValue; var curRange; for (i = 1, length = points.length; i < length; i++) { curValue = points[i]; prevValue = points[i - 1]; curRange = getRange(curValue, prevValue); if (edgePoints.indexOf(curValue) >= 0) { if (!maxRange || curRange > maxRange.length) { maxRange = { start: curValue, end: prevValue, length: curRange } } } else { if (maxRange && curRange < maxRange.length) { ranges.push(maxRange) } else { ranges.push({ start: curValue, end: prevValue, length: curRange }) } maxRange = null } } if (maxRange) { ranges.push(maxRange) } return ranges } function generateAutoBreaks(_ref, series, _ref2) { var { logarithmBase: logarithmBase, type: type, maxAutoBreakCount: maxAutoBreakCount } = _ref; var { minVisible: minVisible, maxVisible: maxVisible } = _ref2; var breaks = []; var getRange = "logarithmic" === type ? (min, max) => getLog(max / min, logarithmBase) : (min, max) => max - min; var visibleRange = getRange(minVisible, maxVisible); var points = series.reduce((result, s) => { var points = s.getPointsInViewPort(); result[0] = result[0].concat(points[0]); result[1] = result[1].concat(points[1]); return result }, [ [], [] ]); var sortedAllPoints = points[0].concat(points[1]).sort((a, b) => b - a); var edgePoints = points[1].filter(p => points[0].indexOf(p) < 0); var minDiff = RANGE_RATIO * visibleRange; var ranges = generateRangesOnPoints(sortedAllPoints, edgePoints, getRange).sort((a, b) => b.length - a.length); var epsilon = _math.min.apply(null, ranges.map(r => r.length)) / 1e3; var _maxAutoBreakCount = isDefined(maxAutoBreakCount) ? _math.min(maxAutoBreakCount, ranges.length) : ranges.length; for (var i = 0; i < _maxAutoBreakCount; i++) { if (ranges[i].length >= minDiff) { if (visibleRange <= ranges[i].length) { break } visibleRange -= ranges[i].length; if (visibleRange > epsilon || visibleRange < -epsilon) { breaks.push({ from: ranges[i].start, to: ranges[i].end }); minDiff = RANGE_RATIO * visibleRange } } else { break } } sortingBreaks(breaks); return breaks } export default { linear: { _getStep: function(boxes, rotationAngle) { var spacing = this._options.label.minSpacing; var func = this._isHorizontal ? function(box) { return box.width + spacing } : function(box) { return box.height }; var maxLabelLength = getMaxSide(func, boxes); if (rotationAngle) { maxLabelLength = getDistanceByAngle({ width: maxLabelLength, height: this._getMaxLabelHeight(boxes, 0) }, rotationAngle) } return constants.getTicksCountInRange(this._majorTicks, this._isHorizontal ? 
"x" : "y", maxLabelLength) }, _getMaxLabelHeight: function(boxes, spacing) { return getMaxSide((function(box) { return box.height }), boxes) + spacing }, _validateOverlappingMode: function(mode, displayMode) { if (this._isHorizontal && ("rotate" === displayMode || "stagger" === displayMode) || !this._isHorizontal) { return constants.validateOverlappingMode(mode) } return mode }, _validateDisplayMode: function(mode) { return this._isHorizontal ? mode : "standard" }, getMarkerTrackers: function() { return this._markerTrackers }, _getSharpParam: function(opposite) { return this._isHorizontal ^ opposite ? "h" : "v" }, _createAxisElement: function() { return this._renderer.path([], "line") }, _updateAxisElementPosition: function() { var axisCoord = this._axisPosition; var canvas = this._getCanvasStartEnd(); this._axisElement.attr({ points: this._isHorizontal ? [canvas.start, axisCoord, canvas.end, axisCoord] : [axisCoord, canvas.start, axisCoord, canvas.end] }) }, _getTranslatedCoord: function(value, offset) { return this._translator.translate(value, offset) }, _initAxisPositions() { if (this.customPositionIsAvailable()) { this._customBoundaryPosition = this.getCustomBoundaryPosition() } if (!this.customPositionIsAvailable() || this.customPositionIsBoundary()) { this._axisPosition = this.getPredefinedPosition(this.getResolvedBoundaryPosition()) } else { this._axisPosition = this.getCustomPosition() } }, _getTickMarkPoints(coords, length, tickOptions) { var isHorizontal = this._isHorizontal; var tickOrientation = this._options.tickOrientation; var labelPosition = this._options.label.position; var tickStartCoord; if (isDefined(tickOrientation)) { tickStartCoord = TICKS_CORRECTIONS[tickOrientation] * length } else { var shift = tickOptions.shift || 0; if (!isHorizontal && labelPosition === LEFT || isHorizontal && labelPosition !== BOTTOM) { shift = -shift } tickStartCoord = shift + this.getTickStartPositionShift(length) } return [coords.x + (isHorizontal ? 0 : tickStartCoord), coords.y + (isHorizontal ? tickStartCoord : 0), coords.x + (isHorizontal ? 0 : tickStartCoord + length), coords.y + (isHorizontal ? tickStartCoord + length : 0)] }, getTickStartPositionShift(length) { var width = this._options.width; var position = this.getResolvedBoundaryPosition(); return length % 2 === 1 ? width % 2 === 0 && (position === LEFT || position === TOP) || width % 2 === 1 && (position === RIGHT || position === BOTTOM) && !this.hasNonBoundaryPosition() ? Math.floor(-length / 2) : -Math.floor(length / 2) : -length / 2 + (width % 2 === 0 ? 0 : position === BOTTOM || position === RIGHT ? -1 : 1) }, _getTitleCoords: function() { var horizontal = this._isHorizontal; var x = this._axisPosition; var y = this._axisPosition; var align = this._options.title.alignment; var canvas = this._getCanvasStartEnd(); var fromStartToEnd = horizontal || this._options.position === LEFT; var canvasStart = fromStartToEnd ? canvas.start : canvas.end; var canvasEnd = fromStartToEnd ? canvas.end : canvas.start; var coord = align === LEFT ? canvasStart : align === RIGHT ? 
canvasEnd : canvas.start + (canvas.end - canvas.start) / 2; if (horizontal) { x = coord } else { y = coord } return { x: x, y: y } }, _drawTitleText: function(group, coords) { var options = this._options; var titleOptions = options.title; var attrs = { opacity: titleOptions.opacity, align: titleOptions.alignment, class: titleOptions.cssClass }; if (!titleOptions.text || !group) { return } coords = coords || this._getTitleCoords(); if (!this._isHorizontal) { attrs.rotate = options.position === LEFT ? 270 : 90 } var text = this._renderer.text(titleOptions.text, coords.x, coords.y).css(patchFontOptions(titleOptions.font)).attr(attrs).append(group); this._checkTitleOverflow(text); return text }, _updateTitleCoords: function() { this._title && this._title.element.attr(this._getTitleCoords()) }, _drawTitle: function() { var title = this._drawTitleText(this._axisTitleGroup); if (title) { this._title = { element: title } } }, _measureTitle: function() { if (this._title) { if (this._title.bBox && !this._title.originalSize) { this._title.originalSize = this._title.bBox } this._title.bBox = this._title.element.getBBox() } }, _drawDateMarker: function(date, options, range) { var markerOptions = this._options.marker; var invert = this._translator.getBusinessRange().invert; var textIndent = markerOptions.width + markerOptions.textLeftIndent; var pathElement; if (null === options.x) { return } if (!options.withoutStick) { pathElement = this._renderer.path([options.x, options.y, options.x, options.y + markerOptions.separatorHeight], "line").attr({ "stroke-width": markerOptions.width, stroke: markerOptions.color, "stroke-opacity": markerOptions.opacity, sharp: "h" }).append(this._axisElementsGroup) } var text = String(this.formatLabel(date, options.labelOptions, range)); return { date: date, x: options.x, y: options.y, cropped: options.withoutStick, label: this._renderer.text(text, options.x, options.y).css(patchFontOptions(markerOptions.label.font)).append(this._axisElementsGroup), line: pathElement, getContentContainer() { return this.label }, getEnd: function() { return this.x + (invert ? 
-1 : 1) * (textIndent + this.labelBBox.width) }, setTitle: function() { this.title = text }, hideLabel: function() { this.label.dispose(); this.label = null; this.title = text }, hide: function() { if (pathElement) { pathElement.dispose(); pathElement = null } this.label.dispose(); this.label = null; this.hidden = true } } }, _drawDateMarkers: function() { var that = this; var options = that._options; var translator = that._translator; var viewport = that._getViewportRange(); var minBound = viewport.minVisible; var dateMarkers = []; var dateMarker; function draw(markerDate, format, withoutStick) { return that._drawDateMarker(markerDate, { x: translator.translate(markerDate), y: markersAreaTop, labelOptions: that._getLabelFormatOptions(format), withoutStick: withoutStick }, viewport) } if (viewport.isEmpty() || !options.marker.visible || "datetime" !== options.argumentType || "discrete" === options.type || that._majorTicks.length <= 1) { return [] } var markersAreaTop = that._axisPosition + options.marker.topIndent; var tickInterval = dateUtils.getDateUnitInterval(this._tickInterval); var markerInterval = getMarkerInterval(tickInterval); var markerDates = getMarkerDates(minBound, viewport.maxVisible, markerInterval); if (markerDates.length > 1 || 1 === markerDates.length && minBound < markerDates[0]) { dateMarkers = markerDates.reduce((function(markers, curDate, i, dates) { var marker = draw(curDate, getMarkerFormat(curDate, dates[i - 1] || minBound < curDate && minBound, tickInterval, markerInterval)); marker && markers.push(marker); return markers }), []); if (minBound < markerDates[0]) { dateMarker = draw(minBound, getMarkerFormat(minBound, markerDates[0], tickInterval, markerInterval), true); dateMarker && dateMarkers.unshift(dateMarker) } } return dateMarkers }, _adjustDateMarkers: function(offset) { offset = offset || 0; var that = this; var markerOptions = this._options.marker; var textIndent = markerOptions.width + markerOptions.textLeftIndent; var invert = this._translator.getBusinessRange().invert; var canvas = that._getCanvasStartEnd(); var dateMarkers = this._dateMarkers; if (!dateMarkers.length) { return offset } if (dateMarkers[0].cropped) { if (!this._checkMarkersPosition(invert, dateMarkers[1], dateMarkers[0])) { dateMarkers[0].hideLabel() } } var prevDateMarker; dateMarkers.forEach((function(marker, i, markers) { if (marker.cropped) { return } if (invert ? marker.getEnd() < canvas.end : marker.getEnd() > canvas.end) { marker.hideLabel() } else if (that._checkMarkersPosition(invert, marker, prevDateMarker)) { prevDateMarker = marker } else { marker.hide() } })); this._dateMarkers.forEach((function(marker) { if (marker.label) { var labelBBox = marker.labelBBox; var dy = marker.y + markerOptions.textTopIndent - labelBBox.y; marker.label.attr({ translateX: invert ? marker.x - textIndent - labelBBox.x - labelBBox.width : marker.x + textIndent - labelBBox.x, translateY: dy + offset }) } if (marker.line) { marker.line.attr({ translateY: offset }) } })); that._initializeMarkersTrackers(offset); return offset + markerOptions.topIndent + markerOptions.separatorHeight }, _checkMarkersPosition: function(invert, dateMarker, prevDateMarker) { if (void 0 === prevDateMarker) { return true } return invert ? 
dateMarker.x < prevDateMarker.getEnd() : dateMarker.x > prevDateMarker.getEnd() }, _initializeMarkersTrackers: function(offset) { var separatorHeight = this._options.marker.separatorHeight; var renderer = this._renderer; var businessRange = this._translator.getBusinessRange(); var canvas = this._getCanvasStartEnd(); var group = this._axisElementsGroup; this._markerTrackers = this._dateMarkers.filter((function(marker) { return !marker.hidden
})).map((function(marker, i, markers) { var nextMarker = markers[i + 1] || { x: canvas.end, date: businessRange.max }; var x = marker.x; var y = marker.y + offset; var markerTracker = renderer.path([x, y, x, y + separatorHeight, nextMarker.x, y + separatorHeight, nextMarker.x, y, x, y], "area").attr({ "stroke-width": 1, stroke: "grey", fill: "grey", opacity: 1e-4 }).append(group); markerTracker.data("range", { startValue: marker.date, endValue: nextMarker.date }); if (marker.title) { markerTracker.setTitle(marker.title) } return markerTracker })) }, _getLabelFormatOptions: function(formatString) { var markerLabelOptions = this._markerLabelOptions; if (!markerLabelOptions) { this._markerLabelOptions = markerLabelOptions = extend(true, {}, this._options.marker.label) } if (!isDefined(this._options.marker.label.format)) { markerLabelOptions.format = formatString } return markerLabelOptions }, _adjustConstantLineLabels: function(constantLines) { var that = this; var axisPosition = that._options.position; var canvas = that.getCanvas(); var canvasLeft = canvas.left; var canvasRight = canvas.width - canvas.right; var canvasTop = canvas.top; var canvasBottom = canvas.height - canvas.bottom; var verticalCenter = canvasTop + (canvasBottom - canvasTop) / 2; var horizontalCenter = canvasLeft + (canvasRight - canvasLeft) / 2; var maxLabel = 0; constantLines.forEach((function(item) { var isHorizontal = that._isHorizontal; var linesOptions = item.options; var paddingTopBottom = linesOptions.paddingTopBottom; var paddingLeftRight = linesOptions.paddingLeftRight; var labelOptions = linesOptions.label; var labelVerticalAlignment = labelOptions.verticalAlignment; var labelHorizontalAlignment = labelOptions.horizontalAlignment; var labelIsInside = "inside" === labelOptions.position; var label = item.label; var box = item.labelBBox; var translateX; var translateY; if (null === label || box.isEmpty) { return } if (isHorizontal) { if (labelIsInside) { if (labelHorizontalAlignment === LEFT) { translateX = item.coord - paddingLeftRight - box.x - box.width } else { translateX = item.coord + paddingLeftRight - box.x } switch (labelVerticalAlignment) { case CENTER: translateY = verticalCenter - box.y - box.height / 2; break; case BOTTOM: translateY = canvasBottom - paddingTopBottom - box.y - box.height; break; default: translateY = canvasTop + paddingTopBottom - box.y } } else { if (axisPosition === labelVerticalAlignment) { maxLabel = _max(maxLabel, box.height + paddingTopBottom) } translateX = item.coord - box.x - box.width / 2; if (labelVerticalAlignment === BOTTOM) { translateY = canvasBottom + paddingTopBottom - box.y } else { translateY = canvasTop - paddingTopBottom - box.y - box.height } } } else if (labelIsInside) { if (labelVerticalAlignment === BOTTOM) { translateY = item.coord + paddingTopBottom - box.y } else { translateY = item.coord - paddingTopBottom - box.y - box.height } switch (labelHorizontalAlignment) { case CENTER: translateX = horizontalCenter - box.x - box.width / 2; break; case RIGHT: translateX = canvasRight - paddingLeftRight - box.x - box.width; break; default: translateX = canvasLeft + paddingLeftRight - box.x } } else { if (axisPosition === labelHorizontalAlignment) { maxLabel = _max(maxLabel, box.width + paddingLeftRight) } translateY = item.coord - box.y - box.height / 2; if (labelHorizontalAlignment === RIGHT) { translateX = canvasRight + paddingLeftRight - box.x } else { translateX = canvasLeft - paddingLeftRight - box.x - box.width } } label.attr({ translateX: translateX, 
translateY: translateY }) })); return maxLabel }, _drawConstantLinesForEstimating: function(constantLines) { var that = this; var renderer = this._renderer; var group = renderer.g(); constantLines.forEach((function(options) { that._drawConstantLineLabelText(options.label.text, 0, 0, options.label, group).attr({ align: "center" }) })); return group.append(renderer.root) }, _estimateLabelHeight: function(bBox, labelOptions) { var height = bBox.height; var drawingType = labelOptions.drawingType; if ("stagger" === this._validateDisplayMode(drawingType) || "stagger" === this._validateOverlappingMode(labelOptions.overlappingBehavior, drawingType)) { height = 2 * height + labelOptions.staggeringSpacing } if ("rotate" === this._validateDisplayMode(drawingType) || "rotate" === this._validateOverlappingMode(labelOptions.overlappingBehavior, drawingType)) { var sinCos = getCosAndSin(labelOptions.rotationAngle); height = height * sinCos.cos + bBox.width * sinCos.sin } return height && (height + labelOptions.indentFromAxis || 0) || 0 }, estimateMargins: function(canvas) { this.updateCanvas(canvas); var range = this._getViewportRange(); var ticksData = this._createTicksAndLabelFormat(range); var ticks = ticksData.ticks; var tickInterval = ticksData.tickInterval; var options = this._options; var constantLineOptions = this._outsideConstantLines.filter(l => l.labelOptions.visible).map(l => l.options); var rootElement = this._renderer.root; var labelIsVisible = options.label.visible && !range.isEmpty() && ticks.length; var labelValue = labelIsVisible && this.formatLabel(ticks[ticks.length - 1], options.label, void 0, void 0, tickInterval, ticks); var labelElement = labelIsVisible && this._renderer.text(labelValue, 0, 0).css(this._textFontStyles).attr(this._textOptions).append(rootElement); var titleElement = this._drawTitleText(rootElement, { x: 0, y: 0 }); var constantLinesLabelsElement = this._drawConstantLinesForEstimating(constantLineOptions); var labelBox = !options.label.template && labelElement && labelElement.getBBox() || { x: 0, y: 0, width: 0, height: 0 }; var titleBox = titleElement && titleElement.getBBox() || { x: 0, y: 0, width: 0, height: 0 }; var constantLinesBox = constantLinesLabelsElement.getBBox(); var titleHeight = titleBox.height ? titleBox.height + options.title.margin : 0; var labelHeight = this._estimateLabelHeight(labelBox, options.label); var constantLinesHeight = constantLinesBox.height ? constantLinesBox.height + getMaxConstantLinePadding(constantLineOptions) : 0; var height = labelHeight + titleHeight; var margins = { left: _max(getLeftMargin(labelBox), getLeftMargin(constantLinesBox)), right: _max(getRightMargin(labelBox), getRightMargin(constantLinesBox)), top: ("top" === options.position ? height : 0) + getConstantLineLabelMarginForVerticalAlignment(constantLineOptions, "top", constantLinesHeight), bottom: ("top" !== options.position ? height : 0) + getConstantLineLabelMarginForVerticalAlignment(constantLineOptions, "bottom", constantLinesHeight) }; labelElement && labelElement.remove(); titleElement && titleElement.remove(); constantLinesLabelsElement && constantLinesLabelsElement.remove(); return margins }, _checkAlignmentConstantLineLabels: function(labelOptions) { var position = labelOptions.position; var verticalAlignment = (labelOptions.verticalAlignment || "").toLowerCase(); var horizontalAlignment = (labelOptions.horizontalAlignment || "").toLowerCase(); if (this._isHorizontal) { if ("outside" === position) { verticalAlignment = verticalAlignment === BOTTOM ? 
BOTTOM : TOP; horizontalAlignment = CENTER } else { verticalAlignment = verticalAlignment === CENTER ? CENTER : verticalAlignment === BOTTOM ? BOTTOM : TOP; horizontalAlignment = horizontalAlignment === LEFT ? LEFT : RIGHT } } else if ("outside" === position) { verticalAlignment = CENTER; horizontalAlignment = horizontalAlignment === LEFT ? LEFT : RIGHT } else { verticalAlignment = verticalAlignment === BOTTOM ? BOTTOM : TOP; horizontalAlignment = horizontalAlignment === RIGHT ? RIGHT : horizontalAlignment === CENTER ? CENTER : LEFT } labelOptions.verticalAlignment = verticalAlignment; labelOptions.horizontalAlignment = horizontalAlignment }, _getConstantLineLabelsCoords: function(value, lineLabelOptions) { var x = value; var y = value; if (this._isHorizontal) { y = this._orthogonalPositions["top" === lineLabelOptions.verticalAlignment ? "start" : "end"] } else { x = this._orthogonalPositions["right" === lineLabelOptions.horizontalAlignment ? "end" : "start"] } return { x: x, y: y } }, _getAdjustedStripLabelCoords: function(strip) { var stripOptions = strip.options; var paddingTopBottom = stripOptions.paddingTopBottom; var paddingLeftRight = stripOptions.paddingLeftRight; var horizontalAlignment = stripOptions.label.horizontalAlignment; var verticalAlignment = stripOptions.label.verticalAlignment; var box = strip.labelBBox; var labelHeight = box.height; var labelWidth = box.width; var labelCoords = strip.labelCoords; var y = labelCoords.y - box.y; var x = labelCoords.x - box.x; if (verticalAlignment === TOP) { y += paddingTopBottom } else if (verticalAlignment === CENTER) { y -= labelHeight / 2 } else if (verticalAlignment === BOTTOM) { y -= paddingTopBottom + labelHeight } if (horizontalAlignment === LEFT) { x += paddingLeftRight } else if (horizontalAlignment === CENTER) { x -= labelWidth / 2 } else if (horizontalAlignment === RIGHT) { x -= paddingLeftRight + labelWidth } return { translateX: x, translateY: y } }, _adjustTitle: function(offset) { offset = offset || 0; if (!this._title) { return } var options = this._options; var position = options.position; var margin = options.title.margin; var title = this._title; var boxTitle = title.bBox; var x = boxTitle.x; var y = boxTitle.y; var width = boxTitle.width; var height = boxTitle.height; var axisPosition = this._axisPosition; var loCoord = axisPosition - margin - offset; var hiCoord = axisPosition + margin + offset; var params = {}; if (this._isHorizontal) { if (position === TOP) { params.translateY = loCoord - (y + height) } else { params.translateY = hiCoord - y } } else if (position === LEFT) { params.translateX = loCoord - (x + width) } else { params.translateX = hiCoord - x } title.element.attr(params) }, _checkTitleOverflow: function(titleElement) { if (!this._title && !titleElement) { return } var canvasLength = this._getScreenDelta(); var title = titleElement ? { bBox: titleElement.getBBox(), element: titleElement } : this._title; var titleOptions = this._options.title; var boxTitle = title.bBox; if ((this._isHorizontal ? boxTitle.width : boxTitle.height) > canvasLength) { title.element.setMaxSize(canvasLength, void 0, { wordWrap: titleOptions.wordWrap || "none", textOverflow: titleOptions.textOverflow || "ellipsis" }); this._wrapped = titleOptions.wordWrap && "none" !== titleOptions.wordWrap } else { var moreThanOriginalSize = title.originalSize && canvasLength > (this._isHorizontal ? 
title.originalSize.width : title.originalSize.height); !this._wrapped && moreThanOriginalSize && title.element.restoreText() } }, coordsIn: function(x, y) { var canvas = this.getCanvas(); var isHorizontal = this._options.isHorizontal; var position = this._options.position; var coord = isHorizontal ? y : x; if (isHorizontal && (x < canvas.left || x > canvas.width - canvas.right) || !isHorizontal && (y < canvas.top || y > canvas.height - canvas.bottom)) { return false } if (isHorizontal && position === constants.top || !isHorizontal && position === constants.left) { return coord < canvas[position] } return coord > canvas[isHorizontal ? "height" : "width"] - canvas[position] }, _boundaryTicksVisibility: { min: true, max: true }, adjust() { var seriesData = this._seriesData; var viewport = this._series.filter(s => s.isVisible()).reduce((range, s) => { var seriesRange = s.getViewport(); range.min = isDefined(seriesRange.min) ? range.min < seriesRange.min ? range.min : seriesRange.min : range.min; range.max = isDefined(seriesRange.max) ? range.max > seriesRange.max ? range.max : seriesRange.max : range.max; if (s.showZero) { range = new Range(range); range.correctValueZeroLevel() } return range }, {}); if (isDefined(viewport.min) && isDefined(viewport.max)) { seriesData.minVisible = viewport.min; seriesData.maxVisible = viewport.max } seriesData.userBreaks = this._getScaleBreaks(this._options, { minVisible: seriesData.minVisible, maxVisible: seriesData.maxVisible }, this._series, this.isArgumentAxis); this._translator.updateBusinessRange(this._getViewportRange()) }, hasWrap() { return this._wrapped }, getAxisPosition() { return this._axisPosition }, _getStick: function() { return !this._options.valueMarginsEnabled }, _getStripLabelCoords: function(from, to, stripLabelOptions) { var orthogonalPositions = this._orthogonalPositions; var isHorizontal = this._isHorizontal; var horizontalAlignment = stripLabelOptions.horizontalAlignment; var verticalAlignment = stripLabelOptions.verticalAlignment; var x; var y; if (isHorizontal) { if (horizontalAlignment === CENTER) { x = from + (to - from) / 2 } else if (horizontalAlignment === LEFT) { x = from } else if (horizontalAlignment === RIGHT) { x = to } y = orthogonalPositions[getStripVerticalAlignmentPosition(verticalAlignment)] } else { x = orthogonalPositions[getStripHorizontalAlignmentPosition(horizontalAlignment)]; if (verticalAlignment === TOP) { y = from } else if (verticalAlignment === CENTER) { y = to + (from - to) / 2 } else if (verticalAlignment === BOTTOM) { y = to } } return { x: x, y: y } }, _getTranslatedValue: function(value, offset) { var pos1 = this._translator.translate(value, offset, "semidiscrete" === this._options.type && this._options.tickInterval); var pos2 = this._axisPosition; var isHorizontal = this._isHorizontal; return { x: isHorizontal ? pos1 : pos2, y: isHorizontal ? pos2 : pos1 } }, areCoordsOutsideAxis: function(coords) { var coord = this._isHorizontal ? coords.x : coords.y; var visibleArea = this.getVisibleArea(); if (coord < visibleArea[0] || coord > visibleArea[1]) { return true } return false }, _getSkippedCategory: function(ticks) { var skippedCategory; if (this._options.type === constants.discrete && this._tickOffset && 0 !== ticks.length) { skippedCategory = ticks[ticks.length - 1] } return skippedCategory }, _filterBreaks: function(breaks, viewport, breakStyle) { var minVisible = viewport.minVisible; var maxVisible = viewport.maxVisible; var breakSize = breakStyle ? 
breakStyle.width : 0; return breaks.reduce((function(result, currentBreak) { var from = currentBreak.from; var to = currentBreak.to; var lastResult = result[result.length - 1]; var newBreak; if (!isDefined(from) || !isDefined(to)) { return result } if (from > to) { to = [from, from = to][0] } if (result.length && from < lastResult.to) { if (to > lastResult.to) { lastResult.to = to > maxVisible ? maxVisible : to; if (lastResult.gapSize) { lastResult.gapSize = void 0; lastResult.cumulativeWidth += breakSize } } } else if (from >= minVisible && from < maxVisible || to <= maxVisible && to > minVisible) { from = from >= minVisible ? from : minVisible; to = to <= maxVisible ? to : maxVisible; if (to - from < maxVisible - minVisible) { var _lastResult$cumulativ; newBreak = { from: from, to: to, cumulativeWidth: (null !== (_lastResult$cumulativ = null === lastResult || void 0 === lastResult ? void 0 : lastResult.cumulativeWidth) && void 0 !== _lastResult$cumulativ ? _lastResult$cumulativ : 0) + breakSize }; if (currentBreak.gapSize) { var _lastResult$cumulativ2; newBreak.gapSize = dateUtils.convertMillisecondsToDateUnits(to - from); newBreak.cumulativeWidth = null !== (_lastResult$cumulativ2 = null === lastResult || void 0 === lastResult ? void 0 : lastResult.cumulativeWidth) && void 0 !== _lastResult$cumulativ2 ? _lastResult$cumulativ2 : 0 } result.push(newBreak) } } return result }), []) }, _getScaleBreaks: function(axisOptions, viewport, series, isArgumentAxis) { var that = this; var breaks = (axisOptions.breaks || []).map((function(b) { return { from: that.parser(b.startValue), to: that.parser(b.endValue) } })); if ("discrete" !== axisOptions.type && "datetime" === axisOptions.dataType && axisOptions.workdaysOnly) { breaks = breaks.concat(generateDateBreaks(viewport.minVisible, viewport.maxVisible, axisOptions.workWeek, axisOptions.singleWorkdays, axisOptions.holidays)) } if (!isArgumentAxis && "discrete" !== axisOptions.type && "datetime" !== axisOptions.dataType && axisOptions.autoBreaksEnabled && 0 !== axisOptions.maxAutoBreakCount) { breaks = breaks.concat(generateAutoBreaks(axisOptions, series, viewport)) } return sortingBreaks(breaks) }, _drawBreak: function(translatedEnd, positionFrom, positionTo, width, options, group) { var breakStart = translatedEnd - (!this._translator.isInverted() ? width + 1 : 0); var attr = { "stroke-width": 1, stroke: options.borderColor, sharp: !options.isWaved ? options.isHorizontal ? "h" : "v" : void 0 }; var spaceAttr = { stroke: options.color, "stroke-width": width }; var getPoints = this._isHorizontal ? 
rotateLine : function(p) { return p }; var drawer = getLineDrawer(this._renderer, group, getPoints, positionFrom, breakStart, positionTo, options.isWaved); drawer(width / 2, spaceAttr); drawer(0, attr); drawer(width, attr) }, _createBreakClipRect: function(from, to) { var canvas = this._canvas; var clipWidth = to - from; var clipRect; if (this._isHorizontal) { clipRect = this._renderer.clipRect(canvas.left, from, canvas.width, clipWidth) } else { clipRect = this._renderer.clipRect(from, canvas.top, clipWidth, canvas.height) } this._breaksElements = this._breaksElements || []; this._breaksElements.push(clipRect); return clipRect.id }, _createBreaksGroup: function(clipFrom, clipTo) { var group = this._renderer.g().attr({ class: this._axisCssPrefix + "breaks", "clip-path": this._createBreakClipRect(clipFrom, clipTo) }).append(this._scaleBreaksGroup); this._breaksElements = this._breaksElements || []; this._breaksElements.push(group); return group }, _disposeBreaksGroup: function() { (this._breaksElements || []).forEach((function(clipRect) { clipRect.dispose() })); this._breaksElements = null }, drawScaleBreaks: function(customCanvas) { var that = this; var options = that._options; var breakStyle = options.breakStyle; var position = options.position; var positionFrom; var positionTo; var breaks = that._translator.getBusinessRange().breaks || []; var additionGroup; var additionBreakFrom; var additionBreakTo; that._disposeBreaksGroup(); if (!(breaks && breaks.length)) { return } var breakOptions = { color: that._options.containerColor, borderColor: breakStyle.color, isHorizontal: that._isHorizontal, isWaved: "straight" !== breakStyle.line.toLowerCase() }; if (customCanvas) { positionFrom = customCanvas.start; positionTo = customCanvas.end } else { positionFrom = that._orthogonalPositions.start - (options.visible && !that._axisShift && (position === LEFT || position === TOP) ? SCALE_BREAK_OFFSET : 0); positionTo = that._orthogonalPositions.end + (options.visible && (position === RIGHT || position === BOTTOM) ? SCALE_BREAK_OFFSET : 0) } var mainGroup = that._createBreaksGroup(positionFrom, positionTo); if (that._axisShift && options.visible) { additionBreakFrom = that._axisPosition - that._axisShift - SCALE_BREAK_OFFSET; additionBreakTo = additionBreakFrom + 2 * SCALE_BREAK_OFFSET; additionGroup = that._createBreaksGroup(additionBreakFrom, additionBreakTo) } breaks.forEach((function(br) { if (!br.gapSize) { var breakCoord = that._getTranslatedCoord(br.to); that._drawBreak(breakCoord, positionFrom, positionTo, breakStyle.width, breakOptions, mainGroup); if (that._axisShift && options.visible) { that._drawBreak(breakCoord, additionBreakFrom, additionBreakTo, breakStyle.width, breakOptions, additionGroup) } } })) }, _getSpiderCategoryOption: noop, shift: function(margins) { var options = this._options; var isHorizontal = options.isHorizontal; var axesSpacing = this.getMultipleAxesSpacing(); var constantLinesGroups = this._axisConstantLineGroups; function shiftGroup(side, group) { var attr = { translateX: 0, translateY: 0 }; var shift = margins[side] ? margins[side] + axesSpacing : 0; attr[isHorizontal ? "translateY" : "translateX"] = (side === LEFT || side === TOP ? -1 : 1) * shift; (group[side] || group).attr(attr); return shift } this._axisShift = shiftGroup(options.position, this._axisGroup); shiftGroup(options.position, this._axisElementsGroup); (isHorizontal ? 
[TOP, BOTTOM] : [LEFT, RIGHT]).forEach(side => { shiftGroup(side, constantLinesGroups.above); shiftGroup(side, constantLinesGroups.under) }) }, getCustomPosition(position) { var orthogonalAxis = this.getOrthogonalAxis(); var resolvedPosition = null !== position && void 0 !== position ? position : this.getResolvedPositionOption(); var offset = this.getOptions().offset; var orthogonalTranslator = orthogonalAxis.getTranslator(); var orthogonalAxisType = orthogonalAxis.getOptions().type; var validPosition = orthogonalAxis.validateUnit(resolvedPosition); var currentPosition; if ("discrete" === orthogonalAxisType && (!orthogonalTranslator._categories || orthogonalTranslator._categories.indexOf(validPosition) < 0)) { validPosition = void 0 } if (this.positionIsBoundary(resolvedPosition)) { currentPosition = this.getPredefinedPosition(resolvedPosition) } else if (!isDefined(validPosition)) { currentPosition = this.getPredefinedPosition(this.getOptions().position) } else { currentPosition = orthogonalTranslator.to(validPosition, -1) } if (isFinite(currentPosition) && isFinite(offset)) { currentPosition += offset } return currentPosition }, getCustomBoundaryPosition(position) { var { customPosition: customPosition, offset: offset } = this.getOptions(); var resolvedPosition = null !== position && void 0 !== position ? position : this.getResolvedPositionOption(); var orthogonalAxis = this.getOrthogonalAxis(); var orthogonalTranslator = orthogonalAxis.getTranslator(); var visibleArea = orthogonalTranslator.getCanvasVisibleArea(); if (!isDefined(orthogonalAxis._orthogonalPositions) || 0 === orthogonalTranslator.canvasLength) { return } var currentPosition = this.getCustomPosition(resolvedPosition); if (!isDefined(currentPosition)) { return this.getResolvedBoundaryPosition() } else if (isDefined(customPosition)) { if (currentPosition <= visibleArea.min) { return this._isHorizontal ? TOP : LEFT } else if (currentPosition >= visibleArea.max) { return this._isHorizontal ? BOTTOM : RIGHT } } else if (isDefined(offset)) { if (currentPosition <= this._orthogonalPositions.start) { return this._isHorizontal ? TOP : LEFT } else if (currentPosition >= this._orthogonalPositions.end) { return this._isHorizontal ? BOTTOM : RIGHT } } return currentPosition }, getResolvedPositionOption() { var _options$customPositi; var options = this.getOptions(); return null !== (_options$customPositi = options.customPosition) && void 0 !== _options$customPositi ? _options$customPositi : options.position }, customPositionIsAvailable() { var options = this.getOptions(); return isDefined(this.getOrthogonalAxis()) && (isDefined(options.customPosition) || isFinite(options.offset)) }, hasNonBoundaryPosition() { return this.customPositionIsAvailable() && !this.customPositionIsBoundary() }, getResolvedBoundaryPosition() { return this.customPositionIsBoundary() ? this._customBoundaryPosition : this.getOptions().position }, customPositionEqualsToPredefined() { return this.customPositionIsBoundary() && this._customBoundaryPosition === this.getOptions().position }, customPositionIsBoundary() { return this.positionIsBoundary(this._customBoundaryPosition) }, positionIsBoundary: position => [TOP, LEFT, BOTTOM, RIGHT].indexOf(position) >= 0, getPredefinedPosition(position) { var _this$_orthogonalPosi; return null === (_this$_orthogonalPosi = this._orthogonalPositions) || void 0 === _this$_orthogonalPosi ? void 0 : _this$_orthogonalPosi[position === TOP || position === LEFT ? 
"start" : "end"] }, resolveOverlappingForCustomPositioning(oppositeAxes) { var that = this; if (!that.hasNonBoundaryPosition() && !that.customPositionIsBoundary() && !oppositeAxes.some(a => a.hasNonBoundaryPosition())) { return } var overlappingObj = { axes: [], ticks: [] }; oppositeAxes.filter(orthogonalAxis => orthogonalAxis.pane === that.pane).forEach(orthogonalAxis => { for (var i = 0; i < that._majorTicks.length; i++) { var tick = that._majorTicks[i]; var label = tick.label; if (label) { if (overlappingObj.axes.indexOf(orthogonalAxis) < 0 && that._detectElementsOverlapping(label, orthogonalAxis._axisElement)) { overlappingObj.axes.push(orthogonalAxis); that._shiftThroughOrthogonalAxisOverlappedTick(label, orthogonalAxis) } for (var j = 0; j < orthogonalAxis._majorTicks.length; j++) { var oppositeTick = orthogonalAxis._majorTicks[j]; var oppositeLabel = oppositeTick.label; if (oppositeLabel && that._detectElementsOverlapping(label, oppositeLabel)) { overlappingObj.ticks.push(tick); that._shiftThroughAxisOverlappedTick(tick); i = that._majorTicks.length; break } } } if (tick.mark && overlappingObj.ticks.indexOf(tick) < 0) { if (that._isHorizontal && tick.mark.attr("translateY")) { tick.mark.attr({ translateY: 0 }) } else if (!that._isHorizontal && tick.mark.attr("translateX")) { tick.mark.attr({ translateX: 0 }) } } } }) }, _shiftThroughOrthogonalAxisOverlappedTick(label, orthogonalAxis) { var labelBBox = label.getBBox(); var orthogonalAxisPosition = orthogonalAxis.getAxisPosition(); var orthogonalAxisLabelOptions = orthogonalAxis.getOptions().label; var orthogonalAxisLabelPosition = orthogonalAxisLabelOptions.position; var orthogonalAxisLabelIndent = orthogonalAxisLabelOptions.indentFromAxis / 2; var translateCoordName = this._isHorizontal ? "translateX" : "translateY"; var defaultOrthogonalAxisLabelPosition = this._isHorizontal ? LEFT : TOP; var translate = label.attr(translateCoordName); var labelCoord = (this._isHorizontal ? labelBBox.x : labelBBox.y) + translate; var labelSize = this._isHorizontal ? labelBBox.width : labelBBox.height; var outsidePart = orthogonalAxisPosition - labelCoord; var insidePart = labelCoord + labelSize - orthogonalAxisPosition; var attr = {}; attr[translateCoordName] = translate; if (outsidePart > 0 && insidePart > 0) { if (insidePart - outsidePart > 1) { attr[translateCoordName] += outsidePart + orthogonalAxisLabelIndent } else if (outsidePart - insidePart > 1) { attr[translateCoordName] -= insidePart + orthogonalAxisLabelIndent } else { attr[translateCoordName] += orthogonalAxisLabelPosition === defaultOrthogonalAxisLabelPosition ? outsidePart + orthogonalAxisLabelIndent : -(insidePart + orthogonalAxisLabelIndent) } label.attr(attr) } }, _shiftThroughAxisOverlappedTick(tick) { var _tick$mark; var label = tick.label; if (!label) { return } var labelBBox = label.getBBox(); var tickMarkBBox = null === (_tick$mark = tick.mark) || void 0 === _tick$mark ? void 0 : _tick$mark.getBBox(); var axisPosition = this.getAxisPosition(); var labelOptions = this.getOptions().label; var labelIndent = labelOptions.indentFromAxis; var labelPosition = labelOptions.position; var defaultLabelPosition = this._isHorizontal ? TOP : LEFT; var translateCoordName = this._isHorizontal ? "translateY" : "translateX"; var translate = label.attr(translateCoordName); var labelCoord = (this._isHorizontal ? labelBBox.y : labelBBox.x) + translate; var labelSize = this._isHorizontal ? 
labelBBox.height : labelBBox.width; var attr = {}; attr[translateCoordName] = translate + (labelPosition === defaultLabelPosition ? axisPosition - labelCoord + labelIndent : -(labelCoord - axisPosition + labelSize + labelIndent)); label.attr(attr); if (tick.mark) { var markerSize = this._isHorizontal ? tickMarkBBox.height : tickMarkBBox.width; var dir = labelPosition === defaultLabelPosition ? 1 : -1; attr[translateCoordName] = dir * (markerSize - 1); tick.mark.attr(attr) } }, _detectElementsOverlapping(element1, element2) { if (!element1 || !element2) { return false } var bBox1 = element1.getBBox(); var x1 = bBox1.x + element1.attr("translateX"); var y1 = bBox1.y + element1.attr("translateY"); var bBox2 = element2.getBBox(); var x2 = bBox2.x + element2.attr("translateX"); var y2 = bBox2.y + element2.attr("translateY"); return (x2 >= x1 && x2 <= x1 + bBox1.width || x1 >= x2 && x1 <= x2 + bBox2.width) && (y2 >= y1 && y2 <= y1 + bBox1.height || y1 >= y2 && y1 <= y2 + bBox2.height) } } }; function getLineDrawer(renderer, root, rotatePoints, positionFrom, breakStart, positionTo, isWaved) { var elementType = isWaved ? "bezier" : "line"; var group = renderer.g().append(root); return function(offset, attr) { renderer.path(rotatePoints(getPoints(positionFrom, breakStart, positionTo, offset, isWaved)), elementType).attr(attr).append(group) } } function getPoints(positionFrom, breakStart, positionTo, offset, isWaved) { if (!isWaved) { return [positionFrom, breakStart + offset, positionTo, breakStart + offset] } breakStart += offset; var currentPosition; var topPoint = breakStart + WAVED_LINE_TOP; var centerPoint = breakStart + WAVED_LINE_CENTER; var bottomPoint = breakStart + WAVED_LINE_BOTTOM; var points = [ [positionFrom, centerPoint] ]; for (currentPosition = positionFrom; currentPosition < positionTo + WAVED_LINE_LENGTH; currentPosition += WAVED_LINE_LENGTH) { points.push([currentPosition + 6, topPoint, currentPosition + 6, topPoint, currentPosition + 12, centerPoint, currentPosition + 18, bottomPoint, currentPosition + 18, bottomPoint, currentPosition + 24, centerPoint]) } return [].concat.apply([], points) } function rotateLine(lineCoords) { var points = []; var i; for (i = 0; i < lineCoords.length; i += 2) { points.push(lineCoords[i + 1]); points.push(lineCoords[i]) } return points }
checkbox.rs
use Cursive; use Printer; use With; use direction::Direction; use event::{Event, EventResult, Key}; use std::rc::Rc; use theme::ColorStyle; use vec::Vec2; use view::View; /// Checkable box. pub struct Checkbox { checked: bool, enabled: bool, on_change: Option<Rc<Fn(&mut Cursive, bool)>>, } new_default!(Checkbox); impl Checkbox { impl_enabled!(self.enabled); /// Creates a new, unchecked checkbox. pub fn new() -> Self { Checkbox { checked: false, enabled: true, on_change: None, } } /// Sets a callback to be used when the state changes. pub fn set_on_change<F: 'static + Fn(&mut Cursive, bool)>(&mut self, on_change: F) { self.on_change = Some(Rc::new(on_change)); } /// Sets a callback to be used when the state changes. /// /// Chainable variant. pub fn on_change<F: 'static + Fn(&mut Cursive, bool)>(self, on_change: F) -> Self { self.with(|s| s.set_on_change(on_change)) } /// Toggles the checkbox state. pub fn
(&mut self) -> EventResult { let checked = !self.checked; self.set_checked(checked) } /// Check the checkbox. pub fn check(&mut self) -> EventResult { self.set_checked(true) } /// Check the checkbox. /// /// Chainable variant. pub fn checked(self) -> Self { self.with(|s| { s.check(); }) } /// Returns `true` if the checkbox is checked. pub fn is_checked(&self) -> bool { self.checked } /// Uncheck the checkbox. pub fn uncheck(&mut self) -> EventResult { self.set_checked(false) } /// Uncheck the checkbox. /// /// Chainable variant. pub fn unchecked(self) -> Self { self.with(|s| { s.uncheck(); }) } /// Sets the checkbox state. pub fn set_checked(&mut self, checked: bool) -> EventResult { self.checked = checked; if let Some(ref on_change) = self.on_change { let on_change = on_change.clone(); EventResult::with_cb(move |s| on_change(s, checked)) } else { EventResult::Consumed(None) } } fn draw_internal(&self, printer: &Printer) { printer.print((0, 0), "[ ]"); if self.checked { printer.print((1, 0), "X"); } } } impl View for Checkbox { fn required_size(&mut self, _: Vec2) -> Vec2 { Vec2::new(3, 1) } fn take_focus(&mut self, _: Direction) -> bool { self.enabled } fn draw(&self, printer: &Printer) { if self.enabled { printer.with_selection(printer.focused, |printer| self.draw_internal(printer)); } else { printer.with_color(ColorStyle::Secondary, |printer| self.draw_internal(printer)); } } fn on_event(&mut self, event: Event) -> EventResult { match event { Event::Key(Key::Enter) | Event::Char(' ') => self.toggle(), _ => EventResult::Ignored, } } }
toggle
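A minimal usage sketch for the completed Checkbox above — hypothetical, not taken from the source file; it assumes this vintage of cursive exposes `Cursive::new()`, `add_layer`, `quit`, and re-exports the widget as `cursive::views::Checkbox` (the module path is an assumption):

// Hypothetical usage sketch (module path, `Cursive::new()` and `add_layer`
// are assumptions about this cursive vintage, not shown in the file above).
extern crate cursive;

use cursive::Cursive;
use cursive::views::Checkbox;

fn main() {
    let mut siv = Cursive::new();

    // Start checked; `on_change` fires whenever <Enter> or <Space>
    // toggles the box (handled by `on_event` in the widget above).
    let checkbox = Checkbox::new()
        .checked()
        .on_change(|s, checked| {
            if !checked {
                // React to the new state; here we simply quit on uncheck.
                s.quit();
            }
        });

    siv.add_layer(checkbox);
    siv.run();
}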
stackImageIndexSynchronizer.js
import external from '../externalModules.js'; import { getToolState } from '../stateManagement/toolState.js';
// This function causes the image in the target stack to be set to the one closest // To the image in the source stack by image position export default function (synchronizer, sourceElement, targetElement) { // Ignore the case where the source and target are the same enabled element if (targetElement === sourceElement) { return; } const cornerstone = external.cornerstone; const sourceStackToolDataSource = getToolState(sourceElement, 'stack'); const sourceStackData = sourceStackToolDataSource.data[0]; const targetStackToolDataSource = getToolState(targetElement, 'stack'); const targetStackData = targetStackToolDataSource.data[0]; let newImageIdIndex = sourceStackData.currentImageIdIndex; // Clamp the index newImageIdIndex = clip(newImageIdIndex, 0, targetStackData.imageIds.length - 1); // Do nothing if the index has not changed if (newImageIdIndex === targetStackData.currentImageIdIndex) { return; } const startLoadingHandler = loadHandlerManager.getStartLoadHandler(); const endLoadingHandler = loadHandlerManager.getEndLoadHandler(); const errorLoadingHandler = loadHandlerManager.getErrorLoadingHandler(); if (startLoadingHandler) { startLoadingHandler(targetElement); } let loader; if (targetStackData.preventCache === true) { loader = cornerstone.loadImage(targetStackData.imageIds[newImageIdIndex]); } else { loader = cornerstone.loadAndCacheImage(targetStackData.imageIds[newImageIdIndex]); } loader.then(function (image) { const viewport = cornerstone.getViewport(targetElement); targetStackData.currentImageIdIndex = newImageIdIndex; synchronizer.displayImage(targetElement, image, viewport); if (endLoadingHandler) { endLoadingHandler(targetElement, image); } }, function (error) { const imageId = targetStackData.imageIds[newImageIdIndex]; if (errorLoadingHandler) { errorLoadingHandler(targetElement, imageId, error); } }); }
import loadHandlerManager from '../stateManagement/loadHandlerManager.js'; import clip from '../util/clip.js';
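A hypothetical wiring sketch for the handler above — the import path, the `Synchronizer(eventName, handler)` signature, and the `'cornerstonenewimage'` event string are assumptions about the surrounding cornerstone-tools API, not shown in this file:

// Hypothetical wiring sketch (Synchronizer API and event name are assumptions).
import { Synchronizer } from 'cornerstone-tools';
import stackImageIndexSynchronizer from './stackImageIndexSynchronizer.js';

// Run the handler whenever either element displays a new image.
const synchronizer = new Synchronizer('cornerstonenewimage', stackImageIndexSynchronizer);

// Both elements must already be enabled and carry 'stack' tool state,
// since the handler reads getToolState(element, 'stack').
synchronizer.add(sourceElement);
synchronizer.add(targetElement);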
invitation_test.go
package server import ( "bytes" "fmt" "net/http" "net/http/httptest" "net/url" "testing" "time" "github.com/jonboulle/clockwork" "github.com/coreos/dex/user" "github.com/coreos/go-oidc/jose" "github.com/coreos/go-oidc/key" ) var ( clock = clockwork.NewRealClock() ) func
(t *testing.T) { invUserID := "ID-1" invVerifiedID := "ID-Verified" invGoodSigner := key.NewPrivateKeySet([]*key.PrivateKey{testPrivKey}, time.Now().Add(time.Minute)).Active().Signer() badKey, err := key.GeneratePrivateKey() if err != nil { panic(fmt.Sprintf("couldn't make new key: %q", err)) } invBadSigner := key.NewPrivateKeySet([]*key.PrivateKey{badKey}, time.Now().Add(time.Minute)).Active().Signer() makeInvitationToken := func(password, userID, clientID, email string, callback url.URL, expires time.Duration, signer jose.Signer) string { iv := user.NewInvitation( user.User{ID: userID, Email: email}, user.Password(password), testIssuerURL, clientID, callback, expires) jwt, err := jose.NewSignedJWT(iv.Claims, signer) if err != nil { t.Fatalf("couldn't make token: %q", err) } token := jwt.Encode() return token } tests := []struct { userID string query url.Values signer jose.Signer wantCode int wantCallback url.URL wantEmailVerified bool }{ { // Case 0 Happy Path userID: invUserID, query: url.Values{ "token": []string{makeInvitationToken("password", invUserID, testClientID, "[email protected]", testRedirectURL, time.Hour*1, invGoodSigner)}, }, signer: invGoodSigner, wantCode: http.StatusSeeOther, wantCallback: testRedirectURL, wantEmailVerified: true, }, { // Case 1 user already verified userID: invVerifiedID, query: url.Values{ "token": []string{makeInvitationToken("password", invVerifiedID, testClientID, "[email protected]", testRedirectURL, time.Hour*1, invGoodSigner)}, }, signer: invGoodSigner, wantCode: http.StatusSeeOther, wantCallback: testRedirectURL, wantEmailVerified: true, }, { // Case 2 bad email userID: invUserID, query: url.Values{ "token": []string{makeInvitationToken("password", invVerifiedID, testClientID, "[email protected]", testRedirectURL, time.Hour*1, invGoodSigner)}, }, signer: invGoodSigner, wantCode: http.StatusBadRequest, wantCallback: testRedirectURL, wantEmailVerified: false, }, { // Case 3 bad signer userID: invUserID, query: url.Values{ "token": []string{makeInvitationToken("password", invUserID, testClientID, "[email protected]", testRedirectURL, time.Hour*1, invBadSigner)}, }, signer: invGoodSigner, wantCode: http.StatusBadRequest, wantCallback: testRedirectURL, wantEmailVerified: false, }, } for i, tt := range tests { f, err := makeTestFixtures() if err != nil { t.Fatalf("case %d: could not make test fixtures: %v", i, err) } keys, err := f.srv.KeyManager.PublicKeys() if err != nil { t.Fatalf("case %d: test fixture key infrastructure is broken: %v", i, err) } tZero := clock.Now() handler := &InvitationHandler{ passwordResetURL: f.srv.absURL("RESETME"), issuerURL: testIssuerURL, um: f.srv.UserManager, keysFunc: f.srv.KeyManager.PublicKeys, signerFunc: func() (jose.Signer, error) { return tt.signer, nil }, redirectValidityWindow: 100 * time.Second, } w := httptest.NewRecorder() u := testIssuerURL u.RawQuery = tt.query.Encode() req, err := http.NewRequest("GET", u.String(), nil) if err != nil { t.Fatalf("case %d: impossible error: %v", i, err) } handler.ServeHTTP(w, req) if tt.wantCode != w.Code { t.Errorf("case %d: wantCode=%v, got=%v", i, tt.wantCode, w.Code) continue } usr, err := f.srv.UserManager.Get(tt.userID) if err != nil { t.Fatalf("case %d: unexpected error: %v", i, err) } if usr.EmailVerified != tt.wantEmailVerified { t.Errorf("case %d: wantEmailVerified=%v got=%v", i, tt.wantEmailVerified, usr.EmailVerified) } if w.Code == http.StatusSeeOther { locString := w.HeaderMap.Get("Location") loc, err := url.Parse(locString) if err != nil { t.Fatalf("case 
%d: redirect returned nonsense url: '%v', %v", i, locString, err) } pwrToken := loc.Query().Get("token") pwrReset, err := user.ParseAndVerifyPasswordResetToken(pwrToken, testIssuerURL, keys) if err != nil { t.Errorf("case %d: password token is invalid: %v", i, err) } expTime := pwrReset.Claims["exp"].(int64) if expTime > tZero.Add(handler.redirectValidityWindow).Unix() || expTime < tZero.Unix() { t.Errorf("case %d: funny expiration time detected: %d", i, pwrReset.Claims["exp"]) } if pwrReset.Claims["aud"] != testClientID { t.Errorf("case %d: wanted \"aud\"=%v got=%v", i, testClientID, pwrReset.Claims["aud"]) } if pwrReset.Claims["iss"] != testIssuerURL.String() { t.Errorf("case %d: wanted \"iss\"=%v got=%v", i, testIssuerURL, pwrReset.Claims["iss"]) } if pwrReset.UserID() != tt.userID { t.Errorf("case %d: wanted UserID=%v got=%v", i, tt.userID, pwrReset.UserID()) } if bytes.Compare(pwrReset.Password(), user.Password("password")) != 0 { t.Errorf("case %d: wanted Password=%v got=%v", i, user.Password("password"), pwrReset.Password()) } if *pwrReset.Callback() != testRedirectURL { t.Errorf("case %d: wanted callback=%v got=%v", i, testRedirectURL, pwrReset.Callback()) } } } }
TestInvitationHandler
clientset_generated.go
// Code generated by client-gen. DO NOT EDIT. package fake import ( clientset "github.com/ray-project/kuberay/ray-operator/pkg/client/clientset/versioned" rayv1alpha1 "github.com/ray-project/kuberay/ray-operator/pkg/client/clientset/versioned/typed/raycluster/v1alpha1" fakerayv1alpha1 "github.com/ray-project/kuberay/ray-operator/pkg/client/clientset/versioned/typed/raycluster/v1alpha1/fake" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/discovery" fakediscovery "k8s.io/client-go/discovery/fake" "k8s.io/client-go/testing" ) // NewSimpleClientset returns a clientset that will respond with the provided objects. // It's backed by a very simple object tracker that processes creates, updates and deletions as-is, // without applying any validations and/or defaults. It shouldn't be considered a replacement // for a real clientset and is mostly useful in simple unit tests. func
(objects ...runtime.Object) *Clientset { o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) for _, obj := range objects { if err := o.Add(obj); err != nil { panic(err) } } cs := &Clientset{tracker: o} cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} cs.AddReactor("*", "*", testing.ObjectReaction(o)) cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { gvr := action.GetResource() ns := action.GetNamespace() watch, err := o.Watch(gvr, ns) if err != nil { return false, nil, err } return true, watch, nil }) return cs } // Clientset implements clientset.Interface. Meant to be embedded into a // struct to get a default implementation. This makes faking out just the method // you want to test easier. type Clientset struct { testing.Fake discovery *fakediscovery.FakeDiscovery tracker testing.ObjectTracker } func (c *Clientset) Discovery() discovery.DiscoveryInterface { return c.discovery } func (c *Clientset) Tracker() testing.ObjectTracker { return c.tracker } var _ clientset.Interface = &Clientset{} // RayV1alpha1 retrieves the RayV1alpha1Client func (c *Clientset) RayV1alpha1() rayv1alpha1.RayV1alpha1Interface { return &fakerayv1alpha1.FakeRayV1alpha1{Fake: &c.Fake} }
NewSimpleClientset
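A hypothetical unit-test sketch for NewSimpleClientset above — the import path for the RayCluster API types and the context-taking Get signature of the typed client are assumptions, not shown in this file:

package fake_test

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	// Assumed import path for the RayCluster API types.
	rayv1alpha1 "github.com/ray-project/kuberay/ray-operator/api/raycluster/v1alpha1"
	"github.com/ray-project/kuberay/ray-operator/pkg/client/clientset/versioned/fake"
)

func TestFakeClientsetGet(t *testing.T) {
	// Seed the object tracker with one pre-existing RayCluster.
	seed := &rayv1alpha1.RayCluster{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
	}
	cs := fake.NewSimpleClientset(seed)

	// Reads are served from the tracker, so the seeded object comes back.
	// (Get taking a context is an assumption about the generated typed client.)
	got, err := cs.RayV1alpha1().RayClusters("default").Get(context.TODO(), "demo", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Name != "demo" {
		t.Errorf("want name %q, got %q", "demo", got.Name)
	}
}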
imagechangetriggers.go
package builds import ( "time" g "github.com/onsi/ginkgo" o "github.com/onsi/gomega" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" exutil "github.com/openshift/openshift-tests/test/extended/util" ) var _ = g.Describe("[Feature:Builds][Conformance] imagechangetriggers", func() { defer g.GinkgoRecover()
var ( buildFixture = exutil.FixturePath("testdata", "builds", "test-imagechangetriggers.yaml") oc = exutil.NewCLI("imagechangetriggers", exutil.KubeConfigPath()) ) g.Context("", func() { g.BeforeEach(func() { exutil.PreTestDump() }) g.AfterEach(func() { if g.CurrentGinkgoTestDescription().Failed { exutil.DumpPodStates(oc) exutil.DumpConfigMapStates(oc) exutil.DumpPodLogsStartingWith("", oc) } }) g.It("imagechangetriggers should trigger builds of all types", func() { err := oc.AsAdmin().Run("create").Args("-f", buildFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) err = wait.Poll(time.Second, 30*time.Second, func() (done bool, err error) { for _, build := range []string{"bc-docker-1", "bc-jenkins-1", "bc-source-1", "bc-custom-1"} { _, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(build, metav1.GetOptions{}) if err == nil { continue } if kerrors.IsNotFound(err) { return false, nil } return false, err } return true, nil }) o.Expect(err).NotTo(o.HaveOccurred()) }) }) })
__main__.py
import poets def
(): print(poets.poets()) if __name__ == '__main__': main()
main
collection.py
# Copyright (C) 2019-2021 Zilliz. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except # in compliance with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License # is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing permissions and limitations under # the License. import copy import json import pandas from .connections import get_connection from .schema import ( CollectionSchema, FieldSchema, parse_fields_from_data, infer_dtype_bydata, ) from .prepare import Prepare from .partition import Partition from .index import Index from .search import SearchResult from .mutation import MutationResult from .types import DataType from .exceptions import ( SchemaNotReadyException, DataTypeNotMatchException, DataNotMatchException, ConnectionNotExistException, PartitionAlreadyExistException, PartitionNotExistException, IndexNotExistException, AutoIDException, ExceptionsMessage, ) from .future import SearchFuture, MutationFuture def _check_schema(schema): if schema is None: raise SchemaNotReadyException(0, ExceptionsMessage.NoSchema) if len(schema.fields) < 1: raise SchemaNotReadyException(0, ExceptionsMessage.EmptySchema) vector_fields = [] for field in schema.fields: if field.dtype == DataType.FLOAT_VECTOR or field.dtype == DataType.BINARY_VECTOR: vector_fields.append(field.name) if len(vector_fields) < 1: raise SchemaNotReadyException(0, ExceptionsMessage.NoVector) def _check_data_schema(fields, data): if isinstance(data, pandas.DataFrame): for i, field in enumerate(fields): for j, _ in enumerate(data[field.name]): tmp_type = infer_dtype_bydata(data[field.name].iloc[j]) if tmp_type != field.dtype: raise DataNotMatchException(0, ExceptionsMessage.DataTypeInconsistent) else: for i, field in enumerate(fields): for j, _ in enumerate(data[i]): tmp_type = infer_dtype_bydata(data[i][j]) if tmp_type != field.dtype: raise DataNotMatchException(0, ExceptionsMessage.DataTypeInconsistent) class Collection: """ This is a class corresponding to collection in milvus. """ def __init__(self, name, schema=None, using="default", shards_num=2, **kwargs): """ Constructs a collection by name, schema and other parameters. Connection information is contained in kwargs. :param name: the name of collection :type name: str :param schema: the schema of collection :type schema: class `schema.CollectionSchema` :param using: Milvus link of create collection :type using: str :param shards_num: How wide to scale collection. Corresponds to how many active datanodes can be used on insert. :type shards_num: int :example: >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType >>> connections.connect() <pymilvus.client.stub.Milvus object at 0x7f9a190ca898> >>> fields = [ ... FieldSchema("film_id", DataType.INT64, is_primary=True), ... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=128) ... ] >>> description="This is a new collection description." >>> schema = CollectionSchema(fields=fields, description=description) >>> collection = Collection(name="test_collection_init", schema=schema) >>> collection.name 'test_collection_init' >>> collection.description 'This is a new collection description.' 
>>> collection.is_empty True >>> collection.num_entities 0 """ self._name = name self._using = using self._shards_num = shards_num self._kwargs = kwargs conn = self._get_connection() has = conn.has_collection(self._name) if has: resp = conn.describe_collection(self._name) server_schema = CollectionSchema.construct_from_dict(resp) if schema is None: self._schema = server_schema else: if not isinstance(schema, CollectionSchema): raise SchemaNotReadyException(0, ExceptionsMessage.SchemaType) if server_schema != schema: raise SchemaNotReadyException(0, ExceptionsMessage.SchemaInconsistent) self._schema = schema else: if schema is None: raise SchemaNotReadyException(0, ExceptionsMessage.CollectionNotExistNoSchema % name) if isinstance(schema, CollectionSchema): _check_schema(schema) conn.create_collection(self._name, fields=schema.to_dict(), shards_num=self._shards_num) self._schema = schema else: raise SchemaNotReadyException(0, ExceptionsMessage.SchemaType) def __repr__(self): return json.dumps({ 'name': self.name, 'schema': self._schema.to_dict(), 'partitions': [json.loads(p.__repr__()) for p in self.partitions], 'description': self.description, }) def _get_connection(self): conn = get_connection(self._using) if conn is None: raise ConnectionNotExistException(0, ExceptionsMessage.ConnectFirst) return conn def _check_insert_data_schema(self, data): """ Checks whether the data type matches the schema. """ if self._schema is None: return False if self._schema.auto_id: if isinstance(data, pandas.DataFrame): if self._schema.primary_field.name in data: if not data[self._schema.primary_field.name].isnull().all(): raise DataNotMatchException(0, ExceptionsMessage.AutoIDWithData) data = data.drop(self._schema.primary_field.name, axis=1) infer_fields = parse_fields_from_data(data) tmp_fields = copy.deepcopy(self._schema.fields) for i, field in enumerate(self._schema.fields): if field.is_primary and field.auto_id: tmp_fields.pop(i) if len(infer_fields) != len(tmp_fields): raise DataTypeNotMatchException(0, ExceptionsMessage.FieldsNumInconsistent) _check_data_schema(infer_fields, data) for x, y in zip(infer_fields, tmp_fields): if x.dtype != y.dtype: return False if isinstance(data, pandas.DataFrame): if x.name != y.name: return False # todo check dim return True def _check_schema(self): if self._schema is None: raise SchemaNotReadyException(0, ExceptionsMessage.NoSchema) def _get_vector_field(self) -> str: for field in self._schema.fields: if field.dtype == DataType.FLOAT_VECTOR or field.dtype == DataType.BINARY_VECTOR: return field.name raise SchemaNotReadyException(0, ExceptionsMessage.NoVector) @classmethod def construct_from_dataframe(cls, name, dataframe, **kwargs): if dataframe is None: raise SchemaNotReadyException(0, ExceptionsMessage.NoneDataFrame) if not isinstance(dataframe, pandas.DataFrame): raise SchemaNotReadyException(0, ExceptionsMessage.DataFrameType) primary_field = kwargs.pop("primary_field", None) if primary_field is None: raise SchemaNotReadyException(0, ExceptionsMessage.NoPrimaryKey) pk_index = -1 for i, field in enumerate(dataframe): if field == primary_field: pk_index = i if pk_index == -1: raise SchemaNotReadyException(0, ExceptionsMessage.PrimaryKeyNotExist) if "auto_id" in kwargs: if not isinstance(kwargs.get("auto_id", None), bool): raise AutoIDException(0, ExceptionsMessage.AutoIDType) auto_id = kwargs.pop("auto_id", False) if auto_id: if dataframe[primary_field].isnull().all(): dataframe = dataframe.drop(primary_field, axis=1) else: raise SchemaNotReadyException(0, 
ExceptionsMessage.AutoIDWithData) fields = parse_fields_from_data(dataframe) _check_data_schema(fields, dataframe) if auto_id: fields.insert(pk_index, FieldSchema(name=primary_field, dtype=DataType.INT64, is_primary=True, auto_id=True, **kwargs)) else: for field in fields: if field.name == primary_field: field.is_primary = True field.auto_id = False schema = CollectionSchema(fields=fields) _check_schema(schema) collection = cls(name, schema, **kwargs) res = collection.insert(data=dataframe) return collection, res @property def schema(self) -> CollectionSchema: """ Returns the schema of the collection. :return schema.CollectionSchema: Schema of the collection. """ return self._schema @property def description(self) -> str: """ Returns a text description of the collection. :return str: Collection description text, returned when the operation succeeds. :example: >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType >>> connections.connect() >>> fields = [ ... FieldSchema("film_id", DataType.INT64, is_primary=True), ... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=128) ... ] >>> description="This is an example text description." >>> schema = CollectionSchema(fields=fields, description=description) >>> collection = Collection(name="test_collection_description", schema=schema) >>> collection.description 'This is an example text description.' """ return self._schema.description @property def name(self) -> str: """ Returns the collection name. :return str: The collection name, returned when the operation succeeds. :example: >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType >>> connections.connect() >>> fields = [ ... FieldSchema("film_id", DataType.INT64, is_primary=True), ... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=128) ... ] >>> schema = CollectionSchema(fields) >>> collection = Collection("test_collection_name", schema) >>> collection.name 'test_collection_name' """ return self._name @property def is_empty(self) -> bool: """ Whether the collection is empty. This method needs to call `num_entities <#pymilvus.Collection.num_entities>`_. :return bool: * True: The collection is empty. * False: The collection is not empty. :example: >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType >>> connections.connect() >>> schema = CollectionSchema([ ... FieldSchema("film_id", DataType.INT64, is_primary=True), ... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2) ... ]) >>> collection = Collection("test_collection_is_empty", schema) >>> collection.is_empty True >>> collection.insert([[1], [[1.0, 2.0]]]) <pymilvus.search.MutationResult object at 0x7fabaf3e5d50> >>> collection.is_empty False """ return self.num_entities == 0 # read-only @property def num_entities(self) -> int: """ Returns the number of entities in the collection. :return int: Number of entities in the collection. :raises CollectionNotExistException: If the collection does not exist. :example: >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType >>> connections.connect() >>> schema = CollectionSchema([ ... FieldSchema("film_id", DataType.INT64, is_primary=True), ... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2) ... 
]) >>> collection = Collection("test_collection_num_entities", schema) >>> collection.num_entities 0 >>> collection.insert([[1, 2], [[1.0, 2.0], [3.0, 4.0]]]) >>> collection.num_entities 2 """ conn = self._get_connection() conn.flush([self._name]) status = conn.get_collection_stats(db_name="", collection_name=self._name) return status["row_count"] @property def primary_field(self) -> FieldSchema: """ Returns the primary field of the collection. :return schema.FieldSchema: The primary field of the collection. :example: >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType >>> connections.connect() >>> schema = CollectionSchema([ ... FieldSchema("film_id", DataType.INT64, is_primary=True), ... FieldSchema("film_length", DataType.INT64, description="length in miniute"), ... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2) ... ]) >>> collection = Collection("test_collection_primary_field", schema) >>> collection.primary_field.name 'film_id' """ return self._schema.primary_field def drop(self, timeout=None, **kwargs): """ Drops the collection together with its index files. :param timeout: * *timeout* (``float``) -- An optional duration of time in seconds to allow for the RPC. If timeout is set to None, the client keeps waiting until the server responds or an error occurs. :raises CollectionNotExistException: If the collection does not exist. :example: >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType >>> connections.connect() >>> schema = CollectionSchema([ ... FieldSchema("film_id", DataType.INT64, is_primary=True), ... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2) ... ]) >>> collection = Collection("test_collection_drop", schema) >>> utility.has_collection("test_collection_drop") True >>> collection.drop() >>> utility.has_collection("test_collection_drop") False """ conn = self._get_connection() indexes = self.indexes for index in indexes: index.drop(timeout=timeout, **kwargs) conn.drop_collection(self._name, timeout=timeout, **kwargs) def load(self, partition_names=None, timeout=None, **kwargs): """ Loads the collection from disk to memory. :param partition_names: The specified partitions to load. :type partition_names: list[str] :param timeout:An optional duration of time in seconds to allow for the RPC. If timeout is set to None, the client keeps waiting until the server responds or error occurs. :type timeout: float :param kwargs: * *_async* (``bool``) -- Indicate if invoke asynchronously. :raises CollectionNotExistException: If the collection does not exist. :raises ParamError: If the parameters are invalid. :raises BaseException: If the specified field, index or partition does not exist. :example: >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType >>> connections.connect() >>> schema = CollectionSchema([ ... FieldSchema("film_id", DataType.INT64, is_primary=True), ... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2) ... ]) >>> collection = Collection("test_collection_load", schema) >>> collection.insert([[1, 2], [[1.0, 2.0], [3.0, 4.0]]]) <pymilvus.search.MutationResult object at 0x7fabaf3e5d50> >>> collection.load() >>> collection.num_entities 2 """ conn = self._get_connection() if partition_names is not None: conn.load_partitions(self._name, partition_names, timeout=timeout, **kwargs) else: conn.load_collection(self._name, timeout=timeout, **kwargs) def release(self, timeout=None, **kwargs): """ Releases the collection from memory. 
:param timeout: * *timeout* (``float``) -- An optional duration of time in seconds to allow for the RPC. If timeout is set to None, the client keeps waiting until the server responds or an error occurs. :raises CollectionNotExistException: If collection does not exist. :raises BaseException: If collection has not been loaded to memory. :example: >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType >>> connections.connect() >>> schema = CollectionSchema([ ... FieldSchema("film_id", DataType.INT64, is_primary=True), ... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2) ... ]) >>> collection = Collection("test_collection_release", schema) >>> collection.insert([[1, 2], [[1.0, 2.0], [3.0, 4.0]]]) <pymilvus.search.MutationResult object at 0x7fabaf3e5d50> >>> collection.load() >>> collection.num_entities 2 >>> collection.release() # release the collection from memory """ conn = self._get_connection() conn.release_collection(self._name, timeout=timeout, **kwargs) def insert(self, data, partition_name=None, timeout=None, **kwargs): """ Insert data into the collection. :param data: The specified data to insert; the dimensions of the data must align with the number of columns :type data: list-like(list, tuple) object or pandas.DataFrame :param partition_name: The name of the partition to insert the data into; if it is not passed, the data is inserted into the "_default" partition :type partition_name: str :param timeout: * *timeout* (``float``) -- An optional duration of time in seconds to allow for the RPC. If timeout is set to None, the client keeps waiting until the server responds or an error occurs. :raises CollectionNotExistException: If the specified collection does not exist. :raises ParamError: If input parameters are invalid. :raises BaseException: If the specified partition does not exist. :example: >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType >>> import random >>> connections.connect() <pymilvus.client.stub.Milvus object at 0x7f8579002dc0> >>> schema = CollectionSchema([ ... FieldSchema("film_id", DataType.INT64, is_primary=True), ... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2) ... ]) >>> collection = Collection("test_collection_insert", schema) >>> data = [ ... [random.randint(1, 100) for _ in range(10)], ... [[random.random() for _ in range(2)] for _ in range(10)], ... ] >>> collection.insert(data) >>> collection.num_entities 10 """ if data is None: return MutationResult(data) if not self._check_insert_data_schema(data): raise SchemaNotReadyException(0, ExceptionsMessage.TypeOfDataAndSchemaInconsistent) conn = self._get_connection() entities = Prepare.prepare_insert_data(data, self._schema) res = conn.insert(collection_name=self._name, entities=entities, ids=None, partition_name=partition_name, timeout=timeout, **kwargs) if kwargs.get("_async", False): return MutationFuture(res) return MutationResult(res) def delete(self, expr, partition_name=None, timeout=None, **kwargs): """ Deletes entities that match an expression condition, and returns results showing which primary keys were deleted successfully :param expr: The expression to specify entities to be deleted :type expr: str :param partition_name: Name of partitions that contain entities :type partition_name: str :param timeout: An optional duration of time in seconds to allow for the RPC. When timeout is set to None, the client waits until the server responds or an error occurs :type timeout: float :return: list of ids of the deleted vectors. 
:rtype: list :raises: RpcError: If gRPC encounters an error ParamError: If parameters are invalid BaseException: If the return result from the server is not ok :example: >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType >>> import random >>> connections.connect() >>> schema = CollectionSchema([ ... FieldSchema("film_id", DataType.INT64, is_primary=True), ... FieldSchema("film_date", DataType.INT64), ... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2) ... ]) >>> collection = Collection("test_collection_delete", schema) >>> # insert >>> data = [ ... [i for i in range(10)], ... [i + 2000 for i in range(10)], ... [[random.random() for _ in range(2)] for _ in range(10)], ... ] >>> collection.insert(data) >>> collection.num_entities 10 >>> expr = "film_id in [ 0, 1 ]" >>> res = collection.delete(expr) >>> assert len(res) == 2 >>> print(f"- Deleted entities: {res}") - Deleted entities: [0, 1] """ conn = self._get_connection() res = conn.delete(collection_name=self._name, expr=expr, partition_name=partition_name, timeout=timeout, **kwargs) if kwargs.get("_async", False):
return MutationResult(res) def search(self, data, anns_field, param, limit, expr=None, partition_names=None, output_fields=None, timeout=None, round_decimal=-1, **kwargs): """ Conducts a vector similarity search with an optional boolean expression as filter. :param data: The vectors of search data; the length of data is the number of queries (nq), and the dim of every vector in data must be equal to the dim of the collection's vector field. :type data: list[list[float]] :param anns_field: The vector field of the collection to search on. :type anns_field: str :param param: The parameters of search, such as ``nprobe``. :type param: dict :param limit: The max number of returned records, also known as ``topk``. :type limit: int :param expr: The boolean expression used to filter attributes. :type expr: str :param partition_names: The names of partitions to search. :type partition_names: list[str] :param output_fields: The fields to return in the search result, not supported now. :type output_fields: list[str] :param timeout: An optional duration of time in seconds to allow for the RPC. When timeout is set to None, the client waits until the server responds or an error occurs. :type timeout: float :param round_decimal: The specified number of decimal places of returned distance :type round_decimal: int :param kwargs: * *_async* (``bool``) -- Indicates whether to invoke asynchronously. When the value is true, the method returns a SearchFuture object; otherwise, it returns results from the server directly. * *_callback* (``function``) -- The callback function which is invoked after the server responds successfully. It works only if _async is set to True. * *guarantee_timestamp* (``int``) -- This function instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. :return: SearchResult: SearchResult is iterable and is a 2d-array-like class, the first dimension is the number of vectors to query (nq), the second dimension is the limit (topk). :rtype: SearchResult :raises RpcError: If gRPC encounters an error. :raises ParamError: If parameters are invalid. :raises DataTypeNotMatchException: If wrong type of param is passed. :raises BaseException: If the return result from the server is not ok. :example: >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType >>> import random >>> connections.connect() <pymilvus.client.stub.Milvus object at 0x7f8579002dc0> >>> schema = CollectionSchema([ ... FieldSchema("film_id", DataType.INT64, is_primary=True), ... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2) ... ]) >>> collection = Collection("test_collection_search", schema) >>> # insert >>> data = [ ... [i for i in range(10)], ... [[random.random() for _ in range(2)] for _ in range(10)], ... ] >>> collection.insert(data) >>> collection.num_entities 10 >>> collection.load() >>> # search >>> search_param = { ... "data": [[1.0, 1.0]], ... "anns_field": "films", ... "param": {"metric_type": "L2"}, ... "limit": 2, ... "expr": "film_id > 0", ... 
} >>> res = collection.search(**search_param) >>> assert len(res) == 1 >>> hits = res[0] >>> assert len(hits) == 2 >>> print(f"- Total hits: {len(hits)}, hits ids: {hits.ids} ") - Total hits: 2, hits ids: [8, 5] >>> print(f"- Top1 hit id: {hits[0].id}, distance: {hits[0].distance}, score: {hits[0].score} ") - Top1 hit id: 8, distance: 0.10143111646175385, score: 0.10143111646175385 """ if expr is not None and not isinstance(expr, str): raise DataTypeNotMatchException(0, ExceptionsMessage.ExprType % type(expr)) conn = self._get_connection() res = conn.search(self._name, data, anns_field, param, limit, expr, partition_names, output_fields, timeout, round_decimal, **kwargs) if kwargs.get("_async", False): return SearchFuture(res) return SearchResult(res) def query(self, expr, output_fields=None, partition_names=None, timeout=None): """ Queries with a set of criteria, and returns a list of records that match the query exactly. :param expr: The query expression :type expr: str :param output_fields: A list of fields to return :type output_fields: list[str] :param partition_names: Name of partitions that contain entities :type partition_names: list[str] :param timeout: An optional duration of time in seconds to allow for the RPC. When timeout is set to None, the client waits until the server responds or an error occurs :type timeout: float :return: A list that contains all results :rtype: list :raises: RpcError: If gRPC encounters an error ParamError: If parameters are invalid DataTypeNotMatchException: If wrong type of param is passed BaseException: If the return result from the server is not ok :example: >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType >>> import random >>> connections.connect() <pymilvus.client.stub.Milvus object at 0x7f8579002dc0> >>> schema = CollectionSchema([ ... FieldSchema("film_id", DataType.INT64, is_primary=True), ... FieldSchema("film_date", DataType.INT64), ... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2) ... ]) >>> collection = Collection("test_collection_query", schema) >>> # insert >>> data = [ ... [i for i in range(10)], ... [i + 2000 for i in range(10)], ... [[random.random() for _ in range(2)] for _ in range(10)], ... ] >>> collection.insert(data) >>> collection.num_entities 10 >>> collection.load() >>> # query >>> expr = "film_id in [ 0, 1 ]" >>> res = collection.query(expr, output_fields=["film_date"]) >>> assert len(res) == 2 >>> print(f"- Query results: {res}") - Query results: [{'film_id': 0, 'film_date': 2000}, {'film_id': 1, 'film_date': 2001}] """ if not isinstance(expr, str): raise DataTypeNotMatchException(0, ExceptionsMessage.ExprType % type(expr)) conn = self._get_connection() res = conn.query(self._name, expr, output_fields, partition_names, timeout) return res @property def partitions(self) -> list: """ Returns all partitions of the collection. :return list[Partition]: List of Partition objects, returned when the operation succeeds. :raises CollectionNotExistException: If collection doesn't exist. :example: >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType >>> connections.connect() <pymilvus.client.stub.Milvus object at 0x7f8579002dc0> >>> schema = CollectionSchema([ ... FieldSchema("film_id", DataType.INT64, is_primary=True), ... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2) ... 
            ... ])
            >>> collection = Collection("test_collection_partitions", schema)
            >>> collection.partitions
            [{"name": "_default", "description": "", "num_entities": 0}]
        """
        conn = self._get_connection()
        partition_strs = conn.list_partitions(self._name)
        partitions = []
        for partition in partition_strs:
            partitions.append(Partition(self, partition, construct_only=True))
        return partitions

    def partition(self, partition_name) -> Partition:
        """
        Return the partition corresponding to name, or None if it does not exist.

        :param partition_name: The name of the partition to get.
        :type partition_name: str

        :return Partition:
            The Partition object corresponding to partition_name.

        :raises CollectionNotExistException: If the collection doesn't exist.

        :example:
            >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
            >>> connections.connect()
            <pymilvus.client.stub.Milvus object at 0x7f8579002dc0>
            >>> schema = CollectionSchema([
            ...     FieldSchema("film_id", DataType.INT64, is_primary=True),
            ...     FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
            ... ])
            >>> collection = Collection("test_collection_partition", schema)
            >>> collection.partition("_default")
            {"name": "_default", "description": "", "num_entities": 0}
            >>> collection.partition("partition")

        """
        if self.has_partition(partition_name) is False:
            return None
        return Partition(self, partition_name, construct_only=True)

    def create_partition(self, partition_name, description=""):
        """
        Create a partition with the given name if it does not already exist.

        :param partition_name: The name of the partition to create.
        :type partition_name: str
        :param description: The description of the partition.
        :type description: str

        :return Partition:
            The Partition object corresponding to partition_name.

        :raises CollectionNotExistException: If the collection doesn't exist.
        :raises PartitionAlreadyExistException: If the partition already exists.

        :example:
            >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
            >>> connections.connect()
            >>> schema = CollectionSchema([
            ...     FieldSchema("film_id", DataType.INT64, is_primary=True),
            ...     FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
            ... ])
            >>> collection = Collection("test_collection_create_partition", schema)
            >>> collection.create_partition("comedy", description="comedy films")
            {"name": "comedy", "description": "comedy films", "num_entities": 0}
            >>> collection.partition("comedy")
            {"name": "comedy", "description": "comedy films", "num_entities": 0}
        """
        if self.has_partition(partition_name) is True:
            raise PartitionAlreadyExistException(0, ExceptionsMessage.PartitionAlreadyExist)
        return Partition(self, partition_name, description=description)

    def has_partition(self, partition_name, timeout=None) -> bool:
        """
        Checks whether a partition with the specified name exists.

        :param partition_name: The name of the partition to check.
        :type partition_name: str
        :param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
                        is set to None, the client waits until the server responds or an error
                        occurs.
        :type timeout: float

        :return bool:
            Whether the specified partition exists.

        :raises CollectionNotExistException: If the collection doesn't exist.

        :example:
            >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
            >>> connections.connect()
            >>> schema = CollectionSchema([
            ...     FieldSchema("film_id", DataType.INT64, is_primary=True),
            ...     FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
            ... ])
            >>> collection = Collection("test_collection_has_partition", schema)
            >>> collection.create_partition("comedy", description="comedy films")
            {"name": "comedy", "description": "comedy films", "num_entities": 0}
            >>> collection.has_partition("comedy")
            True
            >>> collection.has_partition("science_fiction")
            False
        """
        conn = self._get_connection()
        return conn.has_partition(self._name, partition_name, timeout=timeout)

    def drop_partition(self, partition_name, timeout=None, **kwargs):
        """
        Drop the partition and its corresponding index files.

        :param partition_name: The name of the partition to drop.
        :type partition_name: str
        :param timeout: An optional duration of time in seconds to allow for the RPC. If timeout
                        is set to None, the client keeps waiting until the server responds or an
                        error occurs.
        :type timeout: float

        :raises CollectionNotExistException: If the collection doesn't exist.
        :raises BaseException: If the partition doesn't exist.

        :example:
            >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
            >>> connections.connect()
            >>> schema = CollectionSchema([
            ...     FieldSchema("film_id", DataType.INT64, is_primary=True),
            ...     FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
            ... ])
            >>> collection = Collection("test_collection_drop_partition", schema)
            >>> collection.create_partition("comedy", description="comedy films")
            {"name": "comedy", "description": "comedy films", "num_entities": 0}
            >>> collection.has_partition("comedy")
            True
            >>> collection.drop_partition("comedy")
            >>> collection.has_partition("comedy")
            False
        """
        if self.has_partition(partition_name) is False:
            raise PartitionNotExistException(0, ExceptionsMessage.PartitionNotExist)
        conn = self._get_connection()
        return conn.drop_partition(self._name, partition_name, timeout=timeout, **kwargs)

    # The server side has not yet finished returning aliases from the describe_collection api.
    # Disable this property until that work is done.
    # @property
    # def aliases(self) -> list:
    #     """
    #     Returns the alias list of the collection.
    #
    #     :return list of str:
    #         The collection aliases, returned when the operation succeeds.
    #
    #     :example:
    #         >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
    #         >>> connections.connect()
    #         >>> fields = [
    #         ...     FieldSchema("film_id", DataType.INT64, is_primary=True),
    #         ...     FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=128)
    #         ... ]
    #         >>> schema = CollectionSchema(fields)
    #         >>> collection = Collection("test_collection_name", schema)
    #         >>> collection.create_alias("tom")
    #         >>> collection.aliases
    #         ['tom']
    #     """
    #     conn = self._get_connection()
    #     has = conn.has_collection(self._name)
    #     aliases = []
    #     if has:
    #         resp = conn.describe_collection(self._name)
    #         aliases = resp['aliases']
    #     return aliases

    @property
    def indexes(self) -> list:
        """
        Returns all indexes of the collection.

        :return list[Index]:
            A list of Index objects, returned when the operation succeeds.

        :raises CollectionNotExistException: If the collection does not exist.

        :example:
            >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
            >>> connections.connect()
            >>> schema = CollectionSchema([
            ...     FieldSchema("film_id", DataType.INT64, is_primary=True),
            ...     FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
            ... ])
            >>> collection = Collection("test_collection_indexes", schema)
            >>> collection.indexes
            []
        """
        conn = self._get_connection()
        indexes = []
        tmp_index = conn.describe_index(self._name)
        if tmp_index is not None:
            field_name = tmp_index.pop("field_name", None)
            indexes.append(Index(self, field_name, tmp_index, construct_only=True))
        return indexes

    def index(self) -> Index:
        """
        Fetches the collection's index object.

        :return Index:
            The Index object of the collection.

        :raises CollectionNotExistException: If the collection does not exist.
        :raises BaseException: If the index does not exist.

        :example:
            >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
            >>> connections.connect()
            >>> schema = CollectionSchema([
            ...     FieldSchema("film_id", DataType.INT64, is_primary=True),
            ...     FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
            ... ])
            >>> collection = Collection("test_collection_index", schema)
            >>> index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
            >>> collection.create_index("films", index)
            Status(code=0, message='')
            >>> collection.indexes
            [<pymilvus.index.Index object at 0x7f4435587e20>]
            >>> collection.index()
            <pymilvus.index.Index object at 0x7f44355a1460>
        """
        conn = self._get_connection()
        tmp_index = conn.describe_index(self._name)
        if tmp_index is not None:
            field_name = tmp_index.pop("field_name", None)
            return Index(self, field_name, tmp_index, construct_only=True)
        raise IndexNotExistException(0, ExceptionsMessage.IndexNotExist)

    def create_index(self, field_name, index_params, timeout=None, **kwargs) -> Index:
        """
        Creates an index for the specified field and returns an Index object.

        :param field_name: The name of the field to create an index for.
        :type field_name: str
        :param index_params: The indexing parameters.
        :type index_params: dict
        :param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
                        is set to None, the client waits until the server responds or an error
                        occurs.
        :type timeout: float

        :raises CollectionNotExistException: If the collection does not exist.
        :raises ParamError: If the index parameters are invalid.
        :raises BaseException: If the field does not exist.
        :raises BaseException: If the index has already been created.

        :example:
            >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
            >>> connections.connect()
            >>> schema = CollectionSchema([
            ...     FieldSchema("film_id", DataType.INT64, is_primary=True),
            ...     FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
            ... ])
            >>> collection = Collection("test_collection_create_index", schema)
            >>> index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
            >>> collection.create_index("films", index)
            Status(code=0, message='')
            >>> collection.index()
            <pymilvus.index.Index object at 0x7f44355a1460>
        """
        conn = self._get_connection()
        return conn.create_index(self._name, field_name, index_params, timeout=timeout, **kwargs)

    def has_index(self, timeout=None) -> bool:
        """
        Checks whether the collection has an index.

        :param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
                        is set to None, the client waits until the server responds or an error
                        occurs.
        :type timeout: float

        :return bool:
            Whether the index exists.

        :raises CollectionNotExistException: If the collection does not exist.

        :example:
            >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
            >>> connections.connect()
            >>> schema = CollectionSchema([
            ...     FieldSchema("film_id", DataType.INT64, is_primary=True),
            ...     FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
            ... ])
            >>> collection = Collection("test_collection_has_index", schema)
            >>> index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
            >>> collection.create_index("films", index)
            >>> collection.has_index()
            True
        """
        conn = self._get_connection()
        # TODO(yukun): Need field name, but provide index name
        if conn.describe_index(self._name, "", timeout=timeout) is None:
            return False
        return True

    def drop_index(self, timeout=None, **kwargs):
        """
        Drops the index and its corresponding index files.

        :param timeout: An optional duration of time in seconds to allow for the RPC. If timeout
                        is set to None, the client keeps waiting until the server responds or an
                        error occurs.
        :type timeout: float

        :raises CollectionNotExistException: If the collection does not exist.
        :raises BaseException: If the index does not exist or has already been dropped.

        :example:
            >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
            >>> connections.connect()
            >>> schema = CollectionSchema([
            ...     FieldSchema("film_id", DataType.INT64, is_primary=True),
            ...     FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
            ... ])
            >>> collection = Collection("test_collection_has_index", schema)
            >>> index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
            >>> collection.create_index("films", index)
            >>> collection.has_index()
            True
            >>> collection.drop_index()
            >>> collection.has_index()
            False
        """
        if self.has_index() is False:
            raise IndexNotExistException(0, ExceptionsMessage.IndexNotExist)
        conn = self._get_connection()
        tmp_index = conn.describe_index(self._name, "")
        if tmp_index is not None:
            index = Index(self, tmp_index['field_name'], tmp_index, construct_only=True)
            index.drop(timeout=timeout, **kwargs)

    def create_alias(self, alias, timeout=None, **kwargs):
        """
        Specify an alias for the collection. An alias cannot be duplicated: the same alias
        cannot be assigned to different collections. A collection may, however, have multiple
        aliases. For example:

            before create_alias("collection_1", "bob"):
                collection_1's aliases = ["tom"]
            after create_alias("collection_1", "bob"):
                collection_1's aliases = ["tom", "bob"]

        :param alias: The alias of the collection.
        :type alias: str
        :param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
                        is set to None, the client waits until the server responds or an error
                        occurs.
        :type timeout: float

        :raises CollectionNotExistException: If the collection does not exist.
        :raises BaseException: If the alias failed to be created.

        :example:
            >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
            >>> connections.connect()
            >>> schema = CollectionSchema([
            ...     FieldSchema("film_id", DataType.INT64, is_primary=True),
            ...     FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
            ... ])
            >>> collection = Collection("test_collection_create_index", schema)
            >>> collection.create_alias("alias")
            Status(code=0, message='')
        """
        conn = self._get_connection()
        conn.create_alias(self._name, alias, timeout=timeout, **kwargs)

    def drop_alias(self, alias, timeout=None, **kwargs):
        """
        Delete an alias. This API does not need the collection name, because the Milvus
        server knows which collection the alias belongs to. For example:

            before drop_alias("bob"):
                collection_1's aliases = ["tom", "bob"]
            after drop_alias("bob"):
                collection_1's aliases = ["tom"]

        :param alias: The alias to drop.
        :type alias: str
        :param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
                        is set to None, the client waits until the server responds or an error
                        occurs.
        :type timeout: float

        :raises CollectionNotExistException: If the collection does not exist.
        :raises BaseException: If the alias doesn't exist.

        :example:
            >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
            >>> connections.connect()
            >>> schema = CollectionSchema([
            ...     FieldSchema("film_id", DataType.INT64, is_primary=True),
            ...     FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
            ... ])
            >>> collection = Collection("test_collection_create_index", schema)
            >>> collection.create_alias("alias")
            >>> collection.drop_alias("alias")
            Status(code=0, message='')
        """
        conn = self._get_connection()
        conn.drop_alias(alias, timeout=timeout, **kwargs)

    def alter_alias(self, alias, timeout=None, **kwargs):
        """
        Reassign an existing alias to this collection. If the alias does not exist, the API
        returns an error. An alias cannot be duplicated: the same alias cannot be assigned
        to different collections. This API can change the owner collection of an alias, for
        example:

            before alter_alias("collection_2", "bob"):
                collection_1's aliases = ["bob"]
                collection_2's aliases = []
            after alter_alias("collection_2", "bob"):
                collection_1's aliases = []
                collection_2's aliases = ["bob"]

        :param alias: The alias of the collection.
        :type alias: str
        :param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
                        is set to None, the client waits until the server responds or an error
                        occurs.
        :type timeout: float

        :raises CollectionNotExistException: If the collection does not exist.
        :raises BaseException: If the alias failed to be altered.

        :example:
            >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
            >>> connections.connect()
            >>> schema = CollectionSchema([
            ...     FieldSchema("film_id", DataType.INT64, is_primary=True),
            ...     FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
            ... ])
            >>> collection = Collection("test_collection_create_index", schema)
            >>> collection.alter_alias("alias")
            if the alias exists, returns Status(code=0, message='')
            otherwise returns Status(code=1, message='alias does not exist')
        """
        conn = self._get_connection()
        conn.alter_alias(self._name, alias, timeout=timeout, **kwargs)
return MutationFuture(res)
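# --------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the pymilvus
# source above): a minimal end-to-end walk through the Collection API that
# the docstrings above document -- connect, insert, index, load, search,
# and query. Assumptions: a Milvus server is reachable at the default
# localhost endpoint, and the collection name "demo_films" is hypothetical.
import random

from pymilvus import connections, Collection, CollectionSchema, FieldSchema, DataType

connections.connect()  # defaults to localhost:19530

schema = CollectionSchema([
    FieldSchema("film_id", DataType.INT64, is_primary=True),
    FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2),
])
collection = Collection("demo_films", schema)

# Insert ten entities; the column order matches the schema above.
collection.insert([
    [i for i in range(10)],
    [[random.random() for _ in range(2)] for _ in range(10)],
])

# Build an IVF_FLAT index on the vector field, then load the collection
# into memory so it becomes searchable.
collection.create_index("films", {"index_type": "IVF_FLAT",
                                  "params": {"nlist": 128},
                                  "metric_type": "L2"})
collection.load()

# Vector similarity search with a boolean filter (see Collection.search).
results = collection.search(data=[[1.0, 1.0]], anns_field="films",
                            param={"metric_type": "L2", "params": {"nprobe": 8}},
                            limit=2, expr="film_id > 0")
for hit in results[0]:
    print(hit.id, hit.distance)

# Scalar query returning selected output fields (see Collection.query).
print(collection.query(expr="film_id in [0, 1]", output_fields=["film_id"]))
# --------------------------------------------------------------------------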
index.d.ts
/** * @license * Copyright Google LLC All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ /// <amd-module name="@nguniversal/builders/src/static-generator/index" />
/** * Builds the browser and server, then renders each route in options.routes * and writes them to prerender/<route>/index.html for each output path in * the browser result. */ export declare function execute(options: PrerenderBuilderOptions, context: BuilderContext): Promise<BuilderOutput>; declare const _default: import("@angular-devkit/architect/src/internal").Builder<PrerenderBuilderOptions & import("@angular-devkit/core").JsonObject>; export default _default;
import { BuilderContext, BuilderOutput } from '@angular-devkit/architect'; import { Schema as PrerenderBuilderOptions } from './schema';
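// --------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the declaration file
// above): how a declaration like index.d.ts is typically backed by an
// implementation. `execute` does the work, and the default export registers
// it as an Architect builder via `createBuilder`. The inline options
// interface is a stand-in for the real `Schema` imported from './schema';
// the body only logs the plan instead of actually prerendering.
import { BuilderContext, BuilderOutput, createBuilder } from '@angular-devkit/architect';
import { JsonObject } from '@angular-devkit/core';

interface PrerenderBuilderOptions extends JsonObject {
  routes: string[];       // routes to prerender, e.g. ['/', '/about']
  browserTarget: string;  // e.g. 'app:build'
  serverTarget: string;   // e.g. 'app:server'
}

async function execute(
  options: PrerenderBuilderOptions,
  context: BuilderContext,
): Promise<BuilderOutput> {
  // The real implementation builds the browser and server targets, then
  // renders each route to prerender/<route>/index.html. This sketch only
  // reports what it would do.
  for (const route of options.routes) {
    context.logger.info(`would prerender ${route} -> prerender${route}/index.html`);
  }
  return { success: true };
}

export default createBuilder<PrerenderBuilderOptions>(execute);
// --------------------------------------------------------------------------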
item.rs
use super::diagnostics::{dummy_arg, ConsumeClosingDelim, Error}; use super::ty::{AllowPlus, RecoverQPath}; use super::{FollowedByType, Parser, PathStyle}; use crate::maybe_whole; use rustc_ast::ast::{self, AttrStyle, AttrVec, Attribute, Ident, DUMMY_NODE_ID}; use rustc_ast::ast::{AssocItem, AssocItemKind, ForeignItemKind, Item, ItemKind, Mod}; use rustc_ast::ast::{Async, Const, Defaultness, IsAuto, Mutability, Unsafe, UseTree, UseTreeKind}; use rustc_ast::ast::{BindingMode, Block, FnDecl, FnSig, Param, SelfKind}; use rustc_ast::ast::{EnumDef, Generics, StructField, TraitRef, Ty, TyKind, Variant, VariantData}; use rustc_ast::ast::{FnHeader, ForeignItem, PathSegment, Visibility, VisibilityKind}; use rustc_ast::ast::{MacArgs, MacCall, MacDelimiter}; use rustc_ast::ptr::P; use rustc_ast::token::{self, TokenKind}; use rustc_ast::tokenstream::{DelimSpan, TokenStream, TokenTree}; use rustc_ast_pretty::pprust; use rustc_errors::{struct_span_err, Applicability, PResult, StashKey}; use rustc_span::edition::Edition; use rustc_span::source_map::{self, Span}; use rustc_span::symbol::{kw, sym, Symbol}; use log::debug; use std::convert::TryFrom; use std::mem; impl<'a> Parser<'a> { /// Parses a source module as a crate. This is the main entry point for the parser. pub fn parse_crate_mod(&mut self) -> PResult<'a, ast::Crate> { let lo = self.token.span; let (module, attrs) = self.parse_mod(&token::Eof)?; let span = lo.to(self.token.span); let proc_macros = Vec::new(); // Filled in by `proc_macro_harness::inject()`. Ok(ast::Crate { attrs, module, span, proc_macros }) } /// Parses a `mod <foo> { ... }` or `mod <foo>;` item. fn parse_item_mod(&mut self, attrs: &mut Vec<Attribute>) -> PResult<'a, ItemInfo> { let id = self.parse_ident()?; let (module, mut inner_attrs) = if self.eat(&token::Semi) { Default::default() } else { self.expect(&token::OpenDelim(token::Brace))?; self.parse_mod(&token::CloseDelim(token::Brace))? }; attrs.append(&mut inner_attrs); Ok((id, ItemKind::Mod(module))) } /// Parses the contents of a module (inner attributes followed by module items). pub fn parse_mod(&mut self, term: &TokenKind) -> PResult<'a, (Mod, Vec<Attribute>)> { let lo = self.token.span; let attrs = self.parse_inner_attributes()?; let module = self.parse_mod_items(term, lo)?; Ok((module, attrs)) } /// Given a termination token, parses all of the items in a module. fn parse_mod_items(&mut self, term: &TokenKind, inner_lo: Span) -> PResult<'a, Mod> { let mut items = vec![]; while let Some(item) = self.parse_item()? 
        {
            items.push(item);
            self.maybe_consume_incorrect_semicolon(&items);
        }

        if !self.eat(term) {
            let token_str = super::token_descr(&self.token);
            if !self.maybe_consume_incorrect_semicolon(&items) {
                let msg = &format!("expected item, found {}", token_str);
                let mut err = self.struct_span_err(self.token.span, msg);
                err.span_label(self.token.span, "expected item");
                return Err(err);
            }
        }

        let hi = if self.token.span.is_dummy() { inner_lo } else { self.prev_token.span };

        Ok(Mod { inner: inner_lo.to(hi), items, inline: true })
    }
}

pub(super) type ItemInfo = (Ident, ItemKind);

impl<'a> Parser<'a> {
    pub fn parse_item(&mut self) -> PResult<'a, Option<P<Item>>> {
        self.parse_item_(|_| true).map(|i| i.map(P))
    }

    fn parse_item_(&mut self, req_name: ReqName) -> PResult<'a, Option<Item>> {
        let attrs = self.parse_outer_attributes()?;
        self.parse_item_common(attrs, true, false, req_name)
    }

    pub(super) fn parse_item_common(
        &mut self,
        mut attrs: Vec<Attribute>,
        mac_allowed: bool,
        attrs_allowed: bool,
        req_name: ReqName,
    ) -> PResult<'a, Option<Item>> {
        maybe_whole!(self, NtItem, |item| {
            let mut item = item;
            mem::swap(&mut item.attrs, &mut attrs);
            item.attrs.extend(attrs);
            Some(item.into_inner())
        });

        let mut unclosed_delims = vec![];
        let (mut item, tokens) = self.collect_tokens(|this| {
            let item = this.parse_item_common_(attrs, mac_allowed, attrs_allowed, req_name);
            unclosed_delims.append(&mut this.unclosed_delims);
            item
        })?;
        self.unclosed_delims.append(&mut unclosed_delims);

        // Once we've parsed an item and recorded the tokens we got while
        // parsing we may want to store `tokens` into the item we're about to
        // return. Note, though, that we specifically didn't capture tokens
        // related to outer attributes. The `tokens` field here may later be
        // used with procedural macros to convert this item back into a token
        // stream, but during expansion we may be removing attributes as we go
        // along.
        //
        // If we've got inner attributes then the `tokens` we've got above holds
        // these inner attributes. If an inner attribute is expanded we won't
        // actually remove it from the token stream, so we'll just keep yielding
        // it (bad!). To work around this case for now we just avoid recording
        // `tokens` if we detect any inner attributes. This should help keep
        // expansion correct, but we should fix this bug one day!
        if let Some(item) = &mut item {
            if !item.attrs.iter().any(|attr| attr.style == AttrStyle::Inner) {
                item.tokens = Some(tokens);
            }
        }
        Ok(item)
    }

    fn parse_item_common_(
        &mut self,
        mut attrs: Vec<Attribute>,
        mac_allowed: bool,
        attrs_allowed: bool,
        req_name: ReqName,
    ) -> PResult<'a, Option<Item>> {
        let lo = self.token.span;
        let vis = self.parse_visibility(FollowedByType::No)?;
        let mut def = self.parse_defaultness();
        let kind = self.parse_item_kind(&mut attrs, mac_allowed, lo, &vis, &mut def, req_name)?;
        if let Some((ident, kind)) = kind {
            self.error_on_unconsumed_default(def, &kind);
            let span = lo.to(self.prev_token.span);
            let id = DUMMY_NODE_ID;
            let item = Item { ident, attrs, id, kind, vis, span, tokens: None };
            return Ok(Some(item));
        }

        // At this point, we have failed to parse an item.
        self.error_on_unmatched_vis(&vis);
        self.error_on_unmatched_defaultness(def);
        if !attrs_allowed {
            self.recover_attrs_no_item(&attrs)?;
        }
        Ok(None)
    }

    /// Error in case a non-inherited visibility was parsed but no item followed.
    fn error_on_unmatched_vis(&self, vis: &Visibility) {
        if let VisibilityKind::Inherited = vis.node {
            return;
        }
        let vs = pprust::vis_to_string(&vis);
        let vs = vs.trim_end();
        self.struct_span_err(vis.span, &format!("visibility `{}` is not followed by an item", vs))
            .span_label(vis.span, "the visibility")
            .help(&format!("you likely meant to define an item, e.g., `{} fn foo() {{}}`", vs))
            .emit();
    }

    /// Error in case a `default` was parsed but no item followed.
    fn error_on_unmatched_defaultness(&self, def: Defaultness) {
        if let Defaultness::Default(sp) = def {
            self.struct_span_err(sp, "`default` is not followed by an item")
                .span_label(sp, "the `default` qualifier")
                .note("only `fn`, `const`, `type`, or `impl` items may be prefixed by `default`")
                .emit();
        }
    }

    /// Error in case `default` was parsed in an inappropriate context.
    fn error_on_unconsumed_default(&self, def: Defaultness, kind: &ItemKind) {
        if let Defaultness::Default(span) = def {
            let msg = format!("{} {} cannot be `default`", kind.article(), kind.descr());
            self.struct_span_err(span, &msg)
                .span_label(span, "`default` because of this")
                .note("only associated `fn`, `const`, and `type` items can be `default`")
                .emit();
        }
    }

    /// Parses one of the items allowed by the flags.
    fn parse_item_kind(
        &mut self,
        attrs: &mut Vec<Attribute>,
        macros_allowed: bool,
        lo: Span,
        vis: &Visibility,
        def: &mut Defaultness,
        req_name: ReqName,
    ) -> PResult<'a, Option<ItemInfo>> {
        let mut def = || mem::replace(def, Defaultness::Final);

        let info = if self.eat_keyword(kw::Use) {
            // USE ITEM
            let tree = self.parse_use_tree()?;
            self.expect_semi()?;
            (Ident::invalid(), ItemKind::Use(P(tree)))
        } else if self.check_fn_front_matter() {
            // FUNCTION ITEM
            let (ident, sig, generics, body) = self.parse_fn(attrs, req_name)?;
            (ident, ItemKind::Fn(def(), sig, generics, body))
        } else if self.eat_keyword(kw::Extern) {
            if self.eat_keyword(kw::Crate) {
                // EXTERN CRATE
                self.parse_item_extern_crate()?
            } else {
                // EXTERN BLOCK
                self.parse_item_foreign_mod(attrs)?
            }
        } else if self.is_static_global() {
            // STATIC ITEM
            self.bump(); // `static`
            let m = self.parse_mutability();
            let (ident, ty, expr) = self.parse_item_global(Some(m))?;
            (ident, ItemKind::Static(ty, m, expr))
        } else if let Const::Yes(const_span) = self.parse_constness() {
            // CONST ITEM
            self.recover_const_mut(const_span);
            let (ident, ty, expr) = self.parse_item_global(None)?;
            (ident, ItemKind::Const(def(), ty, expr))
        } else if self.check_keyword(kw::Trait) || self.check_auto_or_unsafe_trait_item() {
            // TRAIT ITEM
            self.parse_item_trait(attrs, lo)?
        } else if self.check_keyword(kw::Impl)
            || self.check_keyword(kw::Unsafe) && self.is_keyword_ahead(1, &[kw::Impl])
        {
            // IMPL ITEM
            self.parse_item_impl(attrs, def())?
        } else if self.eat_keyword(kw::Mod) {
            // MODULE ITEM
            self.parse_item_mod(attrs)?
        } else if self.eat_keyword(kw::Type) {
            // TYPE ITEM
            self.parse_type_alias(def())?
        } else if self.eat_keyword(kw::Enum) {
            // ENUM ITEM
            self.parse_item_enum()?
        } else if self.eat_keyword(kw::Struct) {
            // STRUCT ITEM
            self.parse_item_struct()?
        } else if self.is_kw_followed_by_ident(kw::Union) {
            // UNION ITEM
            self.bump(); // `union`
            self.parse_item_union()?
        } else if self.eat_keyword(kw::Macro) {
            // MACROS 2.0 ITEM
            self.parse_item_decl_macro(lo)?
        } else if self.is_macro_rules_item() {
            // MACRO_RULES ITEM
            self.parse_item_macro_rules(vis)?
} else if vis.node.is_pub() && self.isnt_macro_invocation() { self.recover_missing_kw_before_item()?; return Ok(None); } else if macros_allowed && self.check_path() { // MACRO INVOCATION ITEM (Ident::invalid(), ItemKind::MacCall(self.parse_item_macro(vis)?)) } else { return Ok(None); }; Ok(Some(info)) } /// When parsing a statement, would the start of a path be an item? pub(super) fn is_path_start_item(&mut self) -> bool { self.is_crate_vis() // no: `crate::b`, yes: `crate $item` || self.is_kw_followed_by_ident(kw::Union) // no: `union::b`, yes: `union U { .. }` || self.check_auto_or_unsafe_trait_item() // no: `auto::b`, yes: `auto trait X { .. }` || self.is_async_fn() // no(2015): `async::b`, yes: `async fn` || self.is_macro_rules_item() // no: `macro_rules::b`, yes: `macro_rules! mac` } /// Are we sure this could not possibly be a macro invocation? fn isnt_macro_invocation(&mut self) -> bool { self.check_ident() && self.look_ahead(1, |t| *t != token::Not && *t != token::ModSep) } /// Recover on encountering a struct or method definition where the user /// forgot to add the `struct` or `fn` keyword after writing `pub`: `pub S {}`. fn recover_missing_kw_before_item(&mut self) -> PResult<'a, ()>
/// Parses an item macro, e.g., `item!();`. fn parse_item_macro(&mut self, vis: &Visibility) -> PResult<'a, MacCall> { let path = self.parse_path(PathStyle::Mod)?; // `foo::bar` self.expect(&token::Not)?; // `!` let args = self.parse_mac_args()?; // `( .. )` or `[ .. ]` (followed by `;`), or `{ .. }`. self.eat_semi_for_macro_if_needed(&args); self.complain_if_pub_macro(vis, false); Ok(MacCall { path, args, prior_type_ascription: self.last_type_ascription }) } /// Recover if we parsed attributes and expected an item but there was none. fn recover_attrs_no_item(&mut self, attrs: &[Attribute]) -> PResult<'a, ()> { let (start, end) = match attrs { [] => return Ok(()), [x0 @ xn] | [x0, .., xn] => (x0, xn), }; let msg = if end.is_doc_comment() { "expected item after doc comment" } else { "expected item after attributes" }; let mut err = self.struct_span_err(end.span, msg); if end.is_doc_comment() { err.span_label(end.span, "this doc comment doesn't document anything"); } if let [.., penultimate, _] = attrs { err.span_label(start.span.to(penultimate.span), "other attributes here"); } Err(err) } fn is_async_fn(&self) -> bool { self.token.is_keyword(kw::Async) && self.is_keyword_ahead(1, &[kw::Fn]) } fn parse_polarity(&mut self) -> ast::ImplPolarity { // Disambiguate `impl !Trait for Type { ... }` and `impl ! { ... }` for the never type. if self.check(&token::Not) && self.look_ahead(1, |t| t.can_begin_type()) { self.bump(); // `!` ast::ImplPolarity::Negative(self.prev_token.span) } else { ast::ImplPolarity::Positive } } /// Parses an implementation item. /// /// ``` /// impl<'a, T> TYPE { /* impl items */ } /// impl<'a, T> TRAIT for TYPE { /* impl items */ } /// impl<'a, T> !TRAIT for TYPE { /* impl items */ } /// impl<'a, T> const TRAIT for TYPE { /* impl items */ } /// ``` /// /// We actually parse slightly more relaxed grammar for better error reporting and recovery. /// ``` /// "impl" GENERICS "const"? "!"? TYPE "for"? (TYPE | "..") ("where" PREDICATES)? "{" BODY "}" /// "impl" GENERICS "const"? "!"? TYPE ("where" PREDICATES)? "{" BODY "}" /// ``` fn parse_item_impl( &mut self, attrs: &mut Vec<Attribute>, defaultness: Defaultness, ) -> PResult<'a, ItemInfo> { let unsafety = self.parse_unsafety(); self.expect_keyword(kw::Impl)?; // First, parse generic parameters if necessary. let mut generics = if self.choose_generics_over_qpath(0) { self.parse_generics()? } else { let mut generics = Generics::default(); // impl A for B {} // /\ this is where `generics.span` should point when there are no type params. generics.span = self.prev_token.span.shrink_to_hi(); generics }; let constness = self.parse_constness(); if let Const::Yes(span) = constness { self.sess.gated_spans.gate(sym::const_trait_impl, span); } let polarity = self.parse_polarity(); // Parse both types and traits as a type, then reinterpret if necessary. let err_path = |span| ast::Path::from_ident(Ident::new(kw::Invalid, span)); let ty_first = if self.token.is_keyword(kw::For) && self.look_ahead(1, |t| t != &token::Lt) { let span = self.prev_token.span.between(self.token.span); self.struct_span_err(span, "missing trait in a trait impl").emit(); P(Ty { kind: TyKind::Path(None, err_path(span)), span, id: DUMMY_NODE_ID }) } else { self.parse_ty()? }; // If `for` is missing we try to recover. 
let has_for = self.eat_keyword(kw::For); let missing_for_span = self.prev_token.span.between(self.token.span); let ty_second = if self.token == token::DotDot { // We need to report this error after `cfg` expansion for compatibility reasons self.bump(); // `..`, do not add it to expected tokens Some(self.mk_ty(self.prev_token.span, TyKind::Err)) } else if has_for || self.token.can_begin_type() { Some(self.parse_ty()?) } else { None }; generics.where_clause = self.parse_where_clause()?; let impl_items = self.parse_item_list(attrs, |p| p.parse_impl_item())?; let item_kind = match ty_second { Some(ty_second) => { // impl Trait for Type if !has_for { self.struct_span_err(missing_for_span, "missing `for` in a trait impl") .span_suggestion_short( missing_for_span, "add `for` here", " for ".to_string(), Applicability::MachineApplicable, ) .emit(); } let ty_first = ty_first.into_inner(); let path = match ty_first.kind { // This notably includes paths passed through `ty` macro fragments (#46438). TyKind::Path(None, path) => path, _ => { self.struct_span_err(ty_first.span, "expected a trait, found type").emit(); err_path(ty_first.span) } }; let trait_ref = TraitRef { path, ref_id: ty_first.id }; ItemKind::Impl { unsafety, polarity, defaultness, constness, generics, of_trait: Some(trait_ref), self_ty: ty_second, items: impl_items, } } None => { // impl Type ItemKind::Impl { unsafety, polarity, defaultness, constness, generics, of_trait: None, self_ty: ty_first, items: impl_items, } } }; Ok((Ident::invalid(), item_kind)) } fn parse_item_list<T>( &mut self, attrs: &mut Vec<Attribute>, mut parse_item: impl FnMut(&mut Parser<'a>) -> PResult<'a, Option<Option<T>>>, ) -> PResult<'a, Vec<T>> { let open_brace_span = self.token.span; self.expect(&token::OpenDelim(token::Brace))?; attrs.append(&mut self.parse_inner_attributes()?); let mut items = Vec::new(); while !self.eat(&token::CloseDelim(token::Brace)) { if self.recover_doc_comment_before_brace() { continue; } match parse_item(self) { Ok(None) => { // We have to bail or we'll potentially never make progress. let non_item_span = self.token.span; self.consume_block(token::Brace, ConsumeClosingDelim::Yes); self.struct_span_err(non_item_span, "non-item in item list") .span_label(open_brace_span, "item list starts here") .span_label(non_item_span, "non-item starts here") .span_label(self.prev_token.span, "item list ends here") .emit(); break; } Ok(Some(item)) => items.extend(item), Err(mut err) => { self.consume_block(token::Brace, ConsumeClosingDelim::Yes); err.span_label(open_brace_span, "while parsing this item list starting here") .span_label(self.prev_token.span, "the item list ends here") .emit(); break; } } } Ok(items) } /// Recover on a doc comment before `}`. fn recover_doc_comment_before_brace(&mut self) -> bool { if let token::DocComment(_) = self.token.kind { if self.look_ahead(1, |tok| tok == &token::CloseDelim(token::Brace)) { struct_span_err!( self.diagnostic(), self.token.span, E0584, "found a documentation comment that doesn't document anything", ) .span_label(self.token.span, "this doc comment doesn't document anything") .help( "doc comments must come before what they document, maybe a \ comment was intended with `//`?", ) .emit(); self.bump(); return true; } } false } /// Parses defaultness (i.e., `default` or nothing). fn parse_defaultness(&mut self) -> Defaultness { // We are interested in `default` followed by another identifier. // However, we must avoid keywords that occur as binary operators. 
// Currently, the only applicable keyword is `as` (`default as Ty`). if self.check_keyword(kw::Default) && self.look_ahead(1, |t| t.is_non_raw_ident_where(|i| i.name != kw::As)) { self.bump(); // `default` Defaultness::Default(self.prev_token.uninterpolated_span()) } else { Defaultness::Final } } /// Is this an `(unsafe auto? | auto) trait` item? fn check_auto_or_unsafe_trait_item(&mut self) -> bool { // auto trait self.check_keyword(kw::Auto) && self.is_keyword_ahead(1, &[kw::Trait]) // unsafe auto trait || self.check_keyword(kw::Unsafe) && self.is_keyword_ahead(1, &[kw::Trait, kw::Auto]) } /// Parses `unsafe? auto? trait Foo { ... }` or `trait Foo = Bar;`. fn parse_item_trait(&mut self, attrs: &mut Vec<Attribute>, lo: Span) -> PResult<'a, ItemInfo> { let unsafety = self.parse_unsafety(); // Parse optional `auto` prefix. let is_auto = if self.eat_keyword(kw::Auto) { IsAuto::Yes } else { IsAuto::No }; self.expect_keyword(kw::Trait)?; let ident = self.parse_ident()?; let mut tps = self.parse_generics()?; // Parse optional colon and supertrait bounds. let had_colon = self.eat(&token::Colon); let span_at_colon = self.prev_token.span; let bounds = if had_colon { self.parse_generic_bounds(Some(self.prev_token.span))? } else { Vec::new() }; let span_before_eq = self.prev_token.span; if self.eat(&token::Eq) { // It's a trait alias. if had_colon { let span = span_at_colon.to(span_before_eq); self.struct_span_err(span, "bounds are not allowed on trait aliases").emit(); } let bounds = self.parse_generic_bounds(None)?; tps.where_clause = self.parse_where_clause()?; self.expect_semi()?; let whole_span = lo.to(self.prev_token.span); if is_auto == IsAuto::Yes { let msg = "trait aliases cannot be `auto`"; self.struct_span_err(whole_span, msg).span_label(whole_span, msg).emit(); } if let Unsafe::Yes(_) = unsafety { let msg = "trait aliases cannot be `unsafe`"; self.struct_span_err(whole_span, msg).span_label(whole_span, msg).emit(); } self.sess.gated_spans.gate(sym::trait_alias, whole_span); Ok((ident, ItemKind::TraitAlias(tps, bounds))) } else { // It's a normal trait. tps.where_clause = self.parse_where_clause()?; let items = self.parse_item_list(attrs, |p| p.parse_trait_item())?; Ok((ident, ItemKind::Trait(is_auto, unsafety, tps, bounds, items))) } } pub fn parse_impl_item(&mut self) -> PResult<'a, Option<Option<P<AssocItem>>>> { self.parse_assoc_item(|_| true) } pub fn parse_trait_item(&mut self) -> PResult<'a, Option<Option<P<AssocItem>>>> { self.parse_assoc_item(|edition| edition >= Edition::Edition2018) } /// Parses associated items. fn parse_assoc_item(&mut self, req_name: ReqName) -> PResult<'a, Option<Option<P<AssocItem>>>> { Ok(self.parse_item_(req_name)?.map(|Item { attrs, id, span, vis, ident, kind, tokens }| { let kind = match AssocItemKind::try_from(kind) { Ok(kind) => kind, Err(kind) => match kind { ItemKind::Static(a, _, b) => { self.struct_span_err(span, "associated `static` items are not allowed") .emit(); AssocItemKind::Const(Defaultness::Final, a, b) } _ => return self.error_bad_item_kind(span, &kind, "`trait`s or `impl`s"), }, }; Some(P(Item { attrs, id, span, vis, ident, kind, tokens })) })) } /// Parses a `type` alias with the following grammar: /// ``` /// TypeAlias = "type" Ident Generics {":" GenericBounds}? {"=" Ty}? ";" ; /// ``` /// The `"type"` has already been eaten. fn parse_type_alias(&mut self, def: Defaultness) -> PResult<'a, ItemInfo> { let ident = self.parse_ident()?; let mut generics = self.parse_generics()?; // Parse optional colon and param bounds. 
let bounds = if self.eat(&token::Colon) { self.parse_generic_bounds(None)? } else { Vec::new() }; generics.where_clause = self.parse_where_clause()?; let default = if self.eat(&token::Eq) { Some(self.parse_ty()?) } else { None }; self.expect_semi()?; Ok((ident, ItemKind::TyAlias(def, generics, bounds, default))) } /// Parses a `UseTree`. /// /// ``` /// USE_TREE = [`::`] `*` | /// [`::`] `{` USE_TREE_LIST `}` | /// PATH `::` `*` | /// PATH `::` `{` USE_TREE_LIST `}` | /// PATH [`as` IDENT] /// ``` fn parse_use_tree(&mut self) -> PResult<'a, UseTree> { let lo = self.token.span; let mut prefix = ast::Path { segments: Vec::new(), span: lo.shrink_to_lo() }; let kind = if self.check(&token::OpenDelim(token::Brace)) || self.check(&token::BinOp(token::Star)) || self.is_import_coupler() { // `use *;` or `use ::*;` or `use {...};` or `use ::{...};` let mod_sep_ctxt = self.token.span.ctxt(); if self.eat(&token::ModSep) { prefix .segments .push(PathSegment::path_root(lo.shrink_to_lo().with_ctxt(mod_sep_ctxt))); } self.parse_use_tree_glob_or_nested()? } else { // `use path::*;` or `use path::{...};` or `use path;` or `use path as bar;` prefix = self.parse_path(PathStyle::Mod)?; if self.eat(&token::ModSep) { self.parse_use_tree_glob_or_nested()? } else { UseTreeKind::Simple(self.parse_rename()?, DUMMY_NODE_ID, DUMMY_NODE_ID) } }; Ok(UseTree { prefix, kind, span: lo.to(self.prev_token.span) }) } /// Parses `*` or `{...}`. fn parse_use_tree_glob_or_nested(&mut self) -> PResult<'a, UseTreeKind> { Ok(if self.eat(&token::BinOp(token::Star)) { UseTreeKind::Glob } else { UseTreeKind::Nested(self.parse_use_tree_list()?) }) } /// Parses a `UseTreeKind::Nested(list)`. /// /// ``` /// USE_TREE_LIST = Ø | (USE_TREE `,`)* USE_TREE [`,`] /// ``` fn parse_use_tree_list(&mut self) -> PResult<'a, Vec<(UseTree, ast::NodeId)>> { self.parse_delim_comma_seq(token::Brace, |p| Ok((p.parse_use_tree()?, DUMMY_NODE_ID))) .map(|(r, _)| r) } fn parse_rename(&mut self) -> PResult<'a, Option<Ident>> { if self.eat_keyword(kw::As) { self.parse_ident_or_underscore().map(Some) } else { Ok(None) } } fn parse_ident_or_underscore(&mut self) -> PResult<'a, ast::Ident> { match self.token.ident() { Some((ident @ Ident { name: kw::Underscore, .. }, false)) => { self.bump(); Ok(ident) } _ => self.parse_ident(), } } /// Parses `extern crate` links. /// /// # Examples /// /// ``` /// extern crate foo; /// extern crate bar as foo; /// ``` fn parse_item_extern_crate(&mut self) -> PResult<'a, ItemInfo> { // Accept `extern crate name-like-this` for better diagnostics let orig_name = self.parse_crate_name_with_dashes()?; let (item_name, orig_name) = if let Some(rename) = self.parse_rename()? { (rename, Some(orig_name.name)) } else { (orig_name, None) }; self.expect_semi()?; Ok((item_name, ItemKind::ExternCrate(orig_name))) } fn parse_crate_name_with_dashes(&mut self) -> PResult<'a, ast::Ident> { let error_msg = "crate name using dashes are not valid in `extern crate` statements"; let suggestion_msg = "if the original crate name uses dashes you need to use underscores \ in the code"; let mut ident = if self.token.is_keyword(kw::SelfLower) { self.parse_path_segment_ident() } else { self.parse_ident() }?; let mut idents = vec![]; let mut replacement = vec![]; let mut fixed_crate_name = false; // Accept `extern crate name-like-this` for better diagnostics. let dash = token::BinOp(token::BinOpToken::Minus); if self.token == dash { // Do not include `-` as part of the expected tokens list. 
while self.eat(&dash) { fixed_crate_name = true; replacement.push((self.prev_token.span, "_".to_string())); idents.push(self.parse_ident()?); } } if fixed_crate_name { let fixed_name_sp = ident.span.to(idents.last().unwrap().span); let mut fixed_name = format!("{}", ident.name); for part in idents { fixed_name.push_str(&format!("_{}", part.name)); } ident = Ident::from_str_and_span(&fixed_name, fixed_name_sp); self.struct_span_err(fixed_name_sp, error_msg) .span_label(fixed_name_sp, "dash-separated idents are not valid") .multipart_suggestion(suggestion_msg, replacement, Applicability::MachineApplicable) .emit(); } Ok(ident) } /// Parses `extern` for foreign ABIs modules. /// /// `extern` is expected to have been consumed before calling this method. /// /// # Examples /// /// ```ignore (only-for-syntax-highlight) /// extern "C" {} /// extern {} /// ``` fn parse_item_foreign_mod(&mut self, attrs: &mut Vec<Attribute>) -> PResult<'a, ItemInfo> { let abi = self.parse_abi(); // ABI? let items = self.parse_item_list(attrs, |p| p.parse_foreign_item())?; let module = ast::ForeignMod { abi, items }; Ok((Ident::invalid(), ItemKind::ForeignMod(module))) } /// Parses a foreign item (one in an `extern { ... }` block). pub fn parse_foreign_item(&mut self) -> PResult<'a, Option<Option<P<ForeignItem>>>> { Ok(self.parse_item_(|_| true)?.map(|Item { attrs, id, span, vis, ident, kind, tokens }| { let kind = match ForeignItemKind::try_from(kind) { Ok(kind) => kind, Err(kind) => match kind { ItemKind::Const(_, a, b) => { self.error_on_foreign_const(span, ident); ForeignItemKind::Static(a, Mutability::Not, b) } _ => return self.error_bad_item_kind(span, &kind, "`extern` blocks"), }, }; Some(P(Item { attrs, id, span, vis, ident, kind, tokens })) })) } fn error_bad_item_kind<T>(&self, span: Span, kind: &ItemKind, ctx: &str) -> Option<T> { let span = self.sess.source_map().guess_head_span(span); let descr = kind.descr(); self.struct_span_err(span, &format!("{} is not supported in {}", descr, ctx)) .help(&format!("consider moving the {} out to a nearby module scope", descr)) .emit(); None } fn error_on_foreign_const(&self, span: Span, ident: Ident) { self.struct_span_err(ident.span, "extern items cannot be `const`") .span_suggestion( span.with_hi(ident.span.lo()), "try using a static value", "static ".to_string(), Applicability::MachineApplicable, ) .note("for more information, visit https://doc.rust-lang.org/std/keyword.extern.html") .emit(); } fn is_static_global(&mut self) -> bool { if self.check_keyword(kw::Static) { // Check if this could be a closure. !self.look_ahead(1, |token| { if token.is_keyword(kw::Move) { return true; } match token.kind { token::BinOp(token::Or) | token::OrOr => true, _ => false, } }) } else { false } } /// Recover on `const mut` with `const` already eaten. fn recover_const_mut(&mut self, const_span: Span) { if self.eat_keyword(kw::Mut) { let span = self.prev_token.span; self.struct_span_err(span, "const globals cannot be mutable") .span_label(span, "cannot be mutable") .span_suggestion( const_span, "you might want to declare a static instead", "static".to_owned(), Applicability::MaybeIncorrect, ) .emit(); } } /// Parse `["const" | ("static" "mut"?)] $ident ":" $ty (= $expr)?` with /// `["const" | ("static" "mut"?)]` already parsed and stored in `m`. /// /// When `m` is `"const"`, `$ident` may also be `"_"`. 
fn parse_item_global( &mut self, m: Option<Mutability>, ) -> PResult<'a, (Ident, P<Ty>, Option<P<ast::Expr>>)> { let id = if m.is_none() { self.parse_ident_or_underscore() } else { self.parse_ident() }?; // Parse the type of a `const` or `static mut?` item. // That is, the `":" $ty` fragment. let ty = if self.eat(&token::Colon) { self.parse_ty()? } else { self.recover_missing_const_type(id, m) }; let expr = if self.eat(&token::Eq) { Some(self.parse_expr()?) } else { None }; self.expect_semi()?; Ok((id, ty, expr)) } /// We were supposed to parse `:` but the `:` was missing. /// This means that the type is missing. fn recover_missing_const_type(&mut self, id: Ident, m: Option<Mutability>) -> P<Ty> { // Construct the error and stash it away with the hope // that typeck will later enrich the error with a type. let kind = match m { Some(Mutability::Mut) => "static mut", Some(Mutability::Not) => "static", None => "const", }; let mut err = self.struct_span_err(id.span, &format!("missing type for `{}` item", kind)); err.span_suggestion( id.span, "provide a type for the item", format!("{}: <type>", id), Applicability::HasPlaceholders, ); err.stash(id.span, StashKey::ItemNoType); // The user intended that the type be inferred, // so treat this as if the user wrote e.g. `const A: _ = expr;`. P(Ty { kind: TyKind::Infer, span: id.span, id: ast::DUMMY_NODE_ID }) } /// Parses an enum declaration. fn parse_item_enum(&mut self) -> PResult<'a, ItemInfo> { let id = self.parse_ident()?; let mut generics = self.parse_generics()?; generics.where_clause = self.parse_where_clause()?; let (variants, _) = self.parse_delim_comma_seq(token::Brace, |p| p.parse_enum_variant()).map_err(|e| { self.recover_stmt(); e })?; let enum_definition = EnumDef { variants: variants.into_iter().filter_map(|v| v).collect() }; Ok((id, ItemKind::Enum(enum_definition, generics))) } fn parse_enum_variant(&mut self) -> PResult<'a, Option<Variant>> { let variant_attrs = self.parse_outer_attributes()?; let vlo = self.token.span; let vis = self.parse_visibility(FollowedByType::No)?; if !self.recover_nested_adt_item(kw::Enum)? { return Ok(None); } let ident = self.parse_ident()?; let struct_def = if self.check(&token::OpenDelim(token::Brace)) { // Parse a struct variant. let (fields, recovered) = self.parse_record_struct_body()?; VariantData::Struct(fields, recovered) } else if self.check(&token::OpenDelim(token::Paren)) { VariantData::Tuple(self.parse_tuple_struct_body()?, DUMMY_NODE_ID) } else { VariantData::Unit(DUMMY_NODE_ID) }; let disr_expr = if self.eat(&token::Eq) { Some(self.parse_anon_const_expr()?) } else { None }; let vr = ast::Variant { ident, vis, id: DUMMY_NODE_ID, attrs: variant_attrs, data: struct_def, disr_expr, span: vlo.to(self.prev_token.span), is_placeholder: false, }; Ok(Some(vr)) } /// Parses `struct Foo { ... }`. fn parse_item_struct(&mut self) -> PResult<'a, ItemInfo> { let class_name = self.parse_ident()?; let mut generics = self.parse_generics()?; // There is a special case worth noting here, as reported in issue #17904. // If we are parsing a tuple struct it is the case that the where clause // should follow the field list. Like so: // // struct Foo<T>(T) where T: Copy; // // If we are parsing a normal record-style struct it is the case // that the where clause comes before the body, and after the generics. // So if we look ahead and see a brace or a where-clause we begin // parsing a record style struct. // // Otherwise if we look ahead and see a paren we parse a tuple-style // struct. 
let vdata = if self.token.is_keyword(kw::Where) { generics.where_clause = self.parse_where_clause()?; if self.eat(&token::Semi) { // If we see a: `struct Foo<T> where T: Copy;` style decl. VariantData::Unit(DUMMY_NODE_ID) } else { // If we see: `struct Foo<T> where T: Copy { ... }` let (fields, recovered) = self.parse_record_struct_body()?; VariantData::Struct(fields, recovered) } // No `where` so: `struct Foo<T>;` } else if self.eat(&token::Semi) { VariantData::Unit(DUMMY_NODE_ID) // Record-style struct definition } else if self.token == token::OpenDelim(token::Brace) { let (fields, recovered) = self.parse_record_struct_body()?; VariantData::Struct(fields, recovered) // Tuple-style struct definition with optional where-clause. } else if self.token == token::OpenDelim(token::Paren) { let body = VariantData::Tuple(self.parse_tuple_struct_body()?, DUMMY_NODE_ID); generics.where_clause = self.parse_where_clause()?; self.expect_semi()?; body } else { let token_str = super::token_descr(&self.token); let msg = &format!( "expected `where`, `{{`, `(`, or `;` after struct name, found {}", token_str ); let mut err = self.struct_span_err(self.token.span, msg); err.span_label(self.token.span, "expected `where`, `{`, `(`, or `;` after struct name"); return Err(err); }; Ok((class_name, ItemKind::Struct(vdata, generics))) } /// Parses `union Foo { ... }`. fn parse_item_union(&mut self) -> PResult<'a, ItemInfo> { let class_name = self.parse_ident()?; let mut generics = self.parse_generics()?; let vdata = if self.token.is_keyword(kw::Where) { generics.where_clause = self.parse_where_clause()?; let (fields, recovered) = self.parse_record_struct_body()?; VariantData::Struct(fields, recovered) } else if self.token == token::OpenDelim(token::Brace) { let (fields, recovered) = self.parse_record_struct_body()?; VariantData::Struct(fields, recovered) } else { let token_str = super::token_descr(&self.token); let msg = &format!("expected `where` or `{{` after union name, found {}", token_str); let mut err = self.struct_span_err(self.token.span, msg); err.span_label(self.token.span, "expected `where` or `{` after union name"); return Err(err); }; Ok((class_name, ItemKind::Union(vdata, generics))) } fn parse_record_struct_body( &mut self, ) -> PResult<'a, (Vec<StructField>, /* recovered */ bool)> { let mut fields = Vec::new(); let mut recovered = false; if self.eat(&token::OpenDelim(token::Brace)) { while self.token != token::CloseDelim(token::Brace) { let field = self.parse_struct_decl_field().map_err(|e| { self.consume_block(token::Brace, ConsumeClosingDelim::No); recovered = true; e }); match field { Ok(field) => fields.push(field), Err(mut err) => { err.emit(); break; } } } self.eat(&token::CloseDelim(token::Brace)); } else { let token_str = super::token_descr(&self.token); let msg = &format!("expected `where`, or `{{` after struct name, found {}", token_str); let mut err = self.struct_span_err(self.token.span, msg); err.span_label(self.token.span, "expected `where`, or `{` after struct name"); return Err(err); } Ok((fields, recovered)) } fn parse_tuple_struct_body(&mut self) -> PResult<'a, Vec<StructField>> { // This is the case where we find `struct Foo<T>(T) where T: Copy;` // Unit like structs are handled in parse_item_struct function self.parse_paren_comma_seq(|p| { let attrs = p.parse_outer_attributes()?; let lo = p.token.span; let vis = p.parse_visibility(FollowedByType::Yes)?; let ty = p.parse_ty()?; Ok(StructField { span: lo.to(ty.span), vis, ident: None, id: DUMMY_NODE_ID, ty, attrs, is_placeholder: 
false, }) }) .map(|(r, _)| r) } /// Parses an element of a struct declaration. fn parse_struct_decl_field(&mut self) -> PResult<'a, StructField> { let attrs = self.parse_outer_attributes()?; let lo = self.token.span; let vis = self.parse_visibility(FollowedByType::No)?; self.parse_single_struct_field(lo, vis, attrs) } /// Parses a structure field declaration. fn parse_single_struct_field( &mut self, lo: Span, vis: Visibility, attrs: Vec<Attribute>, ) -> PResult<'a, StructField> { let mut seen_comma: bool = false; let a_var = self.parse_name_and_ty(lo, vis, attrs)?; if self.token == token::Comma { seen_comma = true; } match self.token.kind { token::Comma => { self.bump(); } token::CloseDelim(token::Brace) => {} token::DocComment(_) => { let previous_span = self.prev_token.span; let mut err = self.span_fatal_err(self.token.span, Error::UselessDocComment); self.bump(); // consume the doc comment let comma_after_doc_seen = self.eat(&token::Comma); // `seen_comma` is always false, because we are inside doc block // condition is here to make code more readable if !seen_comma && comma_after_doc_seen { seen_comma = true; } if comma_after_doc_seen || self.token == token::CloseDelim(token::Brace) { err.emit(); } else { if !seen_comma { let sp = self.sess.source_map().next_point(previous_span); err.span_suggestion( sp, "missing comma here", ",".into(), Applicability::MachineApplicable, ); } return Err(err); } } _ => { let sp = self.prev_token.span.shrink_to_hi(); let mut err = self.struct_span_err( sp, &format!("expected `,`, or `}}`, found {}", super::token_descr(&self.token)), ); if self.token.is_ident() { // This is likely another field; emit the diagnostic and keep going err.span_suggestion( sp, "try adding a comma", ",".into(), Applicability::MachineApplicable, ); err.emit(); } else { return Err(err); } } } Ok(a_var) } /// Parses a structure field. fn parse_name_and_ty( &mut self, lo: Span, vis: Visibility, attrs: Vec<Attribute>, ) -> PResult<'a, StructField> { let name = self.parse_ident()?; self.expect(&token::Colon)?; let ty = self.parse_ty()?; Ok(StructField { span: lo.to(self.prev_token.span), ident: Some(name), vis, id: DUMMY_NODE_ID, ty, attrs, is_placeholder: false, }) } /// Parses a declarative macro 2.0 definition. /// The `macro` keyword has already been parsed. /// ``` /// MacBody = "{" TOKEN_STREAM "}" ; /// MacParams = "(" TOKEN_STREAM ")" ; /// DeclMac = "macro" Ident MacParams? MacBody ; /// ``` fn parse_item_decl_macro(&mut self, lo: Span) -> PResult<'a, ItemInfo> { let ident = self.parse_ident()?; let body = if self.check(&token::OpenDelim(token::Brace)) { self.parse_mac_args()? // `MacBody` } else if self.check(&token::OpenDelim(token::Paren)) { let params = self.parse_token_tree(); // `MacParams` let pspan = params.span(); if !self.check(&token::OpenDelim(token::Brace)) { return self.unexpected(); } let body = self.parse_token_tree(); // `MacBody` // Convert `MacParams MacBody` into `{ MacParams => MacBody }`. let bspan = body.span(); let arrow = TokenTree::token(token::FatArrow, pspan.between(bspan)); // `=>` let tokens = TokenStream::new(vec![params.into(), arrow.into(), body.into()]); let dspan = DelimSpan::from_pair(pspan.shrink_to_lo(), bspan.shrink_to_hi()); P(MacArgs::Delimited(dspan, MacDelimiter::Brace, tokens)) } else { return self.unexpected(); }; self.sess.gated_spans.gate(sym::decl_macro, lo.to(self.prev_token.span)); Ok((ident, ItemKind::MacroDef(ast::MacroDef { body, macro_rules: false }))) } /// Is this unambiguously the start of a `macro_rules! 
foo` item definition?
    fn is_macro_rules_item(&mut self) -> bool {
        self.check_keyword(kw::MacroRules)
            && self.look_ahead(1, |t| *t == token::Not)
            && self.look_ahead(2, |t| t.is_ident())
    }

    /// Parses a `macro_rules! foo { ... }` declarative macro.
    fn parse_item_macro_rules(&mut self, vis: &Visibility) -> PResult<'a, ItemInfo> {
        self.expect_keyword(kw::MacroRules)?; // `macro_rules`
        self.expect(&token::Not)?; // `!`

        let ident = self.parse_ident()?;
        let body = self.parse_mac_args()?;
        self.eat_semi_for_macro_if_needed(&body);
        self.complain_if_pub_macro(vis, true);

        Ok((ident, ItemKind::MacroDef(ast::MacroDef { body, macro_rules: true })))
    }

    /// Item macro invocations or `macro_rules!` definitions need inherited visibility.
    /// If that's not the case, emit an error.
    fn complain_if_pub_macro(&self, vis: &Visibility, macro_rules: bool) {
        if let VisibilityKind::Inherited = vis.node {
            return;
        }

        let vstr = pprust::vis_to_string(vis);
        let vstr = vstr.trim_end();
        if macro_rules {
            let msg = format!("can't qualify macro_rules invocation with `{}`", vstr);
            self.struct_span_err(vis.span, &msg)
                .span_suggestion(
                    vis.span,
                    "try exporting the macro",
                    "#[macro_export]".to_owned(),
                    Applicability::MaybeIncorrect, // speculative
                )
                .emit();
        } else {
            self.struct_span_err(vis.span, "can't qualify macro invocation with `pub`")
                .span_suggestion(
                    vis.span,
                    "remove the visibility",
                    String::new(),
                    Applicability::MachineApplicable,
                )
                .help(&format!("try adjusting the macro to put `{}` inside the invocation", vstr))
                .emit();
        }
    }

    fn eat_semi_for_macro_if_needed(&mut self, args: &MacArgs) {
        if args.need_semicolon() && !self.eat(&token::Semi) {
            self.report_invalid_macro_expansion_item(args);
        }
    }

    fn report_invalid_macro_expansion_item(&self, args: &MacArgs) {
        let span = args.span().expect("undelimited macro call");
        let mut err = self.struct_span_err(
            span,
            "macros that expand to items must be delimited with braces or followed by a semicolon",
        );
        if self.unclosed_delims.is_empty() {
            let DelimSpan { open, close } = match args {
                MacArgs::Empty | MacArgs::Eq(..) => unreachable!(),
                MacArgs::Delimited(dspan, ..) => *dspan,
            };
            err.multipart_suggestion(
                "change the delimiters to curly braces",
                vec![(open, "{".to_string()), (close, '}'.to_string())],
                Applicability::MaybeIncorrect,
            );
        } else {
            err.span_suggestion(
                span,
                "change the delimiters to curly braces",
                " { /* items */ }".to_string(),
                Applicability::HasPlaceholders,
            );
        }
        err.span_suggestion(
            span.shrink_to_hi(),
            "add a semicolon",
            ';'.to_string(),
            Applicability::MaybeIncorrect,
        );
        err.emit();
    }

    /// Checks if the current token is one of the tokens that cannot be nested, like `kw::Enum`.
    /// If it is, we try to parse the item and report an error about the nested types.
    fn recover_nested_adt_item(&mut self, keyword: Symbol) -> PResult<'a, bool> {
        if (self.token.is_keyword(kw::Enum)
            || self.token.is_keyword(kw::Struct)
            || self.token.is_keyword(kw::Union))
            && self.look_ahead(1, |t| t.is_ident())
        {
            let kw_token = self.token.clone();
            let kw_str = pprust::token_to_string(&kw_token);
            let item = self.parse_item()?;

            self.struct_span_err(
                kw_token.span,
                &format!("`{}` definition cannot be nested inside `{}`", kw_str, keyword),
            )
            .span_suggestion(
                item.unwrap().span,
                &format!("consider creating a new `{}` definition instead of nesting", kw_str),
                String::new(),
                Applicability::MaybeIncorrect,
            )
            .emit();
            // We successfully parsed the item but we must inform the caller about the nested problem.
            return Ok(false);
        }
        Ok(true)
    }
}

/// The parsing configuration used to parse a parameter list (see `parse_fn_params`).
///
/// The function decides if, per-parameter `p`, `p` must have a pattern or just a type.
type ReqName = fn(Edition) -> bool;

/// Parsing of functions and methods.
impl<'a> Parser<'a> {
    /// Parse a function starting from the front matter (`const ...`) to the body `{ ... }` or `;`.
    fn parse_fn(
        &mut self,
        attrs: &mut Vec<Attribute>,
        req_name: ReqName,
    ) -> PResult<'a, (Ident, FnSig, Generics, Option<P<Block>>)> {
        let header = self.parse_fn_front_matter()?; // `const ... fn`
        let ident = self.parse_ident()?; // `foo`
        let mut generics = self.parse_generics()?; // `<'a, T, ...>`
        let decl = self.parse_fn_decl(req_name, AllowPlus::Yes)?; // `(p: u8, ...)`
        generics.where_clause = self.parse_where_clause()?; // `where T: Ord`
        let body = self.parse_fn_body(attrs)?; // `;` or `{ ... }`.
        Ok((ident, FnSig { header, decl }, generics, body))
    }

    /// Parse the "body" of a function.
    /// This can either be `;` when there's no body,
    /// or e.g. a block when the function is a provided one.
    fn parse_fn_body(&mut self, attrs: &mut Vec<Attribute>) -> PResult<'a, Option<P<Block>>> {
        let (inner_attrs, body) = if self.check(&token::Semi) {
            self.bump(); // `;`
            (Vec::new(), None)
        } else if self.check(&token::OpenDelim(token::Brace)) || self.token.is_whole_block() {
            self.parse_inner_attrs_and_block().map(|(attrs, body)| (attrs, Some(body)))?
        } else if self.token.kind == token::Eq {
            // Recover `fn foo() = $expr;`.
            self.bump(); // `=`
            let eq_sp = self.prev_token.span;
            let _ = self.parse_expr()?;
            self.expect_semi()?; // `;`
            let span = eq_sp.to(self.prev_token.span);
            self.struct_span_err(span, "function body cannot be `= expression;`")
                .multipart_suggestion(
                    "surround the expression with `{` and `}` instead of `=` and `;`",
                    vec![(eq_sp, "{".to_string()), (self.prev_token.span, " }".to_string())],
                    Applicability::MachineApplicable,
                )
                .emit();
            (Vec::new(), Some(self.mk_block_err(span)))
        } else {
            return self.expected_semi_or_open_brace();
        };
        attrs.extend(inner_attrs);
        Ok(body)
    }

    /// Is the current token the start of an `FnHeader` / not a valid parse?
    pub(super) fn check_fn_front_matter(&mut self) -> bool {
        // We use an over-approximation here.
        // `const const`, `fn const` won't parse, but we're not stepping over other syntax either.
        const QUALS: [Symbol; 4] = [kw::Const, kw::Async, kw::Unsafe, kw::Extern];
        self.check_keyword(kw::Fn) // Definitely an `fn`.
            // `$qual fn` or `$qual $qual`:
            || QUALS.iter().any(|&kw| self.check_keyword(kw))
                && self.look_ahead(1, |t| {
                    // ...qualified and then `fn`, e.g. `const fn`.
                    t.is_keyword(kw::Fn)
                    // Two qualifiers. This is enough. Due to `async`, we need to check that it's reserved.
                    || t.is_non_raw_ident_where(|i| QUALS.contains(&i.name) && i.is_reserved())
                })
            // `extern ABI fn`
            || self.check_keyword(kw::Extern)
                && self.look_ahead(1, |t| t.can_begin_literal_maybe_minus())
                && self.look_ahead(2, |t| t.is_keyword(kw::Fn))
    }

    /// Parses all the "front matter" (or "qualifiers") for a `fn` declaration,
    /// up to and including the `fn` keyword. The formal grammar is:
    ///
    /// ```
    /// Extern = "extern" StringLit ;
    /// FnQual = "const"? "async"? "unsafe"? Extern? ;
    /// FnFrontMatter = FnQual? "fn" ;
    /// ```
    pub(super) fn parse_fn_front_matter(&mut self) -> PResult<'a, FnHeader> {
        let constness = self.parse_constness();
        let asyncness = self.parse_asyncness();
        let unsafety = self.parse_unsafety();
        let ext = self.parse_extern()?;
        if let Async::Yes { span, ..
} = asyncness { self.ban_async_in_2015(span); } if !self.eat_keyword(kw::Fn) { // It is possible for `expect_one_of` to recover given the contents of // `self.expected_tokens`, therefore, do not use `self.unexpected()` which doesn't // account for this. if !self.expect_one_of(&[], &[])? { unreachable!() } } Ok(FnHeader { constness, unsafety, asyncness, ext }) } /// We are parsing `async fn`. If we are on Rust 2015, emit an error. fn ban_async_in_2015(&self, span: Span) { if span.rust_2015() { let diag = self.diagnostic(); struct_span_err!(diag, span, E0670, "`async fn` is not permitted in the 2015 edition") .note("to use `async fn`, switch to Rust 2018") .help("set `edition = \"2018\"` in `Cargo.toml`") .note("for more on editions, read https://doc.rust-lang.org/edition-guide") .emit(); } } /// Parses the parameter list and result type of a function declaration. pub(super) fn parse_fn_decl( &mut self, req_name: ReqName, ret_allow_plus: AllowPlus, ) -> PResult<'a, P<FnDecl>> { Ok(P(FnDecl { inputs: self.parse_fn_params(req_name)?, output: self.parse_ret_ty(ret_allow_plus, RecoverQPath::Yes)?, })) } /// Parses the parameter list of a function, including the `(` and `)` delimiters. fn parse_fn_params(&mut self, req_name: ReqName) -> PResult<'a, Vec<Param>> { let mut first_param = true; // Parse the arguments, starting out with `self` being allowed... let (mut params, _) = self.parse_paren_comma_seq(|p| { let param = p.parse_param_general(req_name, first_param).or_else(|mut e| { e.emit(); let lo = p.prev_token.span; // Skip every token until next possible arg or end. p.eat_to_tokens(&[&token::Comma, &token::CloseDelim(token::Paren)]); // Create a placeholder argument for proper arg count (issue #34264). Ok(dummy_arg(Ident::new(kw::Invalid, lo.to(p.prev_token.span)))) }); // ...now that we've parsed the first argument, `self` is no longer allowed. first_param = false; param })?; // Replace duplicated recovered params with `_` pattern to avoid unnecessary errors. self.deduplicate_recovered_params_names(&mut params); Ok(params) } /// Parses a single function parameter. /// /// - `self` is syntactically allowed when `first_param` holds. fn parse_param_general(&mut self, req_name: ReqName, first_param: bool) -> PResult<'a, Param> { let lo = self.token.span; let attrs = self.parse_outer_attributes()?; // Possibly parse `self`. Recover if we parsed it and it wasn't allowed here. if let Some(mut param) = self.parse_self_param()? { param.attrs = attrs.into(); return if first_param { Ok(param) } else { self.recover_bad_self_param(param) }; } let is_name_required = match self.token.kind { token::DotDotDot => false, _ => req_name(self.token.span.edition()), }; let (pat, ty) = if is_name_required || self.is_named_param() { debug!("parse_param_general parse_pat (is_name_required:{})", is_name_required); let pat = self.parse_fn_param_pat()?; if let Err(mut err) = self.expect(&token::Colon) { return if let Some(ident) = self.parameter_without_type(&mut err, pat, is_name_required, first_param) { err.emit(); Ok(dummy_arg(ident)) } else { Err(err) }; } self.eat_incorrect_doc_comment_for_param_type(); (pat, self.parse_ty_for_param()?) 
} else { debug!("parse_param_general ident_to_pat"); let parser_snapshot_before_ty = self.clone(); self.eat_incorrect_doc_comment_for_param_type(); let mut ty = self.parse_ty_for_param(); if ty.is_ok() && self.token != token::Comma && self.token != token::CloseDelim(token::Paren) { // This wasn't actually a type, but a pattern looking like a type, // so we are going to rollback and re-parse for recovery. ty = self.unexpected(); } match ty { Ok(ty) => { let ident = Ident::new(kw::Invalid, self.prev_token.span); let bm = BindingMode::ByValue(Mutability::Not); let pat = self.mk_pat_ident(ty.span, bm, ident); (pat, ty) } // If this is a C-variadic argument and we hit an error, return the error. Err(err) if self.token == token::DotDotDot => return Err(err), // Recover from attempting to parse the argument as a type without pattern. Err(mut err) => { err.cancel(); mem::replace(self, parser_snapshot_before_ty); self.recover_arg_parse()? } } }; let span = lo.to(self.token.span); Ok(Param { attrs: attrs.into(), id: ast::DUMMY_NODE_ID, is_placeholder: false, pat, span, ty, }) } /// Returns the parsed optional self parameter and whether a self shortcut was used. fn parse_self_param(&mut self) -> PResult<'a, Option<Param>> { // Extract an identifier *after* having confirmed that the token is one. let expect_self_ident = |this: &mut Self| match this.token.ident() { Some((ident, false)) => { this.bump(); ident } _ => unreachable!(), }; // Is `self` `n` tokens ahead? let is_isolated_self = |this: &Self, n| { this.is_keyword_ahead(n, &[kw::SelfLower]) && this.look_ahead(n + 1, |t| t != &token::ModSep) }; // Is `mut self` `n` tokens ahead? let is_isolated_mut_self = |this: &Self, n| this.is_keyword_ahead(n, &[kw::Mut]) && is_isolated_self(this, n + 1); // Parse `self` or `self: TYPE`. We already know the current token is `self`. let parse_self_possibly_typed = |this: &mut Self, m| { let eself_ident = expect_self_ident(this); let eself_hi = this.prev_token.span; let eself = if this.eat(&token::Colon) { SelfKind::Explicit(this.parse_ty()?, m) } else { SelfKind::Value(m) }; Ok((eself, eself_ident, eself_hi)) }; // Recover for the grammar `*self`, `*const self`, and `*mut self`. let recover_self_ptr = |this: &mut Self| { let msg = "cannot pass `self` by raw pointer"; let span = this.token.span; this.struct_span_err(span, msg).span_label(span, msg).emit(); Ok((SelfKind::Value(Mutability::Not), expect_self_ident(this), this.prev_token.span)) }; // Parse optional `self` parameter of a method. // Only a limited set of initial token sequences is considered `self` parameters; anything // else is parsed as a normal function parameter list, so some lookahead is required. 
let eself_lo = self.token.span; let (eself, eself_ident, eself_hi) = match self.token.uninterpolate().kind { token::BinOp(token::And) => { let eself = if is_isolated_self(self, 1) { // `&self` self.bump(); SelfKind::Region(None, Mutability::Not) } else if is_isolated_mut_self(self, 1) { // `&mut self` self.bump(); self.bump(); SelfKind::Region(None, Mutability::Mut) } else if self.look_ahead(1, |t| t.is_lifetime()) && is_isolated_self(self, 2) { // `&'lt self` self.bump(); let lt = self.expect_lifetime(); SelfKind::Region(Some(lt), Mutability::Not) } else if self.look_ahead(1, |t| t.is_lifetime()) && is_isolated_mut_self(self, 2) { // `&'lt mut self` self.bump(); let lt = self.expect_lifetime(); self.bump(); SelfKind::Region(Some(lt), Mutability::Mut) } else { // `&not_self` return Ok(None); }; (eself, expect_self_ident(self), self.prev_token.span) } // `*self` token::BinOp(token::Star) if is_isolated_self(self, 1) => { self.bump(); recover_self_ptr(self)? } // `*mut self` and `*const self` token::BinOp(token::Star) if self.look_ahead(1, |t| t.is_mutability()) && is_isolated_self(self, 2) => { self.bump(); self.bump(); recover_self_ptr(self)? } // `self` and `self: TYPE` token::Ident(..) if is_isolated_self(self, 0) => { parse_self_possibly_typed(self, Mutability::Not)? } // `mut self` and `mut self: TYPE` token::Ident(..) if is_isolated_mut_self(self, 0) => { self.bump(); parse_self_possibly_typed(self, Mutability::Mut)? } _ => return Ok(None), }; let eself = source_map::respan(eself_lo.to(eself_hi), eself); Ok(Some(Param::from_self(AttrVec::default(), eself, eself_ident))) } fn is_named_param(&self) -> bool { let offset = match self.token.kind { token::Interpolated(ref nt) => match **nt { token::NtPat(..) => return self.look_ahead(1, |t| t == &token::Colon), _ => 0, }, token::BinOp(token::And) | token::AndAnd => 1, _ if self.token.is_keyword(kw::Mut) => 1, _ => 0, }; self.look_ahead(offset, |t| t.is_ident()) && self.look_ahead(offset + 1, |t| t == &token::Colon) } fn recover_first_param(&mut self) -> &'static str { match self .parse_outer_attributes() .and_then(|_| self.parse_self_param()) .map_err(|mut e| e.cancel()) { Ok(Some(_)) => "method", _ => "function", } } }
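    // A quick reference sketch of the `self` shapes recognized by
    // `parse_self_param` above and the `SelfKind` each one maps to
    // (illustrative examples, not taken from the source):
    //
    //     fn f(self)              // SelfKind::Value(Mutability::Not)
    //     fn f(mut self)          // SelfKind::Value(Mutability::Mut)
    //     fn f(&self)             // SelfKind::Region(None, Mutability::Not)
    //     fn f(&'a mut self)      // SelfKind::Region(Some('a), Mutability::Mut)
    //     fn f(self: Box<Self>)   // SelfKind::Explicit(ty, Mutability::Not)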
{ // Space between `pub` keyword and the identifier // // pub S {} // ^^^ `sp` points here let sp = self.prev_token.span.between(self.token.span); let full_sp = self.prev_token.span.to(self.token.span); let ident_sp = self.token.span; if self.look_ahead(1, |t| *t == token::OpenDelim(token::Brace)) { // possible public struct definition where `struct` was forgotten let ident = self.parse_ident().unwrap(); let msg = format!("add `struct` here to parse `{}` as a public struct", ident); let mut err = self.struct_span_err(sp, "missing `struct` for struct definition"); err.span_suggestion_short( sp, &msg, " struct ".into(), Applicability::MaybeIncorrect, // speculative ); Err(err) } else if self.look_ahead(1, |t| *t == token::OpenDelim(token::Paren)) { let ident = self.parse_ident().unwrap(); self.bump(); // `(` let kw_name = self.recover_first_param(); self.consume_block(token::Paren, ConsumeClosingDelim::Yes); let (kw, kw_name, ambiguous) = if self.check(&token::RArrow) { self.eat_to_tokens(&[&token::OpenDelim(token::Brace)]); self.bump(); // `{` ("fn", kw_name, false) } else if self.check(&token::OpenDelim(token::Brace)) { self.bump(); // `{` ("fn", kw_name, false) } else if self.check(&token::Colon) { let kw = "struct"; (kw, kw, false) } else { ("fn` or `struct", "function or struct", true) }; let msg = format!("missing `{}` for {} definition", kw, kw_name); let mut err = self.struct_span_err(sp, &msg); if !ambiguous { self.consume_block(token::Brace, ConsumeClosingDelim::Yes); let suggestion = format!("add `{}` here to parse `{}` as a public {}", kw, ident, kw_name); err.span_suggestion_short( sp, &suggestion, format!(" {} ", kw), Applicability::MachineApplicable, ); } else { if let Ok(snippet) = self.span_to_snippet(ident_sp) { err.span_suggestion( full_sp, "if you meant to call a macro, try", format!("{}!", snippet), // this is the `ambiguous` conditional branch Applicability::MaybeIncorrect, ); } else { err.help( "if you meant to call a macro, remove the `pub` \ and add a trailing `!` after the identifier", ); } } Err(err) } else if self.look_ahead(1, |t| *t == token::Lt) { let ident = self.parse_ident().unwrap(); self.eat_to_tokens(&[&token::Gt]); self.bump(); // `>` let (kw, kw_name, ambiguous) = if self.eat(&token::OpenDelim(token::Paren)) { ("fn", self.recover_first_param(), false) } else if self.check(&token::OpenDelim(token::Brace)) { ("struct", "struct", false) } else { ("fn` or `struct", "function or struct", true) }; let msg = format!("missing `{}` for {} definition", kw, kw_name); let mut err = self.struct_span_err(sp, &msg); if !ambiguous { err.span_suggestion_short( sp, &format!("add `{}` here to parse `{}` as a public {}", kw, ident, kw_name), format!(" {} ", kw), Applicability::MachineApplicable, ); } Err(err) } else { Ok(()) } }
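// Hypothetical inputs (illustrative only, not from the compiler's test suite)
// that would reach the recovery above:
//
//     pub S { x: u32 }     // -> "missing `struct` for struct definition"
//     pub foo(x: u8) {}    // -> "missing `fn` for function definition"
//     pub bar<T>(t: T) {}  // generics first: skips to `>` before deciding `fn` vs `struct`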
EditListingWizardTab.js
import { types as sdkTypes } from '../../util/sdkLoader';
import React from 'react';
import PropTypes from 'prop-types';
import { intlShape } from '../../util/reactIntl';
import routeConfiguration from '../../routeConfiguration';
import {
  LISTING_PAGE_PARAM_TYPE_DRAFT,
  LISTING_PAGE_PARAM_TYPE_NEW,
  LISTING_PAGE_PARAM_TYPES,
} from '../../util/urlHelpers';
import { ensureListing } from '../../util/data';
import { createResourceLocatorString } from '../../util/routes';
import {
  EditListingActivityPanel,
  EditListingAvailabilityPanel,
  EditListingDescriptionPanel,
  EditListingFeaturesPanel,
  EditListingLocationPanel,
  EditListingPhotosPanel,
  EditListingPoliciesPanel,
  EditListingPricingPanel,
  EditListingRentalstylePanel,
  EditListingBasicinfoPanel,
  EditListingDetailinfoPanel,
  EditListingAdditionalitemPanel,
  // [ADD_EDITLISTINGPANEL_HERE] NOTE: Do not delete this line. Used by misc/copyEditLisingPanelAndForm.py
} from '../../components';

import css from './EditListingWizard.css';
import { stringifyDateToISO8601 } from '../../util/dates';
import { defaultValueWithEnUSD } from '../FieldCurrencyInput/FieldCurrencyInput.example';

const { LatLng } = sdkTypes;

// All chars must be lower case.
// EditListingWizard.tabLabel{*} in src/translation/--.json must be the same
// with the first character uppercased (capitalized).
// (related in src/components/EditListingWizard/EditListingWizardTab.js > createNextButtonText())
export const AVAILABILITY = 'availability';
export const DESCRIPTION = 'description';
export const FEATURES = 'features';
export const POLICY = 'policy';
export const LOCATION = 'location';
export const PRICING = 'pricing';
export const PHOTOS = 'photos';
export const ACTIVITY = 'activity';
export const RENTALSTYLE = 'rentalstyle';
export const BASICINFO = 'basicinfo';
export const DETAILINFO = 'detailinfo';
export const ADDITIONALITEM = 'additionalitem';
// [ADD_EDITLISTINGIDENTIFIER_HERE] NOTE: Do not delete this line. Used by misc/copyEditLisingPanelAndForm.py

// EditListingWizardTab component supports these tabs
export const SUPPORTED_TABS = [
  DESCRIPTION,
  FEATURES,
  POLICY,
  LOCATION,
  PRICING,
  AVAILABILITY,
  PHOTOS,
  ACTIVITY,
  RENTALSTYLE,
  BASICINFO,
  DETAILINFO,
  ADDITIONALITEM,
  // [ADD_SUPPORTEDTAB_HERE] NOTE: Do not delete this line. Used by misc/copyEditLisingPanelAndForm.py
];

const pathParamsToNextTab = (params, tab, marketplaceTabs) => {
  const nextTabIndex = marketplaceTabs.findIndex(s => s === tab) + 1;
  const nextTab =
    nextTabIndex < marketplaceTabs.length
      ? marketplaceTabs[nextTabIndex]
      : marketplaceTabs[marketplaceTabs.length - 1];
  return { ...params, tab: nextTab };
};

// When the user has updated a draft listing, they should be redirected to the next EditListingWizardTab.
const redirectAfterDraftUpdate = (listingId, params, tab, marketplaceTabs, history) => {
  const currentPathParams = {
    ...params,
    type: LISTING_PAGE_PARAM_TYPE_DRAFT,
    id: listingId,
  };
  const routes = routeConfiguration();

  // Replace the current "new" path with the "draft" path.
  // The browser's back button should then lead to editing the current draft instead of creating a new one.
if (params.type === LISTING_PAGE_PARAM_TYPE_NEW) { const draftURI = createResourceLocatorString('EditListingPage', routes, currentPathParams, {}); history.replace(draftURI); } // Redirect to next tab const nextPathParams = pathParamsToNextTab(currentPathParams, tab, marketplaceTabs); const to = createResourceLocatorString('EditListingPage', routes, nextPathParams, {}); history.push(to); }; const EditListingWizardTab = props => { const { tab, marketplaceTabs, params, errors, fetchInProgress, newListingPublished, history, images, availability, listing, currentUser, handleCreateFlowTabScrolling, handlePublishListing, onUpdateListing, onCreateListingDraft, onImageUpload, onUpdateImageOrder, onRemoveImage, onChange, updatedTab, updateInProgress, isLastTab, intl, } = props;
const { type } = params; const isNewURI = type === LISTING_PAGE_PARAM_TYPE_NEW; const isDraftURI = type === LISTING_PAGE_PARAM_TYPE_DRAFT; const isNewListingFlow = isNewURI || isDraftURI; const currentListing = ensureListing(listing); const imageIds = images => { return images ? images.map(img => img.imageId || img.id) : null; }; const onCompleteEditListingWizardTab = (tab, updateValues, currentUser) => { const defaultValues = { title: '(no title)', geolocation: currentUser && currentUser.attributes.profile.publicData.geolocation && currentUser.attributes.profile.publicData.geolocation.selectedPlace && currentUser.attributes.profile.publicData.geolocation.selectedPlace.origin && new LatLng( currentUser.attributes.profile.publicData.geolocation.selectedPlace.origin.lat, currentUser.attributes.profile.publicData.geolocation.selectedPlace.origin.lng ), }; updateValues = { ...defaultValues, ...updateValues, }; const { images: updatedImages, ...otherValues } = updateValues; const imageProperty = typeof updatedImages !== 'undefined' ? { images: imageIds(updatedImages) } : {}; const updateValuesWithImages = { ...otherValues, ...imageProperty }; if (isNewListingFlow) { const onUpsertListingDraft = isNewURI ? (tab, updateValues) => onCreateListingDraft(updateValues) : onUpdateListing; const upsertValues = isNewURI ? updateValuesWithImages : { ...updateValuesWithImages, id: currentListing.id }; onUpsertListingDraft(tab, upsertValues) .then(r => { if (tab !== marketplaceTabs[marketplaceTabs.length - 1]) { // Create listing flow: smooth scrolling polyfill to scroll to correct tab handleCreateFlowTabScrolling(false); // After successful saving of draft data, user should be redirected to next tab redirectAfterDraftUpdate(r.data.data.id.uuid, params, tab, marketplaceTabs, history); } else { handlePublishListing(currentListing.id); } }) .catch(e => { // No need for extra actions }); } else { onUpdateListing(tab, { ...updateValuesWithImages, id: currentListing.id }); } }; const panelProps = tab => { return { className: css.panel, errors, listing, currentUser, onChange, panelUpdated: updatedTab === tab, updateInProgress, // newListingPublished and fetchInProgress are flags for the last wizard tab ready: newListingPublished, disabled: fetchInProgress, }; }; const createNextButtonText = (tab, marketplaceTabs, isNewListingFlow, isLastTab) => { const capitalizeFirstLetter = str => { return str && str.length > 0 ? 
        str.charAt(0).toUpperCase() + str.slice(1) : '';
    };
    const tabLabel = intl.formatMessage({
      id: `EditListingWizard.tabLabel${capitalizeFirstLetter(tab)}`,
    });

    if (isNewListingFlow) {
      if (!isLastTab) {
        const nextTab = marketplaceTabs[marketplaceTabs.indexOf(tab) + 1];
        const nextTabLabel = intl.formatMessage({
          id: `EditListingWizard.tabLabel${capitalizeFirstLetter(nextTab)}`,
        });
        // When creating a new listing and not editing the last tab: "Next {nextTabLabel}"
        return intl.formatMessage(
          {
            id: 'EditListingWizard.saveNewNotLastTab',
          },
          {
            nextTabLabel,
          }
        );
      } else {
        // When creating a new listing and editing the last tab: "Publish listing"
        return intl.formatMessage({ id: 'EditListingWizard.saveNewLastTab' });
      }
    } else {
      // When editing an already existing listing: "Save {tabLabel}"
      return intl.formatMessage({ id: 'EditListingWizard.saveEditTab' }, { tabLabel: tabLabel });
    }
  };

  switch (tab) {
    case DESCRIPTION: {
      return (
        <EditListingDescriptionPanel
          {...panelProps(DESCRIPTION)}
          submitButtonText={createNextButtonText(tab, marketplaceTabs, isNewListingFlow, isLastTab)}
          onSubmit={values => {
            onCompleteEditListingWizardTab(tab, values, currentUser);
          }}
        />
      );
    }
    case FEATURES: {
      return (
        <EditListingFeaturesPanel
          {...panelProps(FEATURES)}
          submitButtonText={createNextButtonText(tab, marketplaceTabs, isNewListingFlow, isLastTab)}
          onSubmit={values => {
            onCompleteEditListingWizardTab(tab, values, currentUser);
          }}
        />
      );
    }
    case POLICY: {
      return (
        <EditListingPoliciesPanel
          {...panelProps(POLICY)}
          submitButtonText={createNextButtonText(tab, marketplaceTabs, isNewListingFlow, isLastTab)}
          onSubmit={values => {
            onCompleteEditListingWizardTab(tab, values, currentUser);
          }}
        />
      );
    }
    case LOCATION: {
      return (
        <EditListingLocationPanel
          {...panelProps(LOCATION)}
          submitButtonText={createNextButtonText(tab, marketplaceTabs, isNewListingFlow, isLastTab)}
          onSubmit={values => {
            onCompleteEditListingWizardTab(tab, values, currentUser);
          }}
        />
      );
    }
    case PRICING: {
      return (
        <EditListingPricingPanel
          {...panelProps(PRICING)}
          submitButtonText={createNextButtonText(tab, marketplaceTabs, isNewListingFlow, isLastTab)}
          onSubmit={values => {
            onCompleteEditListingWizardTab(tab, values, currentUser);
          }}
        />
      );
    }
    case AVAILABILITY: {
      return (
        <EditListingAvailabilityPanel
          {...panelProps(AVAILABILITY)}
          availability={availability}
          submitButtonText={createNextButtonText(tab, marketplaceTabs, isNewListingFlow, isLastTab)}
          onSubmit={values => {
            onCompleteEditListingWizardTab(tab, values, currentUser);
          }}
        />
      );
    }
    case PHOTOS: {
      return (
        <EditListingPhotosPanel
          {...panelProps(PHOTOS)}
          submitButtonText={createNextButtonText(tab, marketplaceTabs, isNewListingFlow, isLastTab)}
          images={images}
          onImageUpload={onImageUpload}
          onRemoveImage={onRemoveImage}
          onSubmit={values => {
            onCompleteEditListingWizardTab(tab, values, currentUser);
          }}
          onUpdateImageOrder={onUpdateImageOrder}
        />
      );
    }
    case ACTIVITY: {
      return (
        <EditListingActivityPanel
          {...panelProps(ACTIVITY)}
          submitButtonText={createNextButtonText(tab, marketplaceTabs, isNewListingFlow, isLastTab)}
          onSubmit={values => {
            onCompleteEditListingWizardTab(tab, values, currentUser);
          }}
        />
      );
    }
    case RENTALSTYLE: {
      return (
        <EditListingRentalstylePanel
          {...panelProps(RENTALSTYLE)}
          submitButtonText={createNextButtonText(tab, marketplaceTabs, isNewListingFlow, isLastTab)}
          onSubmit={values => {
            onCompleteEditListingWizardTab(tab, values, currentUser);
          }}
        />
      );
    }
    case BASICINFO: {
      return (
        <EditListingBasicinfoPanel
          {...panelProps(BASICINFO)}
          submitButtonText={createNextButtonText(tab, marketplaceTabs, isNewListingFlow,
isLastTab)} onSubmit={values => { onCompleteEditListingWizardTab(tab, values, currentUser); }} /> ); } case DETAILINFO: { return ( <EditListingDetailinfoPanel {...panelProps(DETAILINFO)} submitButtonText={createNextButtonText(tab, marketplaceTabs, isNewListingFlow, isLastTab)} onSubmit={values => { onCompleteEditListingWizardTab(tab, values, currentUser); }} /> ); } case ADDITIONALITEM: { return ( <EditListingAdditionalitemPanel {...panelProps(ADDITIONALITEM)} submitButtonText={createNextButtonText(tab, marketplaceTabs, isNewListingFlow, isLastTab)} onSubmit={values => { onCompleteEditListingWizardTab(tab, values, currentUser); }} /> ); } // [ADD_TABCASE_HERE] NOTE: Do not delete this line. Used by misc/copyEditLisingPanelAndForm.py default: return null; } }; EditListingWizardTab.defaultProps = { listing: null, updatedTab: null, }; const { array, bool, func, object, oneOf, shape, string } = PropTypes; EditListingWizardTab.propTypes = { params: shape({ id: string.isRequired, slug: string.isRequired, type: oneOf(LISTING_PAGE_PARAM_TYPES).isRequired, tab: oneOf(SUPPORTED_TABS).isRequired, }).isRequired, errors: shape({ createListingDraftError: object, publishListingError: object, updateListingError: object, showListingsError: object, uploadImageError: object, }).isRequired, fetchInProgress: bool.isRequired, newListingPublished: bool.isRequired, history: shape({ push: func.isRequired, replace: func.isRequired, }).isRequired, images: array.isRequired, availability: object.isRequired, // We cannot use propTypes.listing since the listing might be a draft. listing: shape({ attributes: shape({ publicData: object, description: string, geolocation: object, pricing: object, title: string, }), images: array, }), handleCreateFlowTabScrolling: func.isRequired, handlePublishListing: func.isRequired, onUpdateListing: func.isRequired, onCreateListingDraft: func.isRequired, onImageUpload: func.isRequired, onUpdateImageOrder: func.isRequired, onRemoveImage: func.isRequired, onChange: func.isRequired, updatedTab: string, updateInProgress: bool.isRequired, intl: intlShape.isRequired, }; export default EditListingWizardTab;
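// A minimal illustration (hypothetical params and a two-tab list, not from the
// app's routes) of how pathParamsToNextTab advances the wizard after a draft
// update:
//
//   pathParamsToNextTab({ id: '123', type: 'draft', tab: DESCRIPTION }, DESCRIPTION, [DESCRIPTION, PHOTOS])
//   // => { id: '123', type: 'draft', tab: PHOTOS }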
subscription_item.rs
// ====================================== // This file was automatically generated. // ====================================== use crate::config::{Client, Response}; use crate::ids::{PlanId, SubscriptionId, SubscriptionItemId}; use crate::params::{Deleted, Expand, List, Metadata, Object, Timestamp}; use crate::resources::{Plan, SubscriptionItemBillingThresholds, TaxRate}; use serde_derive::{Deserialize, Serialize}; /// The resource representing a Stripe "SubscriptionItem". /// /// For more details see [https://stripe.com/docs/api/subscription_items/object](https://stripe.com/docs/api/subscription_items/object). #[derive(Clone, Debug, Deserialize, Serialize)] pub struct
{
    /// Unique identifier for the object.
    pub id: SubscriptionItemId,

    /// Define thresholds at which an invoice will be sent, and the related subscription advanced to a new billing period.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub billing_thresholds: Option<SubscriptionItemBillingThresholds>,

    /// Time at which the object was created.
    ///
    /// Measured in seconds since the Unix epoch.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created: Option<Timestamp>,

    // Always true for a deleted object
    #[serde(default)]
    pub deleted: bool,

    /// Set of key-value pairs that you can attach to an object.
    ///
    /// This can be useful for storing additional information about the object in a structured format.
    #[serde(default)]
    pub metadata: Metadata,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub plan: Option<Plan>,

    /// The [quantity](https://stripe.com/docs/subscriptions/quantities) of the plan to which the customer should be subscribed.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub quantity: Option<u64>,

    /// The `subscription` this `subscription_item` belongs to.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub subscription: Option<String>,

    /// The tax rates which apply to this `subscription_item`.
    ///
    /// When set, the `default_tax_rates` on the subscription do not apply to this `subscription_item`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tax_rates: Option<Vec<TaxRate>>,
}

impl SubscriptionItem {
    /// Returns a list of your subscription items for a given subscription.
    pub fn list(
        client: &Client,
        params: ListSubscriptionItems<'_>,
    ) -> Response<List<SubscriptionItem>> {
        client.get_query("/subscription_items", &params)
    }

    /// Retrieves the subscription item with the given ID.
    pub fn retrieve(
        client: &Client,
        id: &SubscriptionItemId,
        expand: &[&str],
    ) -> Response<SubscriptionItem> {
        client.get_query(&format!("/subscription_items/{}", id), &Expand { expand })
    }

    /// Updates the plan or quantity of an item on a current subscription.
    pub fn update(
        client: &Client,
        id: &SubscriptionItemId,
        params: UpdateSubscriptionItem<'_>,
    ) -> Response<SubscriptionItem> {
        client.post_form(&format!("/subscription_items/{}", id), &params)
    }

    /// Deletes an item from the subscription.
    ///
    /// Removing a subscription item from a subscription will not cancel the subscription.
    pub fn delete(
        client: &Client,
        id: &SubscriptionItemId,
    ) -> Response<Deleted<SubscriptionItemId>> {
        client.delete(&format!("/subscription_items/{}", id))
    }
}

impl Object for SubscriptionItem {
    type Id = SubscriptionItemId;
    fn id(&self) -> Self::Id {
        self.id.clone()
    }
    fn object(&self) -> &'static str {
        "subscription_item"
    }
}

/// The parameters for `SubscriptionItem::list`.
#[derive(Clone, Debug, Serialize)]
pub struct ListSubscriptionItems<'a> {
    /// A cursor for use in pagination.
    ///
    /// `ending_before` is an object ID that defines your place in the list.
    /// For instance, if you make a list request and receive 100 objects, starting with `obj_bar`, your subsequent call can include `ending_before=obj_bar` in order to fetch the previous page of the list.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ending_before: Option<SubscriptionItemId>,

    /// Specifies which fields in the response should be expanded.
    #[serde(skip_serializing_if = "Expand::is_empty")]
    pub expand: &'a [&'a str],

    /// A limit on the number of objects to be returned.
    ///
    /// Limit can range between 1 and 100, and the default is 10.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub limit: Option<u64>,

    /// A cursor for use in pagination.
/// /// `starting_after` is an object ID that defines your place in the list. /// For instance, if you make a list request and receive 100 objects, ending with `obj_foo`, your subsequent call can include `starting_after=obj_foo` in order to fetch the next page of the list. #[serde(skip_serializing_if = "Option::is_none")] pub starting_after: Option<SubscriptionItemId>, /// The ID of the subscription whose items will be retrieved. pub subscription: SubscriptionId, } impl<'a> ListSubscriptionItems<'a> { pub fn new(subscription: SubscriptionId) -> Self { ListSubscriptionItems { ending_before: Default::default(), expand: Default::default(), limit: Default::default(), starting_after: Default::default(), subscription, } } } /// The parameters for `SubscriptionItem::update`. #[derive(Clone, Debug, Serialize, Default)] pub struct UpdateSubscriptionItem<'a> { /// Define thresholds at which an invoice will be sent, and the subscription advanced to a new billing period. #[serde(skip_serializing_if = "Option::is_none")] pub billing_thresholds: Option<SubscriptionItemBillingThresholds>, /// Specifies which fields in the response should be expanded. #[serde(skip_serializing_if = "Expand::is_empty")] pub expand: &'a [&'a str], /// Set of key-value pairs that you can attach to an object. /// /// This can be useful for storing additional information about the object in a structured format. #[serde(skip_serializing_if = "Option::is_none")] pub metadata: Option<Metadata>, /// The identifier of the new plan for this subscription item. #[serde(skip_serializing_if = "Option::is_none")] pub plan: Option<PlanId>, /// Flag indicating whether to [prorate](https://stripe.com/docs/billing/subscriptions/prorations) switching plans during a billing cycle. #[serde(skip_serializing_if = "Option::is_none")] pub prorate: Option<bool>, /// If set, the proration will be calculated as though the subscription was updated at the given time. /// /// This can be used to apply the same proration that was previewed with the [upcoming invoice](#retrieve_customer_invoice) endpoint. #[serde(skip_serializing_if = "Option::is_none")] pub proration_date: Option<Timestamp>, /// The quantity you'd like to apply to the subscription item you're creating. #[serde(skip_serializing_if = "Option::is_none")] pub quantity: Option<u64>, /// The tax rates which apply to this `subscription_item`. /// /// When set, the `default_tax_rates` on the subscription do not apply to this `subscription_item`. #[serde(skip_serializing_if = "Option::is_none")] pub tax_rates: Option<Vec<String>>, } impl<'a> UpdateSubscriptionItem<'a> { pub fn new() -> Self { UpdateSubscriptionItem { billing_thresholds: Default::default(), expand: Default::default(), metadata: Default::default(), plan: Default::default(), prorate: Default::default(), proration_date: Default::default(), quantity: Default::default(), tax_rates: Default::default(), } } }
SubscriptionItem
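// A minimal usage sketch of the API above (not part of the generated file;
// `client` and `subscription_id` are assumed to exist, error handling elided):
//
//     let params = ListSubscriptionItems::new(subscription_id);
//     let items = SubscriptionItem::list(&client, params)?;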
project-item.ts
/// <reference path="base-component.ts" /> /// <reference path="../decorators/autobind.ts" /> /// <reference path="../models/project.ts" /> /// <reference path="../models/drag-drop.ts" /> namespace App { // ProjectItem Class export class ProjectItem extends Component<HTMLUListElement, HTMLLIElement> implements Draggable { private project: Project;
return "1 person"; } return `${this.project.people} people`; } public constructor(hostElementId: string, project: Project) { super("single-project", hostElementId, false, project.id); this.project = project; this.configure(); this.renderProjectListContent(); } @autobind public dragStartListener(event: DragEvent) { // We only transfer the id to save memory. This is sufficient to retrieve the object data when we drop it. event.dataTransfer!.setData("text/plain", this.project.id); event.dataTransfer!.effectAllowed = "move"; } public dragEndListener(_: DragEvent) { console.log("DragEnd"); } public configure() { this.element.addEventListener("dragstart", this.dragStartListener); this.element.addEventListener("dragend", this.dragEndListener); } public renderProjectListContent() { this.element.querySelector("h2")!.textContent = this.project.title; this.element.querySelector("h3")!.textContent = this.persons + " assigned"; this.element.querySelector("p")!.textContent = this.project.description; } } }
public get persons() {
      if (this.project.people === 1) {
download.go
package main import ( "errors" "io" "net/http" "os" "github.com/sirupsen/logrus" ) // DownloadFile makes a GET request to download a photo/video etc. to a file func
(url, dir, name string) error {
	logrus.Debugf("downloading %s to %s/%s", url, dir, name)

	res, err := http.Get(url)
	if err != nil {
		return err
	}
	// Close the body on every path, including the early returns below.
	defer res.Body.Close()

	if res.StatusCode != http.StatusOK {
		return errors.New("could not download file")
	}

	if res.ContentLength == 0 {
		logrus.Debugf("tried to download %s, but got nothing", url)
		return nil
	}

	f, err := os.Create(dir + "/" + name)
	if err != nil {
		return err
	}
	defer f.Close()

	_, err = io.Copy(f, res.Body)
	if err != nil {
		return err
	}

	return nil
}
DownloadFile
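// A minimal call sketch (hypothetical URL and paths, not from this repo):
//
//	if err := DownloadFile("https://example.com/photo.jpg", "downloads", "photo.jpg"); err != nil {
//		logrus.Error(err)
//	}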
RGB.d.ts
import type { IColorClass } from './IColorClass';
import type { StringEnum } from './utils/types';
export declare type RGBTuple = [number, number, number];
/**
 * Valid string enumerations for formatting `ColorRGB` into either a string, or
 * an integer number.
 *
 * @enum
 */
declare const RGBFormat: StringEnum;
export declare type ERGBStringFormat = typeof RGBFormat[string];
/**
 * RGB with Alpha color-space. The red, green, and blue channels are 8-bit
 * bytes (0..255) and will round/truncate on manipulation.
 */
export declare class ColorRGB implements IColorClass {
    #private;
    /**
     * The accepted string formats for parsing and generation
     *
     * @enum
     */
    static readonly Formats: StringEnum;
    constructor();
    /**
     * The red component as a byte (0..255) integer
     */
    get red(): number;
    set red(byteValue: number);
    /**
     * The green component as a byte (0..255) integer
     */
    get green(): number;
    set green(byteValue: number);
    /**
     * The blue component as a byte (0..255) integer
     */
    get blue(): number;
    set blue(byteValue: number);
    /**
     * The alpha, or opacity, of the color as a unit (0..1) float
     */
    get alpha(): number;
    set alpha(value: number);
    /**
     * The following enums are accepted for formats:
     * - `INTEGER`: Integer representation including alpha as the LSB if it is not
     * the default 1.0.
     * - `INTEGER_ALPHA`: Integer representation with alpha included as the LSB.
     * - `HEX`: Hexadecimal string representation, only includes alpha as the LSB
     * if it is not the default opaque (1.0).
     * - `HEX_ALPHA`: Hexadecimal string with the alpha as the LSB.
     * - `FUNCTIONAL` (default): CSS-style functional notation. Only includes the
     * alpha channel if it is not opaque (1.0).
     * - `FUNCTIONAL_ALPHA`: CSS-style functional notation with the alpha
     * channel. Uses the "rgba()" function style.
     *
     * @param format Optional enum for the output format. Defaults to functional.
     * @returns String representation
     */
    toString(format?: ERGBStringFormat): string;
    /**
     * Converts this RGB Color into its integer representation.
     *
     * By default the alpha information is only included if the alpha value is
     * not 1.0, or the `forceAlpha` flag is true (defaults to false). For
     * serialization of colors it may be best to have this flag as true and
     * manage the alpha channel's byte position with the `alphaMSB` flag for more
     * consistent byte arrangement.
     *
     * Additionally the `alphaMSB` switch can be used to move the alpha
     * information to the Most Significant Byte portion of the integer. Otherwise
     * (default) it remains as the Least Significant Byte.
     *
     * @param {boolean} [forceAlpha = false] If this flag is true, then
     * regardless of whether or not the alpha channel is opaque (1), the
     * alpha information will be included in the results. This defaults to false,
     * which will only use the alpha information if it is not completely opaque.
     * @param {boolean} [alphaMSB = false] Instructs the alpha component to be the
     * Most Significant Byte in the final result. If false (default) then it will
     * be the Least Significant Byte.
     * @returns {number} Integer number representation of the color.
     */
    toInteger(forceAlpha?: boolean, alphaMSB?: boolean): number;
    /**
     * Converts this RGB Color into its hexadecimal string representation.
     *
     * By default the alpha information is only included if the alpha value is
     * not 1.0, or the `forceAlpha` flag is true (defaults to false). For
     * serialization of colors it may be best to have this flag as true and
     * manage the alpha channel's byte position with the `alphaMSB` flag for more
     * consistent byte arrangement.
     *
     * Additionally the `alphaMSB` switch can be used to move the alpha
     * information to the Most Significant Byte portion of the integer. Otherwise
     * (default) it remains as the Least Significant Byte.
     *
     * @param {boolean} [forceAlpha = false] If this flag is true, then
     * regardless of whether or not the alpha channel is opaque (1), the
     * alpha information will be included in the results. This defaults to false,
     * which will only use the alpha information if it is not completely opaque.
     * @param {boolean} [alphaMSB = false] Instructs the alpha component to be the
     * Most Significant Byte in the final result. If false (default) then it will
     * be the Least Significant Byte.
     * @returns {string} Hexadecimal string representation
     */
    toHexString(forceAlpha?: boolean, alphaMSB?: boolean): string;
    /**
     * Converts this RGB Color into its functional-notation string, as if it was
     * being used with CSS.
     *
     * By default the alpha information is only included if the alpha value is
     * not 1.0, or the `forceAlpha` flag is true (defaults to false). Additionally
     * it is truncated to 4 points of precision.
     *
     * The output follows this format:
     * ```
     * rgb(255, 180, 127)
     * rgba(255, 180, 127, 0.5)
     * ```
     *
     * @param {boolean} [forceAlpha = false] If this flag is true, then
     * regardless of whether or not the alpha channel is opaque (1), the
     * alpha information will be included in the results. This defaults to false,
     * which will only use the alpha information if it is not completely opaque.
     * @returns {string} Functional-notation string
     */
    toFuncString(forceAlpha?: boolean): string;
    /**
     * Returns this color as an Array of numbers. The first 3 components are the
     * RGB channels as byte integers (0..255). The last component is the alpha
     * channel as its unit-float (0..1).
     *
     * @returns {Array} Array of component values
     */
    toArray(): number[];
    /**
     * Sets the components of this RGB Color using variable arguments. The order
     * of the variables is taken as `set(R, G, B, A)`. Any missing components are
     * skipped.
     *
     * This will parse string values to the best of its ability. This includes
     * parameter detection, and then treatment depending on the type.
     *
     * For the RGB components the following formats are accepted:
     * - Integer 0..255 = mapped directly to the component
     * - Float 0..255 = truncates the decimal point and is applied
     * - Percentage 0..100% = applied to the range 0..255 and set
     *
     * For the alpha component, any value given is clamped to a unit 0..1. For
     * floats and percentages this is straightforward; for integers it just
     * becomes an on/off of 0 or 1. In other words, no byte conversion is made.
     *
     * @returns `this` for method-chaining
     */
    set(...components: (number | string)[]): IColorClass;
    /**
     * Converts an incoming integer number into its RGB(A) channel values and
     * sets this `ColorRGB` components appropriately.
     *
     * @param value Incoming integer number to convert
     * @param useAlpha If true, then an alpha component is present on this value,
     * and will be parsed appropriately. Default is `false`.
     * @param alphaMSB When `useAlpha` is true, this flag sets whether the alpha
     * component is in the Most-Significant-Byte, or the Least-Significant-Byte.
     * Default is to treat alpha as the LSB.
     * @returns `this` for method-chaining
     */
    fromInteger(value: number, useAlpha?: boolean, alphaMSB?: boolean): IColorClass;
    /**
     * Parses the incoming string as a hexadecimal notation RGB(A) color. This is
     * case-insensitive, and the prefix "#" is optional. Accepts the following
     * formats:
     *
     * - `#FA0`: Short-form, half-byte values for each channel RGB. Will be
     * resized to the full-byte size 0..255.
     * - `#FA08`: Short-form, half-byte values for the RGB and Alpha channels.
     * Will be resized to the full-byte size 0..255.
     * - `#FFAA00`: Long-form, byte values for the RGB channels.
     * - `#FFAA0088`: Long-form, byte values for the RGB and Alpha channels.
     *
     * @param str Input string to parse
     * @returns `this` for method-chaining
     *
     * @throws {TypeError} If the string is not parsable as a hex value
     * @throws {TypeError} If the string has too many or too few digits
     */
    fromHexString(str: string): ColorRGB;
    /**
     * Parses the input string as a CSS4 functional-notation color value. Only
     * accepts the `rgb()` and `rgba()` functions. Both the comma-separated and
     * space-separated formats are accepted. If the space-separated version is
     * used with an alpha channel, then a forward-slash delimiter is required
     * before the alpha channel. It will convert numeric formats in integer,
     * fractional, and scientific notation. As well as supporting percentages, and
     * the new "none" keyword for CSS4 (just implies 0).
     * There is some forgiveness
     * in the formatting since it's regular-expression based: it tolerates things
     * like mixed space/comma separation. Additionally, according to
     * the CSS4 spec, the `rgb()` version can still accept an alpha channel.
     * Either way, at least 3 components are required for at least RGB.
     *
     * Example formats accepted.
     * ```
     * rgb(255, 127, 64)
     * rgb(255 127 64)
     * rgb(255, 127, 64, 0.5)
     * rgb(255 127 64 / 0.5)
     * rgba(100%, 50%, 25%)
     * rgba(100% 50% 25% / 50%)
     * ```
     *
     * @param str Input string to parse
     * @returns `this` for method-chaining
     *
     * @throws {TypeError} if the string cannot be parsed
     * @throws {TypeError} if the number of components is invalid
     */
    fromFuncString(str: string): ColorRGB;
    /**
     * Converts an incoming string to acceptable components and sets the channels
     * of this ColorRGB object. Will attempt to parse as each format in order
     * until one does not give an error. If none of the processes work then a
     * `TypeError` is thrown specifying so.
     *
     * Accepts the following formats with their specifications:
     *
     * ### Named Colors (X11)
     * Checks if the input string is a valid X11 color name as specified in the
     * CSS4 color module. If there is a match, it is converted to hexadecimal and
     * then processed.
     *
     * The special named color "transparent" is accepted and will result in black
     * with an alpha of 0 (fully transparent).
     *
     * ### Hexadecimal
     * Uses the {@link ColorRGB.fromHexString} method to parse as a hexadecimal
     * string. This is case insensitive and accepts shortform and longform hex
     * strings, with or without alpha channel. As with most hex strings, if there
     * is an alpha component it is the least-significant byte. Additionally, the
     * prefix "#" is optional as well.
     *
     * - `#FA0`: Short-form, half-byte values for each channel RGB. Will be
     * resized to the full-byte size 0..255.
     * - `#FA08`: Short-form, half-byte values for the RGB and Alpha channels.
     * Will be resized to the full-byte size 0..255.
     * - `#FFAA00`: Long-form, byte values for the RGB channels.
     * - `#FFAA0088`: Long-form, byte values for the RGB and Alpha channels.
     *
     * ### Functional-notation
     * Uses the {@link ColorRGB.fromFuncString} method to parse as a
     * functional notation string in the style of CSS4, with some forgiveness.
     * Will accept either 3-component for RGB, or 4-component for RGBA. Each
     * parameter can be either an integer, float, or percentage value which will
     * be converted as appropriate for the channel.
     *
     * Example formats accepted.
     * ```
     * rgb(255, 127, 64)
     * rgb(255 127 64)
     * rgb(255, 127, 64, 0.5)
     * rgb(255 127 64 / 0.5)
     * rgba(100%, 50%, 25%)
     * rgba(100% 50% 25% / 50%)
     *
     * rgb(200.5, 1.27e2, +64 / .5)
     * ```
     *
     * @param str Input string
     * @returns `this` for method-chaining
     */
    fromString(str: string): IColorClass;
    /**
     * Sets the components of this `ColorRGB` given an array. This is supplied
     * for clarity of API, but really just shortcuts to spreading the array into
     * the `ColorRGB.set()` function.
     *
     * Accepts both strings and numbers. Strings will attempt to be converted
     * based on whatever type the value can be detected as.
     *
     * @see {@link ColorRGB.set} for the underlying functionality.
     * @param arr Input array
     * @returns `this` for method-chaining
     */
    fromArray(arr: (number | string)[]): IColorClass;
    /**
     * Attempts to set the components of this `ColorRGB` given potential
     * properties of the supplied object. Any missing components will default to
     * 0, except for alpha which defaults to 1 (opaque).
     *
     * Each color searches for a single-letter property, or the full-word name:
     * - Red: `obj.r` OR `obj.red` OR 0
     * - Green: `obj.g` OR `obj.green` OR 0
     * - Blue: `obj.b` OR `obj.blue` OR 0
     * - Alpha: `obj.a` OR `obj.alpha` OR `obj.opacity` OR 1
     *
     * @param obj Plain JS object
     * @returns `this` for method-chaining
     */
    fromObject(obj: Record<any, any>): IColorClass;
    /**
     * Attempts to parse the incoming parameter as a ColorRGB object and sets the
     * appropriate channels when found. Any missing components will use their
     * defaults, which for RGB is 0.0, and for Alpha is 1.0.
     *
     * Any failure to parse the object will throw an `Error` object. If a null,
     * or undefined object is supplied it will be quietly skipped.
     *
     * @param arg The argument to attempt to parse.
     * @returns `this` for method-chaining
     */
    parse(arg: any): IColorClass;
}
export default ColorRGB;
* Returns the string representation of this color, with an optional formatting
 * parameter.
 *
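// A minimal usage sketch of the declarations above (illustrative; the exact
// key on the `Formats` enum is an assumption since `StringEnum` is opaque here):
//
//   const c = new ColorRGB();
//   c.fromString('#FA0');                  // short-form hex, expands to #FFAA00
//   const css = c.toString('FUNCTIONAL');  // e.g. "rgb(255, 170, 0)"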
installer.rs
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. //! This is the Fuchsia Installer implementation that talks to fuchsia.update.installer FIDL API. use crate::{ app_set::FuchsiaAppSet, install_plan::{FuchsiaInstallPlan, UpdatePackageUrl}, }; use anyhow::{anyhow, Context as _}; use fidl_connector::{Connect, ServiceReconnector}; use fidl_fuchsia_hardware_power_statecontrol::RebootReason; use fidl_fuchsia_update_installer::{ InstallerMarker, InstallerProxy, RebootControllerMarker, RebootControllerProxy, }; use fidl_fuchsia_update_installer_ext::{ start_update, FetchFailureReason, Initiator, MonitorUpdateAttemptError, Options, PrepareFailureReason, State, StateId, UpdateAttemptError, }; use fuchsia_async as fasync; use fuchsia_component::client::connect_to_protocol; use fuchsia_url::pkg_url::PkgUrl; use fuchsia_zircon as zx; use futures::{future::LocalBoxFuture, lock::Mutex as AsyncMutex, prelude::*}; use log::{info, warn}; use omaha_client::{ app_set::AppSet as _, installer::{AppInstallResult, Installer, ProgressObserver}, protocol::{ request::InstallSource, response::{OmahaStatus, Response}, }, request_builder::RequestParams, }; use std::{rc::Rc, time::Duration}; use thiserror::Error; /// Represents possible reasons the installer could have ended in a failure state. Not exhaustive. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum InstallerFailureReason { Internal, OutOfSpace, UnsupportedDowngrade, } impl From<FetchFailureReason> for InstallerFailureReason { fn from(r: FetchFailureReason) -> InstallerFailureReason { match r { FetchFailureReason::Internal => InstallerFailureReason::Internal, FetchFailureReason::OutOfSpace => InstallerFailureReason::OutOfSpace, } } } impl From<PrepareFailureReason> for InstallerFailureReason { fn from(r: PrepareFailureReason) -> InstallerFailureReason { match r { PrepareFailureReason::Internal => InstallerFailureReason::Internal, PrepareFailureReason::OutOfSpace => InstallerFailureReason::OutOfSpace, PrepareFailureReason::UnsupportedDowngrade => { InstallerFailureReason::UnsupportedDowngrade } } } } /// Information from the config about whether an update is urgent. #[derive(Debug)] pub struct InstallResult { pub urgent_update: bool, } /// Information about a specific failure state that the installer ended in. #[derive(Debug, Copy, Clone)] pub struct InstallerFailure { state_name: &'static str, reason: InstallerFailureReason, } impl InstallerFailure { /// Returns the name of the system-updater state this failure occurred in pub fn state_name(self) -> &'static str { self.state_name } /// Returns the reason this failure occurred pub fn reason(self) -> InstallerFailureReason { self.reason } #[cfg(test)] pub fn new(state_name: &'static str, reason: InstallerFailureReason) -> Self { Self { state_name, reason } } } #[derive(Debug, Error)] pub enum FuchsiaInstallError { #[error("generic error")] Failure(#[from] anyhow::Error), #[error("FIDL error")] FIDL(#[from] fidl::Error), /// System update installer error. 
#[error("start update installer failed")] StartUpdate(#[from] UpdateAttemptError), #[error("monitor update installer failed")] MonitorUpdate(#[from] MonitorUpdateAttemptError), #[error("installer encountered failure state: {0:?}")] InstallerFailureState(InstallerFailure), #[error("installation ended unexpectedly")] InstallationEndedUnexpectedly, #[error("connect to installer service failed")] Connect(#[source] anyhow::Error), } #[derive(Debug)] pub struct FuchsiaInstaller<C = ServiceReconnector<InstallerMarker>> { connector: C, reboot_controller: Option<RebootControllerProxy>, app_set: Rc<AsyncMutex<FuchsiaAppSet>>, } impl FuchsiaInstaller<ServiceReconnector<InstallerMarker>> { pub fn new(app_set: Rc<AsyncMutex<FuchsiaAppSet>>) -> Self { let connector = ServiceReconnector::<InstallerMarker>::new(); Self { connector, reboot_controller: None, app_set } } } impl<C: Connect<Proxy = InstallerProxy> + Send> FuchsiaInstaller<C> { async fn perform_install_system_update<'a>( &'a mut self, url: &'a PkgUrl, install_source: &'a InstallSource, observer: Option<&'a dyn ProgressObserver>, ) -> Result<(), FuchsiaInstallError> { let options = Options { initiator: match install_source { InstallSource::ScheduledTask => Initiator::Service, InstallSource::OnDemand => Initiator::User, }, should_write_recovery: true, allow_attach_to_existing_attempt: true, }; let proxy = self.connector.connect().map_err(FuchsiaInstallError::Connect)?; let (reboot_controller, reboot_controller_server_end) = fidl::endpoints::create_proxy::<RebootControllerMarker>() .map_err(FuchsiaInstallError::FIDL)?; self.reboot_controller = Some(reboot_controller); let mut update_attempt = start_update(url, options, &proxy, Some(reboot_controller_server_end)).await?; while let Some(state) = update_attempt.try_next().await? 
{ info!("Installer entered state: {}", state.name()); if let Some(observer) = observer { if let Some(progress) = state.progress() { observer .receive_progress( Some(state.name()), progress.fraction_completed(), state.download_size(), Some(progress.bytes_downloaded()), ) .await; } else { observer.receive_progress(Some(state.name()), 0., None, None).await; } } if state.id() == StateId::WaitToReboot || state.is_success() { return Ok(()); } else if state.is_failure() { match state { State::FailFetch(fail_fetch_data) => { return Err(FuchsiaInstallError::InstallerFailureState(InstallerFailure { state_name: state.name(), reason: fail_fetch_data.reason().into(), })); } State::FailPrepare(prepare_failure_reason) => { return Err(FuchsiaInstallError::InstallerFailureState(InstallerFailure { state_name: state.name(), reason: prepare_failure_reason.into(), })); } _ => { return Err(FuchsiaInstallError::InstallerFailureState(InstallerFailure { state_name: state.name(), reason: InstallerFailureReason::Internal, })) } } } } Err(FuchsiaInstallError::InstallationEndedUnexpectedly) } } impl<C: Connect<Proxy = InstallerProxy> + Send + Sync> Installer for FuchsiaInstaller<C> { type InstallPlan = FuchsiaInstallPlan; type Error = FuchsiaInstallError; type InstallResult = InstallResult; fn perform_install<'a>( &'a mut self, install_plan: &'a FuchsiaInstallPlan, observer: Option<&'a dyn ProgressObserver>, ) -> LocalBoxFuture<'a, (Self::InstallResult, Vec<AppInstallResult<Self::Error>>)> { let is_system_update = install_plan.is_system_update(); async move { let mut app_results = vec![]; for url in &install_plan.update_package_urls { app_results.push(match url { UpdatePackageUrl::System(url) => self .perform_install_system_update(&url, &install_plan.install_source, observer) .await .into(), UpdatePackageUrl::Package(_) => { if is_system_update { AppInstallResult::Deferred } else { todo!("implement installing packages") } } }); } (InstallResult { urgent_update: install_plan.urgent_update }, app_results) } .boxed_local() } fn perform_reboot(&mut self) -> LocalBoxFuture<'_, Result<(), anyhow::Error>> { async move { match self.reboot_controller.take() { Some(reboot_controller) => { reboot_controller .unblock() .context("notify installer it can reboot when ready")?; } None => { // FIXME Need the direct reboot path anymore? connect_to_protocol::<fidl_fuchsia_hardware_power_statecontrol::AdminMarker>()? .reboot(RebootReason::SystemUpdate) .await? .map_err(zx::Status::from_raw) .context("reboot error")?; } } // Device should be rebooting now, do not return because state machine expects // perform_reboot() to block, wait for 5 minutes and if reboot still hasn't happened, // return an error. 
fasync::Timer::new(Duration::from_secs(60 * 5)).await; Err(anyhow!("timed out while waiting for device to reboot")) } .boxed_local() } fn try_create_install_plan<'a>( &'a self, request_params: &'a RequestParams, response: &'a Response, ) -> LocalBoxFuture<'a, Result<Self::InstallPlan, Self::Error>> { async move { let system_app_id = self.app_set.lock().await.get_system_app_id().to_owned(); try_create_install_plan_impl(request_params, response, system_app_id) } .boxed_local() } } fn try_create_install_plan_impl( request_params: &RequestParams, response: &Response, system_app_id: String, ) -> Result<FuchsiaInstallPlan, FuchsiaInstallError> { let mut update_package_urls = vec![]; let mut urgent_update = false; if response.apps.is_empty() { return Err(FuchsiaInstallError::Failure(anyhow!("No app in Omaha response"))); } for app in &response.apps { if app.status != OmahaStatus::Ok { return Err(FuchsiaInstallError::Failure(anyhow!( "Found non-ok app status for {:?}: {:?}", app.id, app.status ))); } let update_check = if let Some(update_check) = &app.update_check
else { return Err(FuchsiaInstallError::Failure(anyhow!("No update_check in Omaha response"))); }; let urls = match update_check.status { OmahaStatus::Ok => { if let Some(urls) = &update_check.urls { &urls.url } else { return Err(FuchsiaInstallError::Failure(anyhow!("No urls in Omaha response"))); } } OmahaStatus::NoUpdate => { continue; } _ => { if let Some(info) = &update_check.info { warn!("update check status info: {}", info); } return Err(FuchsiaInstallError::Failure(anyhow!( "Unexpected update check status: {:?}", update_check.status ))); } }; let (url, rest) = if let Some((url, rest)) = urls.split_first() { (url, rest) } else { return Err(FuchsiaInstallError::Failure(anyhow!("No url in Omaha response"))); }; if !rest.is_empty() { warn!("Only 1 url is supported, found {}", urls.len()); } let manifest = if let Some(manifest) = &update_check.manifest { manifest } else { return Err(FuchsiaInstallError::Failure(anyhow!("No manifest in Omaha response"))); }; let (package, rest) = if let Some((package, rest)) = manifest.packages.package.split_first() { (package, rest) } else { return Err(FuchsiaInstallError::Failure(anyhow!("No package in Omaha response"))); }; if !rest.is_empty() { warn!("Only 1 package is supported, found {}", manifest.packages.package.len()); } let full_url = url.codebase.clone() + &package.name; let pkg_url = match PkgUrl::parse(&full_url) { Ok(pkg_url) => pkg_url, Err(err) => { return Err(FuchsiaInstallError::Failure(anyhow!( "Failed to parse {} to PkgUrl: {}", full_url, err ))) } }; update_package_urls.push(if app.id == system_app_id { urgent_update = update_check.urgent_update.unwrap_or(false); UpdatePackageUrl::System(pkg_url) } else { UpdatePackageUrl::Package(pkg_url) }); } if update_package_urls.is_empty() { return Err(FuchsiaInstallError::Failure(anyhow!("No app has update available"))); } Ok(FuchsiaInstallPlan { update_package_urls, install_source: request_params.source.clone(), urgent_update, }) } #[cfg(test)] mod tests { use { super::*, assert_matches::assert_matches, fidl_fuchsia_update_installer::{ FailPrepareData, InstallationProgress, InstallerRequest, InstallerRequestStream, RebootControllerRequest, State, UpdateInfo, }, fuchsia_async as fasync, futures::future::BoxFuture, omaha_client::protocol::response::{App, Manifest, Package, Packages, UpdateCheck}, parking_lot::Mutex, std::{convert::TryInto, sync::Arc, task::Poll}, }; const TEST_URL: &str = "fuchsia-pkg://fuchsia.com/update/0"; const TEST_URL_BASE: &str = "fuchsia-pkg://fuchsia.com/"; const TEST_PACKAGE_NAME: &str = "update/0"; #[derive(Debug, PartialEq)] struct Progress { operation: Option<String>, progress: f32, total_size: Option<u64>, size_so_far: Option<u64>, } impl Eq for Progress {} struct MockProgressObserver { progresses: Arc<Mutex<Vec<Progress>>>, } impl MockProgressObserver { fn new() -> Self { Self { progresses: Arc::new(Mutex::new(vec![])) } } fn progresses(&self) -> Arc<Mutex<Vec<Progress>>> { Arc::clone(&self.progresses) } } impl ProgressObserver for MockProgressObserver { fn receive_progress( &self, operation: Option<&str>, progress: f32, total_size: Option<u64>, size_so_far: Option<u64>, ) -> BoxFuture<'_, ()> { let operation = operation.map(|s| s.into()); self.progresses.lock().push(Progress { operation, progress, total_size, size_so_far }); future::ready(()).boxed() } } struct MockConnector { proxy: Option<InstallerProxy>, } impl MockConnector { fn new(proxy: InstallerProxy) -> Self { Self { proxy: Some(proxy) } } fn failing() -> Self { Self { proxy: None } } } impl Connect for 
MockConnector { type Proxy = InstallerProxy; fn connect(&self) -> Result<Self::Proxy, anyhow::Error> { self.proxy.clone().ok_or(anyhow::anyhow!("no proxy available")) } } fn new_mock_installer() -> (FuchsiaInstaller<MockConnector>, InstallerRequestStream) { let (proxy, stream) = fidl::endpoints::create_proxy_and_stream::<InstallerMarker>().unwrap(); let app = omaha_client::common::App::builder("system_id", [1]).build(); let app_set = Rc::new(AsyncMutex::new(FuchsiaAppSet::new(app))); let installer = FuchsiaInstaller { connector: MockConnector::new(proxy), reboot_controller: None, app_set, }; (installer, stream) } fn new_installer() -> FuchsiaInstaller<ServiceReconnector<InstallerMarker>> { let app = omaha_client::common::App::builder("system_id", [1]).build(); let app_set = Rc::new(AsyncMutex::new(FuchsiaAppSet::new(app))); FuchsiaInstaller::new(app_set) } #[fasync::run_singlethreaded(test)] async fn test_start_update() { let (mut installer, mut stream) = new_mock_installer(); let plan = FuchsiaInstallPlan { update_package_urls: vec![ UpdatePackageUrl::System(TEST_URL.parse().unwrap()), UpdatePackageUrl::Package(TEST_URL.parse().unwrap()), ], install_source: InstallSource::OnDemand, urgent_update: false, }; let observer = MockProgressObserver::new(); let progresses = observer.progresses(); let installer_fut = async move { let (install_result, app_install_results) = installer.perform_install(&plan, Some(&observer)).await; assert_eq!(install_result.urgent_update, false); assert_matches!( app_install_results.as_slice(), &[AppInstallResult::Installed, AppInstallResult::Deferred] ); assert_matches!(installer.reboot_controller, Some(_)); }; let stream_fut = async move { match stream.next().await.unwrap() { Ok(InstallerRequest::StartUpdate { url, options, monitor, reboot_controller, responder, }) => { assert_eq!(url.url, TEST_URL); let Options { initiator, should_write_recovery, allow_attach_to_existing_attempt, } = options.try_into().unwrap(); assert_eq!(initiator, Initiator::User); assert_matches!(reboot_controller, Some(_)); assert_eq!(should_write_recovery, true); assert_eq!(allow_attach_to_existing_attempt, true); responder .send(&mut Ok("00000000-0000-0000-0000-000000000001".to_owned())) .unwrap(); let monitor = monitor.into_proxy().unwrap(); let () = monitor .on_state(&mut State::Stage(fidl_fuchsia_update_installer::StageData { info: Some(UpdateInfo { download_size: Some(1000), ..UpdateInfo::EMPTY }), progress: Some(InstallationProgress { fraction_completed: Some(0.5), bytes_downloaded: Some(500), ..InstallationProgress::EMPTY }), ..fidl_fuchsia_update_installer::StageData::EMPTY })) .await .unwrap(); let () = monitor .on_state(&mut State::WaitToReboot( fidl_fuchsia_update_installer::WaitToRebootData { info: Some(UpdateInfo { download_size: Some(1000), ..UpdateInfo::EMPTY }), progress: Some(InstallationProgress { fraction_completed: Some(1.0), bytes_downloaded: Some(1000), ..InstallationProgress::EMPTY }), ..fidl_fuchsia_update_installer::WaitToRebootData::EMPTY }, )) .await .unwrap(); } request => panic!("Unexpected request: {:?}", request), } }; future::join(installer_fut, stream_fut).await; assert_eq!( *progresses.lock(), vec![ Progress { operation: Some("stage".to_string()), progress: 0.5, total_size: Some(1000), size_so_far: Some(500) }, Progress { operation: Some("wait_to_reboot".to_string()), progress: 1.0, total_size: Some(1000), size_so_far: Some(1000) } ] ); } #[fasync::run_singlethreaded(test)] async fn test_install_error() { let (mut installer, mut stream) = 
new_mock_installer(); let plan = FuchsiaInstallPlan { update_package_urls: vec![UpdatePackageUrl::System(TEST_URL.parse().unwrap())], install_source: InstallSource::OnDemand, urgent_update: false, }; let installer_fut = async move { assert_matches!( installer.perform_install(&plan, None).await.1.as_slice(), &[AppInstallResult::Failed(FuchsiaInstallError::InstallerFailureState( InstallerFailure { state_name: "fail_prepare", reason: InstallerFailureReason::OutOfSpace } ))] ); }; let stream_fut = async move { match stream.next().await.unwrap() { Ok(InstallerRequest::StartUpdate { monitor, responder, .. }) => { responder .send(&mut Ok("00000000-0000-0000-0000-000000000002".to_owned())) .unwrap(); let monitor = monitor.into_proxy().unwrap(); let () = monitor .on_state(&mut State::FailPrepare(FailPrepareData { reason: Some( fidl_fuchsia_update_installer::PrepareFailureReason::OutOfSpace, ), ..FailPrepareData::EMPTY })) .await .unwrap(); } request => panic!("Unexpected request: {:?}", request), } }; future::join(installer_fut, stream_fut).await; } #[fasync::run_singlethreaded(test)] async fn test_server_close_unexpectedly() { let (mut installer, mut stream) = new_mock_installer(); let plan = FuchsiaInstallPlan { update_package_urls: vec![UpdatePackageUrl::System(TEST_URL.parse().unwrap())], install_source: InstallSource::OnDemand, urgent_update: false, }; let installer_fut = async move { assert_matches!( installer.perform_install(&plan, None).await.1.as_slice(), &[AppInstallResult::Failed(FuchsiaInstallError::InstallationEndedUnexpectedly)] ); }; let stream_fut = async move { match stream.next().await.unwrap() { Ok(InstallerRequest::StartUpdate { monitor, responder, .. }) => { responder .send(&mut Ok("00000000-0000-0000-0000-000000000003".to_owned())) .unwrap(); let monitor = monitor.into_proxy().unwrap(); let () = monitor .on_state(&mut State::Prepare( fidl_fuchsia_update_installer::PrepareData::EMPTY, )) .await .unwrap(); let () = monitor .on_state(&mut State::Fetch(fidl_fuchsia_update_installer::FetchData { info: Some(UpdateInfo { download_size: None, ..UpdateInfo::EMPTY }), progress: Some(InstallationProgress { fraction_completed: Some(0.0), bytes_downloaded: None, ..InstallationProgress::EMPTY }), ..fidl_fuchsia_update_installer::FetchData::EMPTY })) .await .unwrap(); } request => panic!("Unexpected request: {:?}", request), } }; future::join(installer_fut, stream_fut).await; } #[fasync::run_singlethreaded(test)] async fn test_connect_to_installer_failed() { let (mut installer, _) = new_mock_installer(); installer.connector = MockConnector::failing(); let plan = FuchsiaInstallPlan { update_package_urls: vec![UpdatePackageUrl::System(TEST_URL.parse().unwrap())], install_source: InstallSource::OnDemand, urgent_update: false, }; assert_matches!( installer.perform_install(&plan, None).await.1.as_slice(), &[AppInstallResult::Failed(FuchsiaInstallError::Connect(_))] ); } #[test] fn test_reboot() { let mut exec = fasync::TestExecutor::new().unwrap(); let mut installer = new_installer(); let (reboot_controller, mut stream) = fidl::endpoints::create_proxy_and_stream::<RebootControllerMarker>().unwrap(); installer.reboot_controller = Some(reboot_controller); { let mut reboot_future = installer.perform_reboot(); assert_matches!(exec.run_until_stalled(&mut reboot_future), Poll::Pending); assert_matches!(exec.wake_next_timer(), Some(_)); assert_matches!(exec.run_until_stalled(&mut reboot_future), Poll::Ready(Err(_))); } assert_matches!(installer.reboot_controller, None); assert_matches!( 
exec.run_singlethreaded(stream.next()), Some(Ok(RebootControllerRequest::Unblock { .. })) ); assert_matches!(exec.run_singlethreaded(stream.next()), None); } #[fasync::run_singlethreaded(test)] async fn test_simple_response() { let request_params = RequestParams::default(); let mut update_check = UpdateCheck::ok(vec![TEST_URL_BASE.to_string()]); update_check.manifest = Some(Manifest { packages: Packages::new(vec![Package::with_name(TEST_PACKAGE_NAME)]), ..Manifest::default() }); let response = Response { apps: vec![App { update_check: Some(update_check), id: "system_id".into(), ..App::default() }], ..Response::default() }; let install_plan = new_installer().try_create_install_plan(&request_params, &response).await.unwrap(); assert_eq!( install_plan.update_package_urls, vec![UpdatePackageUrl::System(TEST_URL.parse().unwrap())], ); assert_eq!(install_plan.install_source, request_params.source); assert_eq!(install_plan.urgent_update, false); } #[fasync::run_singlethreaded(test)] async fn test_no_app() { let request_params = RequestParams::default(); let response = Response::default(); assert_matches!( new_installer().try_create_install_plan(&request_params, &response).await, Err(FuchsiaInstallError::Failure(_)) ); } #[fasync::run_singlethreaded(test)] async fn test_multiple_app() { let request_params = RequestParams::default(); let system_app = App { update_check: Some(UpdateCheck { manifest: Some(Manifest { packages: Packages::new(vec![Package::with_name(TEST_PACKAGE_NAME)]), ..Manifest::default() }), ..UpdateCheck::ok(vec![TEST_URL_BASE.to_string()]) }), id: "system_id".into(), ..App::default() }; let response = Response { apps: vec![ system_app, App { update_check: Some(UpdateCheck::no_update()), ..App::default() }, ], ..Response::default() }; let install_plan = new_installer().try_create_install_plan(&request_params, &response).await.unwrap(); assert_eq!( install_plan.update_package_urls, vec![UpdatePackageUrl::System(TEST_URL.parse().unwrap())], ); assert_eq!(install_plan.install_source, request_params.source); } #[fasync::run_singlethreaded(test)] async fn test_multiple_package_updates() { let request_params = RequestParams::default(); let system_app = App { update_check: Some(UpdateCheck::no_update()), id: "system_id".into(), ..App::default() }; let package1_app = App { update_check: Some(UpdateCheck { manifest: Some(Manifest { packages: Packages::new(vec![Package::with_name("package1")]), ..Manifest::default() }), ..UpdateCheck::ok(vec![TEST_URL_BASE.to_string()]) }), id: "package1_id".into(), ..App::default() }; let package2_app = App { update_check: Some(UpdateCheck::no_update()), id: "package2_id".into(), ..App::default() }; let package3_app = App { update_check: Some(UpdateCheck { manifest: Some(Manifest { packages: Packages::new(vec![Package::with_name("package3")]), ..Manifest::default() }), ..UpdateCheck::ok(vec![TEST_URL_BASE.to_string()]) }), id: "package3_id".into(), ..App::default() }; let response = Response { apps: vec![system_app, package1_app, package2_app, package3_app], ..Response::default() }; let install_plan = new_installer().try_create_install_plan(&request_params, &response).await.unwrap(); assert_eq!( install_plan.update_package_urls, vec![ UpdatePackageUrl::Package(format!("{TEST_URL_BASE}package1").parse().unwrap()), UpdatePackageUrl::Package(format!("{TEST_URL_BASE}package3").parse().unwrap()) ] ); assert_eq!(install_plan.install_source, request_params.source); } #[fasync::run_singlethreaded(test)] async fn test_mixed_update() { let request_params = 
RequestParams::default(); let system_app = App { update_check: Some(UpdateCheck { manifest: Some(Manifest { packages: Packages::new(vec![Package::with_name(TEST_PACKAGE_NAME)]), ..Manifest::default() }), ..UpdateCheck::ok(vec![TEST_URL_BASE.to_string()]) }), id: "system_id".into(), ..App::default() }; let package_app = App { update_check: Some(UpdateCheck { manifest: Some(Manifest { packages: Packages::new(vec![Package::with_name("some-package")]), ..Manifest::default() }), ..UpdateCheck::ok(vec![TEST_URL_BASE.to_string()]) }), id: "package_id".into(), ..App::default() }; let response = Response { apps: vec![package_app, system_app], ..Response::default() }; let install_plan = new_installer().try_create_install_plan(&request_params, &response).await.unwrap(); assert_eq!( install_plan.update_package_urls, vec![ UpdatePackageUrl::Package(format!("{TEST_URL_BASE}some-package").parse().unwrap()), UpdatePackageUrl::System(TEST_URL.parse().unwrap()) ], ); assert_eq!(install_plan.install_source, request_params.source); } #[fasync::run_singlethreaded(test)] async fn test_no_update_check() { let request_params = RequestParams::default(); let response = Response { apps: vec![App { id: "system_id".into(), ..App::default() }], ..Response::default() }; assert_matches!( new_installer().try_create_install_plan(&request_params, &response).await, Err(FuchsiaInstallError::Failure(_)) ); } #[fasync::run_singlethreaded(test)] async fn test_no_urls() { let request_params = RequestParams::default(); let response = Response { apps: vec![App { update_check: Some(UpdateCheck::default()), id: "system_id".into(), ..App::default() }], ..Response::default() }; assert_matches!( new_installer().try_create_install_plan(&request_params, &response).await, Err(FuchsiaInstallError::Failure(_)) ); } #[fasync::run_singlethreaded(test)] async fn test_app_error_status() { let request_params = RequestParams::default(); let response = Response { apps: vec![App { status: OmahaStatus::Error("error-unknownApplication".to_string()), ..App::default() }], ..Response::default() }; assert_matches!( new_installer().try_create_install_plan(&request_params, &response).await, Err(FuchsiaInstallError::Failure(_)) ); } #[fasync::run_singlethreaded(test)] async fn test_no_update() { let request_params = RequestParams::default(); let response = Response { apps: vec![App { update_check: Some(UpdateCheck::no_update()), ..App::default() }], ..Response::default() }; assert_matches!( new_installer().try_create_install_plan(&request_params, &response).await, Err(FuchsiaInstallError::Failure(_)) ); } #[fasync::run_singlethreaded(test)] async fn test_invalid_url() { let request_params = RequestParams::default(); let response = Response { apps: vec![App { update_check: Some(UpdateCheck::ok(vec!["invalid-url".to_string()])), id: "system_id".into(), ..App::default() }], ..Response::default() }; assert_matches!( new_installer().try_create_install_plan(&request_params, &response).await, Err(FuchsiaInstallError::Failure(_)) ); } #[fasync::run_singlethreaded(test)] async fn test_no_manifest() { let request_params = RequestParams::default(); let response = Response { apps: vec![App { update_check: Some(UpdateCheck::ok(vec![TEST_URL_BASE.to_string()])), id: "system_id".into(), ..App::default() }], ..Response::default() }; assert_matches!( new_installer().try_create_install_plan(&request_params, &response).await, Err(FuchsiaInstallError::Failure(_)) ); } #[fasync::run_singlethreaded(test)] async fn test_urgent_update_attribute_true() { let request_params = 
RequestParams::default(); let mut update_check = UpdateCheck::ok(vec![TEST_URL_BASE.to_string()]); update_check.urgent_update = Some(true); update_check.manifest = Some(Manifest { packages: Packages::new(vec![Package::with_name(TEST_PACKAGE_NAME)]), ..Manifest::default() }); let response = Response { apps: vec![App { update_check: Some(update_check), id: "system_id".into(), ..App::default() }], ..Response::default() }; let install_plan = new_installer().try_create_install_plan(&request_params, &response).await.unwrap(); assert_eq!(install_plan.urgent_update, true); } #[fasync::run_singlethreaded(test)] async fn test_urgent_update_attribute_false() { let request_params = RequestParams::default(); let mut update_check = UpdateCheck::ok(vec![TEST_URL_BASE.to_string()]); update_check.urgent_update = Some(false); update_check.manifest = Some(Manifest { packages: Packages::new(vec![Package::with_name(TEST_PACKAGE_NAME)]), ..Manifest::default() }); let response = Response { apps: vec![App { update_check: Some(update_check), id: "system_id".into(), ..App::default() }], ..Response::default() }; let install_plan = new_installer().try_create_install_plan(&request_params, &response).await.unwrap(); assert_eq!(install_plan.urgent_update, false); } }
{ update_check }
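The `{ update_check }` completion closes the `let update_check = if let Some(update_check) = &app.update_check { ... } else { return Err(...) };` binding. `try_create_install_plan_impl` repeats that same validate-or-bail shape for the urls, the url list, the manifest, and the package list. As a minimal sketch only (the `require` helper and its `String` error type are hypothetical, not part of installer.rs), the pattern collapses to `Option::ok_or_else`:

fn require<T>(opt: Option<T>, what: &str) -> Result<T, String> {
    // Turn a missing field into the same "No X in Omaha response" error shape.
    opt.ok_or_else(|| format!("No {} in Omaha response", what))
}

fn main() -> Result<(), String> {
    let update_check = require(Some("ok"), "update_check")?; // present: unwrapped
    assert_eq!(update_check, "ok");
    assert!(require::<()>(None, "manifest").is_err()); // absent: "No manifest in Omaha response"
    Ok(())
}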
fp_256.rs
use std::{ cmp::{Ord, Ordering, PartialOrd}, fmt::{Display, Formatter, Result as FmtResult}, io::{Read, Result as IoResult, Write}, marker::PhantomData, ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign}, str::FromStr, }; use crate::{ biginteger::{arithmetic as fa, BigInteger as _BigInteger, BigInteger256 as BigInteger}, bytes::{FromBytes, ToBytes}, fields::{Field, FpParameters, LegendreSymbol, PrimeField, SquareRootField}, }; pub trait Fp256Parameters: FpParameters<BigInt = BigInteger> {} #[derive(Derivative)] #[derivative( Default(bound = ""), Hash(bound = ""), Clone(bound = ""), Copy(bound = ""), Debug(bound = ""), PartialEq(bound = ""), Eq(bound = "") )] pub struct Fp256<P>( pub BigInteger, #[derivative(Debug = "ignore")] #[doc(hidden)] pub PhantomData<P>, ); impl<P: Fp256Parameters> Fp256<P> { #[inline] pub fn new(element: BigInteger) -> Self { Fp256::<P>(element, PhantomData) } #[inline] fn is_valid(&self) -> bool { self.0 < P::MODULUS } #[inline] fn reduce(&mut self) { if !self.is_valid() { self.0.sub_noborrow(&P::MODULUS); } } #[inline] fn mont_reduce( &mut self, r0: u64, mut r1: u64, mut r2: u64, mut r3: u64, mut r4: u64, mut r5: u64, mut r6: u64, mut r7: u64, ) { // The Montgomery reduction here is based on Algorithm 14.32 in // Handbook of Applied Cryptography // <http://cacr.uwaterloo.ca/hac/about/chap14.pdf>. let k = r0.wrapping_mul(P::INV); let mut carry = 0; fa::mac_with_carry(r0, k, P::MODULUS.0[0], &mut carry); r1 = fa::mac_with_carry(r1, k, P::MODULUS.0[1], &mut carry); r2 = fa::mac_with_carry(r2, k, P::MODULUS.0[2], &mut carry); r3 = fa::mac_with_carry(r3, k, P::MODULUS.0[3], &mut carry); r4 = fa::adc(r4, 0, &mut carry); let carry2 = carry; let k = r1.wrapping_mul(P::INV); let mut carry = 0; fa::mac_with_carry(r1, k, P::MODULUS.0[0], &mut carry); r2 = fa::mac_with_carry(r2, k, P::MODULUS.0[1], &mut carry); r3 = fa::mac_with_carry(r3, k, P::MODULUS.0[2], &mut carry); r4 = fa::mac_with_carry(r4, k, P::MODULUS.0[3], &mut carry); r5 = fa::adc(r5, carry2, &mut carry); let carry2 = carry; let k = r2.wrapping_mul(P::INV); let mut carry = 0; fa::mac_with_carry(r2, k, P::MODULUS.0[0], &mut carry); r3 = fa::mac_with_carry(r3, k, P::MODULUS.0[1], &mut carry); r4 = fa::mac_with_carry(r4, k, P::MODULUS.0[2], &mut carry); r5 = fa::mac_with_carry(r5, k, P::MODULUS.0[3], &mut carry); r6 = fa::adc(r6, carry2, &mut carry); let carry2 = carry; let k = r3.wrapping_mul(P::INV); let mut carry = 0; fa::mac_with_carry(r3, k, P::MODULUS.0[0], &mut carry); r4 = fa::mac_with_carry(r4, k, P::MODULUS.0[1], &mut carry); r5 = fa::mac_with_carry(r5, k, P::MODULUS.0[2], &mut carry); r6 = fa::mac_with_carry(r6, k, P::MODULUS.0[3], &mut carry); r7 = fa::adc(r7, carry2, &mut carry); (self.0).0[0] = r4; (self.0).0[1] = r5; (self.0).0[2] = r6; (self.0).0[3] = r7; self.reduce(); } } impl<P: Fp256Parameters> Field for Fp256<P> { #[inline] fn zero() -> Self { Fp256::<P>(BigInteger::from(0), PhantomData) } #[inline] fn is_zero(&self) -> bool { self.0.is_zero() } #[inline] fn double(&self) -> Self { let mut temp = *self; temp.double_in_place(); temp } #[inline] fn double_in_place(&mut self) -> &mut Self { // This cannot exceed the backing capacity. self.0.mul2(); // However, it may need to be reduced. 
self.reduce(); self } #[inline] fn one() -> Self { Fp256::<P>(P::R, PhantomData) } #[inline] fn is_one(&self) -> bool { self == &Self::one() } #[inline] fn characteristic<'a>() -> &'a [u64] { P::MODULUS.as_ref() } #[inline] fn square(&self) -> Self { let mut temp = self.clone(); temp.square_in_place(); temp } #[inline] fn square_in_place(&mut self) -> &mut Self { let mut carry = 0; let r1 = fa::mac_with_carry(0, (self.0).0[0], (self.0).0[1], &mut carry); let r2 = fa::mac_with_carry(0, (self.0).0[0], (self.0).0[2], &mut carry); let r3 = fa::mac_with_carry(0, (self.0).0[0], (self.0).0[3], &mut carry); let r4 = carry; let mut carry = 0; let r3 = fa::mac_with_carry(r3, (self.0).0[1], (self.0).0[2], &mut carry); let r4 = fa::mac_with_carry(r4, (self.0).0[1], (self.0).0[3], &mut carry); let r5 = carry; let mut carry = 0; let r5 = fa::mac_with_carry(r5, (self.0).0[2], (self.0).0[3], &mut carry); let r6 = carry; let r7 = r6 >> 63; let r6 = (r6 << 1) | (r5 >> 63); let r5 = (r5 << 1) | (r4 >> 63); let r4 = (r4 << 1) | (r3 >> 63); let r3 = (r3 << 1) | (r2 >> 63); let r2 = (r2 << 1) | (r1 >> 63); let r1 = r1 << 1; let mut carry = 0; let r0 = fa::mac_with_carry(0, (self.0).0[0], (self.0).0[0], &mut carry); let r1 = fa::adc(r1, 0, &mut carry); let r2 = fa::mac_with_carry(r2, (self.0).0[1], (self.0).0[1], &mut carry); let r3 = fa::adc(r3, 0, &mut carry); let r4 = fa::mac_with_carry(r4, (self.0).0[2], (self.0).0[2], &mut carry); let r5 = fa::adc(r5, 0, &mut carry); let r6 = fa::mac_with_carry(r6, (self.0).0[3], (self.0).0[3], &mut carry); let r7 = fa::adc(r7, 0, &mut carry); self.mont_reduce(r0, r1, r2, r3, r4, r5, r6, r7); self } #[inline] fn inverse(&self) -> Option<Self> { if self.is_zero() { None } else { // Guajardo Kumar Paar Pelzl // Efficient Software-Implementation of Finite Fields with Applications to // Cryptography // Algorithm 16 (BEA for Inversion in Fp) let one = BigInteger::from(1); let mut u = self.0; let mut v = P::MODULUS; let mut b = Fp256::<P>(P::R2, PhantomData); // Avoids unnecessary reduction step. let mut c = Self::zero(); while u != one && v != one { while u.is_even() { u.div2(); if b.0.is_even() { b.0.div2(); } else { b.0.add_nocarry(&P::MODULUS); b.0.div2(); } } while v.is_even() { v.div2(); if c.0.is_even() { c.0.div2(); } else { c.0.add_nocarry(&P::MODULUS); c.0.div2(); } } if v < u { u.sub_noborrow(&v); b.sub_assign(&c); } else { v.sub_noborrow(&u); c.sub_assign(&b); } } if u == one { Some(b) } else { Some(c) } } } fn inverse_in_place(&mut self) -> Option<&mut Self> { if let Some(inverse) = self.inverse() { *self = inverse; Some(self) } else { None } } #[inline] fn frobenius_map(&mut self, _: usize) { // No-op: No effect in a prime field. 
} } impl<P: Fp256Parameters> PrimeField for Fp256<P> { type Params = P; type BigInt = BigInteger; #[inline] fn from_repr(r: BigInteger) -> Self { let mut r = Fp256(r, PhantomData); if r.is_valid() { r.mul_assign(&Fp256(P::R2, PhantomData)); r } else { Self::zero() } } #[inline] fn into_repr(&self) -> BigInteger { let mut r = *self; r.mont_reduce( (self.0).0[0], (self.0).0[1], (self.0).0[2], (self.0).0[3], 0, 0, 0, 0, ); r.0 } #[inline] fn from_random_bytes(bytes: &[u8]) -> Option<Self> { let mut result = Self::zero(); if result.0.read_le((&bytes[..]).by_ref()).is_ok() { result.0.as_mut()[3] &= 0xffffffffffffffff >> P::REPR_SHAVE_BITS; if result.is_valid() { Some(result) } else { None } } else { None } } #[inline] fn multiplicative_generator() -> Self { Fp256::<P>(P::GENERATOR, PhantomData) } #[inline] fn root_of_unity() -> Self { Fp256::<P>(P::ROOT_OF_UNITY, PhantomData) } } impl<P: Fp256Parameters> SquareRootField for Fp256<P> { #[inline] fn legendre(&self) -> LegendreSymbol { use crate::fields::LegendreSymbol::*; // s = self^((MODULUS - 1) // 2) let s = self.pow(P::MODULUS_MINUS_ONE_DIV_TWO); if s.is_zero() { Zero } else if s.is_one() { QuadraticResidue } else { QuadraticNonResidue } } // Only works for p = 1 (mod 16). #[inline] fn sqrt(&self) -> Option<Self> { sqrt_impl!(Self, P, self) } fn sqrt_in_place(&mut self) -> Option<&mut Self> { if let Some(sqrt) = self.sqrt() { *self = sqrt; Some(self) } else { None } } } impl_prime_field_from_int!(Fp256, u128, Fp256Parameters); impl_prime_field_from_int!(Fp256, u64, Fp256Parameters); impl_prime_field_from_int!(Fp256, u32, Fp256Parameters); impl_prime_field_from_int!(Fp256, u16, Fp256Parameters); impl_prime_field_from_int!(Fp256, u8, Fp256Parameters); impl_prime_field_standard_sample!(Fp256, Fp256Parameters); impl<P: Fp256Parameters> ToBytes for Fp256<P> { #[inline] fn write<W: Write>(&self, writer: W) -> IoResult<()> { self.into_repr().write(writer) } } impl<P: Fp256Parameters> FromBytes for Fp256<P> { #[inline] fn read<R: Read>(reader: R) -> IoResult<Self> { BigInteger::read(reader).map(Fp256::from_repr) } } /// `Fp` elements are ordered lexicographically. impl<P: Fp256Parameters> Ord for Fp256<P> { #[inline(always)] fn cmp(&self, other: &Self) -> Ordering { self.into_repr().cmp(&other.into_repr()) } } impl<P: Fp256Parameters> PartialOrd for Fp256<P> { #[inline(always)] fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl<P: Fp256Parameters> FromStr for Fp256<P> { type Err = (); /// Interpret a string of numbers as a (congruent) prime field element. /// Does not accept unnecessary leading zeroes or a blank string. 
fn from_str(s: &str) -> Result<Self, Self::Err> { if s.is_empty() { return Err(()); } if s == "0" { return Ok(Self::zero()); } let mut res = Self::zero(); let ten = Self::from_repr(<Self as PrimeField>::BigInt::from(10)); let mut first_digit = true; for c in s.chars() { match c.to_digit(10) { Some(c) => { if first_digit { if c == 0 { return Err(()); } first_digit = false; } res.mul_assign(&ten); res.add_assign(&Self::from_repr(<Self as PrimeField>::BigInt::from( u64::from(c), ))); }, None => { return Err(()); }, } } if !res.is_valid() { Err(()) } else { Ok(res) } } } impl<P: Fp256Parameters> Display for Fp256<P> { #[inline] fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { write!(f, "Fp256({})", self.into_repr()) } } impl<P: Fp256Parameters> Neg for Fp256<P> { type Output = Self; #[inline] #[must_use] fn neg(self) -> Self { if !self.is_zero() { let mut tmp = P::MODULUS; tmp.sub_noborrow(&self.0); Fp256::<P>(tmp, PhantomData) } else { self }
impl<'a, P: Fp256Parameters> Add<&'a Fp256<P>> for Fp256<P> { type Output = Self; #[inline] fn add(self, other: &Self) -> Self { let mut result = self; result.add_assign(other); result } } impl<'a, P: Fp256Parameters> Sub<&'a Fp256<P>> for Fp256<P> { type Output = Self; #[inline] fn sub(self, other: &Self) -> Self { let mut result = self; result.sub_assign(other); result } } impl<'a, P: Fp256Parameters> Mul<&'a Fp256<P>> for Fp256<P> { type Output = Self; #[inline] fn mul(self, other: &Self) -> Self { let mut result = self; result.mul_assign(other); result } } impl<'a, P: Fp256Parameters> Div<&'a Fp256<P>> for Fp256<P> { type Output = Self; #[inline] fn div(self, other: &Self) -> Self { let mut result = self; result.mul_assign(&other.inverse().unwrap()); result } } impl<'a, P: Fp256Parameters> AddAssign<&'a Self> for Fp256<P> { #[inline] fn add_assign(&mut self, other: &Self) { // This cannot exceed the backing capacity. self.0.add_nocarry(&other.0); // However, it may need to be reduced self.reduce(); } } impl<'a, P: Fp256Parameters> SubAssign<&'a Self> for Fp256<P> { #[inline] fn sub_assign(&mut self, other: &Self) { // If `other` is larger than `self`, add the modulus to self first. if other.0 > self.0 { self.0.add_nocarry(&P::MODULUS); } self.0.sub_noborrow(&other.0); } } impl<'a, P: Fp256Parameters> MulAssign<&'a Self> for Fp256<P> { #[inline] fn mul_assign(&mut self, other: &Self) { let mut carry = 0; let r0 = fa::mac_with_carry(0, (self.0).0[0], (other.0).0[0], &mut carry); let r1 = fa::mac_with_carry(0, (self.0).0[0], (other.0).0[1], &mut carry); let r2 = fa::mac_with_carry(0, (self.0).0[0], (other.0).0[2], &mut carry); let r3 = fa::mac_with_carry(0, (self.0).0[0], (other.0).0[3], &mut carry); let r4 = carry; let mut carry = 0; let r1 = fa::mac_with_carry(r1, (self.0).0[1], (other.0).0[0], &mut carry); let r2 = fa::mac_with_carry(r2, (self.0).0[1], (other.0).0[1], &mut carry); let r3 = fa::mac_with_carry(r3, (self.0).0[1], (other.0).0[2], &mut carry); let r4 = fa::mac_with_carry(r4, (self.0).0[1], (other.0).0[3], &mut carry); let r5 = carry; let mut carry = 0; let r2 = fa::mac_with_carry(r2, (self.0).0[2], (other.0).0[0], &mut carry); let r3 = fa::mac_with_carry(r3, (self.0).0[2], (other.0).0[1], &mut carry); let r4 = fa::mac_with_carry(r4, (self.0).0[2], (other.0).0[2], &mut carry); let r5 = fa::mac_with_carry(r5, (self.0).0[2], (other.0).0[3], &mut carry); let r6 = carry; let mut carry = 0; let r3 = fa::mac_with_carry(r3, (self.0).0[3], (other.0).0[0], &mut carry); let r4 = fa::mac_with_carry(r4, (self.0).0[3], (other.0).0[1], &mut carry); let r5 = fa::mac_with_carry(r5, (self.0).0[3], (other.0).0[2], &mut carry); let r6 = fa::mac_with_carry(r6, (self.0).0[3], (other.0).0[3], &mut carry); let r7 = carry; self.mont_reduce(r0, r1, r2, r3, r4, r5, r6, r7); } } impl<'a, P: Fp256Parameters> DivAssign<&'a Self> for Fp256<P> { #[inline] fn div_assign(&mut self, other: &Self) { self.mul_assign(&other.inverse().unwrap()); } }
} }
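The `} }` completion closes `Neg::neg` and its `impl` block. On the arithmetic above: `mont_reduce` is Montgomery reduction (Algorithm 14.32 in the Handbook of Applied Cryptography, as its comment says) unrolled over four 64-bit limbs, with `P::INV` playing the role of `-modulus^{-1} mod 2^64`. Below is a single-limb sketch of the same idea, assuming an odd modulus below 2^63 so the 128-bit sum cannot overflow; every name is illustrative and nothing here is taken from fp_256.rs:

// -m^{-1} mod 2^64 for odd m: m^(2^63 - 1) = m^{-1} in the group of odd residues mod 2^64.
fn neg_inv(m: u64) -> u64 {
    let mut inv = 1u64;
    for _ in 0..63 {
        inv = inv.wrapping_mul(inv); // inv = m^(2^k - 1) after k rounds
        inv = inv.wrapping_mul(m);
    }
    inv.wrapping_neg()
}

// REDC: given t < m * 2^64, return t * 2^{-64} mod m.
fn mont_reduce_1limb(t: u128, m: u64, m_neg_inv: u64) -> u64 {
    let k = (t as u64).wrapping_mul(m_neg_inv); // pick k so that t + k*m == 0 mod 2^64
    let t = t + (k as u128) * (m as u128);      // low limb is now zero (no overflow for m < 2^63)
    let r = (t >> 64) as u64;                   // exact division by 2^64; r < 2m
    if r >= m { r - m } else { r }              // single conditional subtraction, like reduce()
}

fn main() {
    let m: u64 = (1 << 61) - 1; // odd (Mersenne prime) modulus below 2^63
    let ni = neg_inv(m);
    let (a, b) = (123_456_789_012_345u64, 987_654_321_098u64); // both < m
    let r = mont_reduce_1limb(a as u128 * b as u128, m, ni);
    // r == a*b*2^{-64} mod m, so multiplying 2^64 back in recovers a*b mod m.
    assert_eq!(((r as u128) << 64) % (m as u128), (a as u128 * b as u128) % (m as u128));
}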
DynamicElement.ts
import Wrapper from './shared/Wrapper'; import Renderer from '../Renderer'; import Block from '../Block'; import FragmentWrapper from './Fragment'; import { b, x } from 'code-red'; import { Identifier } from 'estree'; import DynamicElement from '../../nodes/DynamicElement'; import ElementWrapper from './Element/index'; import create_debugging_comment from './shared/create_debugging_comment'; import Element from '../../nodes/Element'; export default class DynamicElementWrapper extends Wrapper { fragment: FragmentWrapper; node: DynamicElement; elementWrapper: ElementWrapper; block: Block; dependencies: string[]; var: Identifier = { type: 'Identifier', name: 'dynamic_element' }; constructor( renderer: Renderer, block: Block, parent: Wrapper, node: DynamicElement, strip_whitespace: boolean, next_sibling: Wrapper ) { super(renderer, block, parent, node); this.not_static_content(); this.dependencies = node.tag.dynamic_dependencies(); if (this.dependencies.length) { block = block.child({ comment: create_debugging_comment(node, renderer.component), name: renderer.component.get_unique_name('dynamic_element_block'), type: 'dynamic_element' }); renderer.blocks.push(block); } (node as unknown as Element).dynamic_tag = node.tag; this.block = block; this.elementWrapper = new ElementWrapper( renderer, this.block, parent, (node as unknown) as Element, strip_whitespace, next_sibling ); } render(block: Block, parent_node: Identifier, parent_nodes: Identifier) { if (this.dependencies.length === 0) { this.render_static_tag(block, parent_node, parent_nodes); } else { this.render_dynamic_tag(block, parent_node, parent_nodes); } } render_static_tag( _block: Block, parent_node: Identifier, parent_nodes: Identifier ) { this.elementWrapper.render(this.block, parent_node, parent_nodes); } render_dynamic_tag( block: Block, parent_node: Identifier, parent_nodes: Identifier ) { this.elementWrapper.render( this.block, null, (x`#nodes` as unknown) as Identifier ); const has_transitions = !!( this.block.has_intro_method || this.block.has_outro_method ); const dynamic = this.block.has_update_method; const previous_tag = block.get_unique_name('previous_tag'); const snippet = this.node.tag.manipulate(block); block.add_variable(previous_tag, snippet); const not_equal = this.renderer.component.component_options.immutable ? x`@not_equal` : x`@safe_not_equal`; const condition = x`${this.renderer.dirty( this.dependencies )} && ${not_equal}(${previous_tag}, ${previous_tag} = ${snippet})`; block.chunks.init.push(b` let ${this.var} = ${this.block.name}(#ctx);
if (this.renderer.options.hydratable) { block.chunks.claim.push(b`${this.var}.l(${parent_nodes});`); } block.chunks.mount.push( b`${this.var}.m(${parent_node || '#target'}, ${ parent_node ? 'null' : '#anchor' });` ); const anchor = this.get_or_create_anchor(block, parent_node, parent_nodes); const body = b` ${ has_transitions ? b` @group_outros(); @transition_out(${this.var}, 1, 1, @noop); @check_outros(); ` : b`${this.var}.d(1);` } ${this.var} = ${this.block.name}(#ctx); ${this.var}.c(); ${has_transitions && b`@transition_in(${this.var})`} ${this.var}.m(${this.get_update_mount_node(anchor)}, ${anchor}); `; if (dynamic) { block.chunks.update.push(b` if (${condition}) { ${body} } else { ${this.var}.p(#ctx, #dirty); } `); } else { block.chunks.update.push(b` if (${condition}) { ${body} } `); } if (has_transitions) { block.chunks.intro.push(b`@transition_in(${this.var})`); block.chunks.outro.push(b`@transition_out(${this.var})`); } block.chunks.destroy.push(b`${this.var}.d(detaching)`); } }
`); block.chunks.create.push(b`${this.var}.c();`);
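The completion supplies the closing of the `init` template literal plus the `create` chunk push. Conceptually, the update code this wrapper emits recreates the whole block when the tag expression changed (`not_equal(previous_tag, ...)`) and patches it in place otherwise. A plain-Rust restatement of that recreate-or-patch decision, for illustration only (the `Block` type here is hypothetical; the real output is generated JavaScript, not Rust):

struct Block { tag: String, patches: u32 }

fn update(block: &mut Block, new_tag: &str) {
    if block.tag != new_tag {
        // Tag changed: the d(1)/create/c()/m(...) path -- tear down and rebuild.
        *block = Block { tag: new_tag.to_string(), patches: 0 };
    } else {
        // Tag unchanged: the p(#ctx, #dirty) path -- cheap in-place update.
        block.patches += 1;
    }
}

fn main() {
    let mut b = Block { tag: "div".into(), patches: 0 };
    update(&mut b, "div"); // same tag -> patched in place
    assert_eq!(b.patches, 1);
    update(&mut b, "span"); // tag changed -> recreated from scratch
    assert_eq!((b.tag.as_str(), b.patches), ("span", 0));
}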
text.rs
use std::cmp; use std::fmt; use std::path; use std::collections::BTreeMap; use std::io::Read; use rusttype; use image; use image::RgbaImage; use super::*; /// A font that defines the shape of characters drawn on the screen. /// Can be created from a .ttf file or from an image (bitmap fonts). #[derive(Clone)] pub enum Font { /// A truetype font TTFFont { /// The actual font data font: rusttype::Font<'static>, /// The size of the font points: u32, /// Scale information for the font scale: rusttype::Scale, }, /// A bitmap font where letter widths are inferred BitmapFontVariant(BitmapFont), } /// A bitmap font where letter widths are inferred #[derive(Clone, Debug)] pub struct BitmapFont { /// The original glyph image bytes: Vec<u8>, /// Width of the image width: usize, /// Height of the image (same as the height of a glyph) height: usize, /// Glyph to horizontal position (in pixels) and span (in pixels) (does not include space) glyphs: BTreeMap<char, (usize, usize)>, /// Width in pixels of the space space_width: usize, letter_separation: usize, } impl BitmapFont { fn span_for(&self, c: char) -> usize { match self.glyphs.get(&c) { Some(&(_, span)) => span, None => { if c == ' ' { self.space_width } else { 0 //No span is defined for this char. // We could error here, but I don't see the point. // We will just render the missing char as nothing and move on, // and the user will see that there is a nothing and if they // do not understand, they will certainly feel silly when // we ask them what they expected to happen when they // told the system to render a char they never specified. I // think I would kind of prefer an implementation that is // guaranteed not to error for any string. // TODO: While this is a perfectly valid preference, I would // prefer fail-noisily to fail-invisibly; we should possibly have // options for either behavior. } } } } } impl Font { /// Load a new TTF font from the given file. pub fn new<P>(context: &mut Context, path: P, points: u32) -> GameResult<Font> where P: AsRef<path::Path> + fmt::Debug, { let mut stream = context.filesystem.open(path.as_ref())?; let mut buf = Vec::new(); stream.read_to_end(&mut buf)?; let name = format!("{:?}", path); // Get the proper DPI to scale font size accordingly let (_diag_dpi, x_dpi, y_dpi) = context.gfx_context.dpi; Font::from_bytes(&name, &buf, points, (x_dpi, y_dpi)) } /// Loads a new TTF font from data copied out of the given buffer. pub fn from_bytes(name: &str, bytes: &[u8], points: u32, dpi: (f32, f32)) -> GameResult<Font> { let font_collection_err = &|_| GameError::ResourceLoadError(format!( "Could not load font collection for \ font {:?}", name )); let collection = rusttype::FontCollection::from_bytes(bytes.to_vec()) .map_err(font_collection_err)?; let font_err = &|_| GameError::ResourceLoadError(format!( "Could not retrieve font from collection for \ font {:?}", name )); let font = collection.into_font() .map_err(font_err)?; let (x_dpi, y_dpi) = dpi; // println!("DPI: {}, {}", x_dpi, y_dpi); let scale = display_independent_scale(points, x_dpi, y_dpi); Ok(Font::TTFFont { font, points, scale, }) } /// Creates a bitmap font from a long image of its alphabet, specified by `path`.
/// The width of each individual char is assumed to be /// image(path).width / glyphs.chars().count() pub fn new_bitmap<P: AsRef<path::Path>>( context: &mut Context, path: P, glyphs: &str, ) -> GameResult<Font> { let img = { let mut buf = Vec::new(); let mut reader = context.filesystem.open(path)?; reader.read_to_end(&mut buf)?; image::load_from_memory(&buf)?.to_rgba() }; let (image_width, image_height) = img.dimensions(); let glyph_width = (image_width as usize) / glyphs.chars().count(); // count chars, not bytes, to match the doc comment let mut glyphs_map: BTreeMap<char, (usize, usize)> = BTreeMap::new(); for (i, c) in glyphs.chars().enumerate() { glyphs_map.insert(c, (i * glyph_width, glyph_width)); } Ok(Font::BitmapFontVariant(BitmapFont { bytes: img.into_vec(), width: image_width as usize, height: image_height as usize, glyphs: glyphs_map, space_width: glyph_width, letter_separation: 0, })) } /// Creates a bitmap font from a long image of its alphabet. /// Each letter must be separated from the last by a fully transparent column of pixels. /// The width of each letter is inferred from these letter boundaries. pub fn new_variable_width_bitmap_font<P: AsRef<path::Path>>( context: &mut Context, path: P, glyphs: &str, space_width: usize, //in addition to letter_separation letter_separation: usize, ) -> GameResult<Font> { let img = { let mut buf = Vec::new(); let mut reader = context.filesystem.open(path)?; reader.read_to_end(&mut buf)?; image::load_from_memory(&buf)?.to_rgba() }; let (image_width, image_height) = img.dimensions(); let mut glyphs_map: BTreeMap<char, (usize, usize)> = BTreeMap::new(); let mut start = 0usize; let mut glyphos = glyphs.chars().enumerate(); let column_has_content = |offset: usize, image: &RgbaImage| { //iff any pixel herein has an alpha greater than 0 (0..image_height).any(|ir| image.get_pixel(offset as u32, ir).data[3] > 0) }; while start < image_width as usize { if column_has_content(start, &img) { let mut span = 1; while start + span < image_width as usize && column_has_content(start + span, &img) { span += 1; } let next_char: char = glyphos .next() .ok_or_else(|| { GameError::FontError("I counted more glyphs in the font bitmap than there were chars in the glyphs string. Note, glyphs must not have gaps. A glyph with a transparent column in the middle will read as two glyphs.".into()) })? .1; glyphs_map.insert(next_char, (start, span)); start += span; } start += 1; } let (lb, _) = glyphos.size_hint(); if lb > 0 { return Err(GameError::FontError( "There were more chars in glyphs than I counted in the bitmap!".into(), )); } Ok(Font::BitmapFontVariant(BitmapFont { bytes: img.into_vec(), width: image_width as usize, height: image_height as usize, glyphs: glyphs_map, space_width, letter_separation, })) } /// Returns a baked-in default font: currently DejaVuSerif.ttf /// Note it does create a new `Font` object with every call. pub fn default_font() -> GameResult<Self> { let size = 16; let buf = include_bytes!(concat!( env!("CARGO_MANIFEST_DIR"), "/resources/DejaVuSerif.ttf" )); // BUGGO: fix DPI. Get from Context? If we do that we can basically // just make Context always keep the default Font itself... hmm. Font::from_bytes("default", &buf[..], size, (75.0, 75.0)) } /// Get the height of the Font in pixels. /// /// The height of the font includes any spacing; it will be the total height /// a line needs. pub fn get_height(&self) -> usize { match *self { Font::BitmapFontVariant(BitmapFont { height, .. }) => height, Font::TTFFont { scale, ..
} => scale.y.ceil() as usize, } } /// Returns the width a line of text needs, in pixels. /// Does not handle line-breaks. pub fn get_width(&self, text: &str) -> usize { match *self { Font::BitmapFontVariant(ref font) => { compute_variable_bitmap_text_rendering_span(text, font) } Font::TTFFont { ref font, scale, .. } => { let v_metrics = font.v_metrics(scale); let offset = rusttype::point(0.0, v_metrics.ascent); let glyphs: Vec<rusttype::PositionedGlyph> = font.layout(text, scale, offset).collect(); text_width(&glyphs) as usize } } } /// Breaks the given text into lines that will not exceed `wrap_limit` pixels /// in length when drawn with the given font. /// It accounts for newlines correctly but does not /// try to break words or handle hyphenated words; it just breaks /// at whitespace. (It also doesn't preserve whitespace.) /// /// Returns a tuple of maximum line width and a `Vec` of wrapped `String`s. pub fn get_wrap(&self, text: &str, wrap_limit: usize) -> (usize, Vec<String>) { let mut broken_lines = Vec::new(); for line in text.lines() { let mut current_line = Vec::new(); for word in line.split_whitespace() { // I'm sick of trying to do things the clever way and // build up a line word by word while tracking how // long it should be, so instead I just re-render the whole // line, incrementally adding a word at a time until it // becomes too long. // This is not the most efficient way but it is simple and // it works. let mut prospective_line = current_line.clone(); prospective_line.push(word); let text = prospective_line.join(" "); let prospective_line_width = self.get_width(&text); if prospective_line_width > wrap_limit { // Current line is long enough, keep it broken_lines.push(current_line.join(" ")); // and overflow the current word onto the next line. current_line.clear(); current_line.push(word); } else { // Current line with the added word is still short enough current_line.push(word); } } // Push the last line of the text broken_lines.push(current_line.join(" ")); } // If we have a line with only whitespace on it, // this results in the unwrap_or value. // And we can't create a texture of size 0, so // we put 1 here. // Not entirely sure what this will actually result // in though; hopefully a blank line. let max_line_length = broken_lines .iter() .map(|line| self.get_width(line)) .max() .unwrap_or(1); (max_line_length, broken_lines) } } impl fmt::Debug for Font { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Font::TTFFont { .. } => write!(f, "<TTFFont: {:p}>", &self), Font::BitmapFontVariant(BitmapFont { .. }) => write!(f, "<BitmapFont: {:p}>", &self), } } } /// Drawable text created from a `Font`. #[derive(Clone)] pub struct Text { texture: Image, contents: String, blend_mode: Option<BlendMode>, } /// Compute a scale for a font of a given size. // This basically does the points->pixels unit conversion, // taking the display DPI into account. fn display_independent_scale(points: u32, dpi_w: f32, dpi_h: f32) -> rusttype::Scale { // Calculate pixels per point let points = points as f32; let points_per_inch = 72.0; let pixels_per_point_w = dpi_w * (1.0 / points_per_inch); let pixels_per_point_h = dpi_h * (1.0 / points_per_inch); // rusttype::Scale is in units of pixels, so. 
rusttype::Scale { x: pixels_per_point_w * points, y: pixels_per_point_h * points, } } fn text_width(glyphs: &[rusttype::PositionedGlyph]) -> f32 { glyphs .iter() .rev() .filter_map(|g| { g.pixel_bounding_box() .map(|b| b.min.x as f32 + g.unpositioned().h_metrics().advance_width) }) .next() .unwrap_or(0.0) } fn render_ttf( context: &mut Context, text: &str, font: &rusttype::Font<'static>, scale: rusttype::Scale, ) -> GameResult<Text> { // Ripped almost wholesale from // https://github.com/dylanede/rusttype/blob/master/examples/simple.rs let text_height_pixels = scale.y.ceil() as usize; let v_metrics = font.v_metrics(scale); let offset = rusttype::point(0.0, v_metrics.ascent); // Then turn them into an array of positioned glyphs... // `layout()` turns an abstract glyph, which contains no concrete // size or position information, into a PositionedGlyph, which does. let glyphs: Vec<rusttype::PositionedGlyph> = font.layout(text, scale, offset).collect(); // If the string is empty or only whitespace, we end up trying to create a 0-width // texture which is invalid. Instead we create a texture 1 texel wide, with everything // set to zero, which probably isn't ideal but is 100% consistent and doesn't require // special-casing things like get_filter(). // See issue #109 let text_width_pixels = cmp::max(text_width(&glyphs).ceil() as usize, 1); let bytes_per_pixel = 4; let mut pixel_data = vec![0; text_width_pixels * text_height_pixels * bytes_per_pixel]; let pitch = text_width_pixels * bytes_per_pixel; // Now we actually render the glyphs to a bitmap... for g in glyphs { if let Some(bb) = g.pixel_bounding_box() { // v is the amount of the pixel covered // by the glyph, in the range 0.0 to 1.0 g.draw(|x, y, v| { let c = (v * 255.0) as u8; let x = x as i32 + bb.min.x; let y = y as i32 + bb.min.y; // There's still a possibility that the glyph clips the boundaries of the bitmap if x >= 0 && x < text_width_pixels as i32 && y >= 0 && y < text_height_pixels as i32 { let x = x as usize * bytes_per_pixel; let y = y as usize; pixel_data[(x + y * pitch)] = 255; pixel_data[(x + y * pitch + 1)] = 255; pixel_data[(x + y * pitch + 2)] = 255; pixel_data[(x + y * pitch + 3)] = c; } }) } } // Copy the bitmap into an image, and we're basically done! assert!(text_width_pixels < u16::MAX as usize); assert!(text_height_pixels < u16::MAX as usize); let image = Image::from_rgba8( context, text_width_pixels as u16, text_height_pixels as u16, &pixel_data, )?; let text_string = text.to_string(); Ok(Text { texture: image, contents: text_string, blend_mode: None, }) } /// Treats src and dst as row-major 2D arrays, and blits the given rect from src to dst. /// Does no bounds checking or anything; if you feed it invalid bounds it will just panic. /// Generally, you shouldn't need to use this directly. #[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))] fn blit( dst: &mut [u8], dst_dims: (usize, usize), dst_point: (usize, usize), src: &[u8], src_dims: (usize, usize), src_point: (usize, usize), rect_size: (usize, usize), pitch: usize, ) { // The rect properties are all f32's; we truncate them down to integers. 
let area_row_width = rect_size.0 * pitch; let src_row_width = src_dims.0 * pitch; let dst_row_width = dst_dims.0 * pitch; for row_idx in 0..rect_size.1 { let src_row = row_idx + src_point.1; let dst_row = row_idx + dst_point.1; let src_offset = src_row * src_row_width + (src_point.0 * pitch); let dst_offset = dst_row * dst_row_width + (dst_point.0 * pitch); // println!("from {} to {}, width {}", // dst_offset, // src_offset, // area_row_width); let dst_slice = &mut dst[dst_offset..(dst_offset + area_row_width)]; let src_slice = &src[src_offset..(src_offset + area_row_width)]; dst_slice.copy_from_slice(src_slice); } } struct VariableFontCharIter<'a> { font: &'a BitmapFont, iter: ::std::str::Chars<'a>, offset: usize, } impl<'a> Iterator for VariableFontCharIter<'a> { // iterates over each char in a line of text, finding the horizontal // offsets at which they will appear on the screen, relative to the origin. type Item = (char, usize, usize); //(letter, offset, letter_render_span) fn next(&mut self) -> Option<Self::Item> { if let Some(c) = self.iter.next() { let char_span = self.font.span_for(c); let this_offset = self.offset; self.offset += char_span + self.font.letter_separation; Some((c, this_offset, char_span)) } else { None } } } impl<'a> VariableFontCharIter<'a> { fn new(text: &'a str, font: &'a BitmapFont) -> VariableFontCharIter<'a> { VariableFontCharIter { font, iter: text.chars(), offset: 0, } } } fn compute_variable_bitmap_text_rendering_span(text: &str, font: &BitmapFont) -> usize { VariableFontCharIter::new(text, font) .last() .map(|(_, offset, span)| offset + span) .unwrap_or(0) } fn render_dynamic_bitmap(context: &mut Context, text: &str, font: &BitmapFont) -> GameResult<Text> { let image_span = compute_variable_bitmap_text_rendering_span(text, font); // Same at-least-one-pixel-wide constraint here as with TTF fonts. let buf_len = cmp::max(image_span * font.height * 4, 1); let mut dest_buf = Vec::with_capacity(buf_len); dest_buf.resize(buf_len, 0u8); for (c, offset, _) in VariableFontCharIter::new(text, font) { let (coffset, cspan) = *font.glyphs.get(&c).unwrap_or(&(0, 0)); blit( &mut dest_buf, (image_span, font.height), (offset, 0), &font.bytes, (font.width, font.height), (coffset, 0), (cspan, font.height), 4, ); } let image = Image::from_rgba8(context, image_span as u16, font.height as u16, &dest_buf)?; let text_string = text.to_string(); Ok(Text { texture: image, contents: text_string, blend_mode: None, }) } impl Text { /// Renders a new `Text` from the given `Font`. /// /// Note that this is relatively computationally expensive; /// if you want to draw text every frame you probably want to save /// it and only update it when the text changes. pub fn new(context: &mut Context, text: &str, font: &Font) -> GameResult<Text> { match *font { Font::TTFFont { font: ref f, scale, .. } => render_ttf(context, text, f, scale), Font::BitmapFontVariant(ref font) => render_dynamic_bitmap(context, text, font), } } /// Returns the width of the rendered text, in pixels. pub fn width(&self) -> u32 { self.texture.width() } /// Returns the height of the rendered text, in pixels. pub fn height(&self) -> u32 { self.texture.height() } /// Returns the string that the text represents. pub fn contents(&self) -> &str { &self.contents } /// Returns the dimensions of the rendered text. pub fn get_dimensions(&self) -> Rect { self.texture.get_dimensions() } /// Get the filter mode for the rendered text.
pub fn get_filter(&self) -> FilterMode { self.texture.get_filter() } /// Set the filter mode for the rendered text. pub fn set_filter(&mut self, mode: FilterMode) { self.texture.set_filter(mode); } /// Returns a reference to the `Image` contained /// by the `Text` object. pub fn get_image(&self) -> &Image { &self.texture } /// Returns a mutable reference to the `Image` contained /// by the `Text` object. pub fn
(&mut self) -> &mut Image { &mut self.texture } /// Unwraps the `Image` contained /// by the `Text` object. pub fn into_inner(self) -> Image { self.texture } } impl Drawable for Text { fn draw_ex(&self, ctx: &mut Context, param: DrawParam) -> GameResult<()> { draw_ex(ctx, &self.texture, param) } fn set_blend_mode(&mut self, mode: Option<BlendMode>) { self.blend_mode = mode; } fn get_blend_mode(&self) -> Option<BlendMode> { self.blend_mode } } impl fmt::Debug for Text { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "<Text: {}x{}, {:p}>", self.texture.width, self.texture.height, &self ) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_blit() { let dst = &mut [0; 125][..]; let src = &[ 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 9, 1, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ][..]; assert_eq!(src.len(), 25 * 5); // Test just blitting the whole thing let rect_dims = (25, 5); blit(dst, rect_dims, (0, 0), src, rect_dims, (0, 0), (25, 5), 1); //println!("{:?}", src); //println!("{:?}", dst); assert_eq!(dst, src); for i in 0..dst.len() { dst[i] = 0; } // Test blitting the whole thing with a non-1 pitch let rect_dims = (5, 5); blit(dst, rect_dims, (0, 0), src, rect_dims, (0, 0), (5, 5), 5); assert_eq!(dst, src); } #[test] fn test_metrics() { let f = Font::default_font().expect("Could not get default font"); assert_eq!(f.get_height(), 17); assert_eq!(f.get_width("Foo!"), 33); // http://www.catipsum.com/index.php let text_to_wrap = "Walk on car leaving trail of paw prints on hood and windshield sniff \ other cat's butt and hang jaw half open thereafter for give attitude. \ Annoy kitten\nbrother with poking. Mrow toy mouse squeak roll over. \ Human give me attention meow."; let (len, v) = f.get_wrap(text_to_wrap, 250); println!("{} {:?}", len, v); assert_eq!(len, 249); /* let wrapped_text = vec![ "Walk on car leaving trail of paw prints", "on hood and windshield sniff other", "cat\'s butt and hang jaw half open", "thereafter for give attitude. Annoy", "kitten", "brother with poking. Mrow toy", "mouse squeak roll over. Human give", "me attention meow." ]; */ let wrapped_text = vec![ "Walk on car leaving trail of paw", "prints on hood and windshield", "sniff other cat\'s butt and hang jaw", "half open thereafter for give", "attitude. Annoy kitten", "brother with poking. Mrow toy", "mouse squeak roll over. Human", "give me attention meow.", ]; assert_eq!(&v, &wrapped_text); } // We sadly can't have this test in the general case because it needs to create a Context, // which creates a window, which fails on a headless server like our CI systems. :/ //#[test] #[allow(dead_code)] fn test_wrapping() { use conf; let c = conf::Conf::new(); let ctx = &mut Context::load_from_conf("test_wrapping", "ggez", c) .expect("Could not create context?"); let font = Font::default_font().expect("Could not get default font"); let text_to_wrap = "Walk on car leaving trail of paw prints on hood and windshield sniff \ other cat's butt and hang jaw half open thereafter for give attitude. \ Annoy kitten\nbrother with poking. Mrow toy mouse squeak roll over. 
\ Human give me attention meow."; let wrap_length = 250; let (len, v) = font.get_wrap(text_to_wrap, wrap_length); assert!(len < wrap_length); for line in &v { let t = Text::new(ctx, line, &font).unwrap(); println!( "Width is claimed to be <= {}, should be <= {}, is {}", len, wrap_length, t.width() ); // Why does this not match? x_X //assert!(t.width() as usize <= len); assert!(t.width() as usize <= wrap_length); } } }
get_image_mut
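The completion is the method name `get_image_mut`. One detail of text.rs worth making concrete: `display_independent_scale` is the standard points-to-pixels conversion at 72 points per inch. A standalone restatement with worked values (`points_to_pixels` is a hypothetical name, not a text.rs function):

fn points_to_pixels(points: f32, dpi: f32) -> f32 {
    points * dpi / 72.0 // 72 points per inch
}

fn main() {
    assert_eq!(points_to_pixels(12.0, 96.0), 16.0); // 12 pt at 96 DPI is exactly 16 px
    assert_eq!(points_to_pixels(12.0, 75.0), 12.5); // at the 75 DPI default_font() hard-codes, 12.5 px
}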
evaluator.rs
//! This module contains the framework for evaluation, which manifests //! as: //! //! * the trait `Evaluate` //! //! * the thread-local state function `should_eval_to_reference` //! //! * the function `eval_to_reference`, which is called by `setf` //! //! * the unsafe function `eval_from_stack`, which is a part of the //! read-eval-print loop use crate::prelude::*; use crate::types::ExpandedObject; use std::cell::Cell; thread_local! { static EVAL_TO_REFERENCE: Cell<bool> = { Cell::new(false) }; } fn should_eval_to_reference() -> bool { EVAL_TO_REFERENCE.with(|r| r.get()) } pub fn eval_to_reference(obj: Object) -> Object { let old_e = EVAL_TO_REFERENCE.with(|r| r.replace(true)); let res = obj.evaluate(); EVAL_TO_REFERENCE.with(|r| { r.set(old_e); }); res } pub unsafe fn eval_from_stack() { let to_eval = match stack::nth_arg(0) { Ok(o) => *o, Err(e) => { stack::close_stack_frame_and_return(<GcRef<Error>>::from(e).into()); return; } }; stack::close_stack_frame_and_return(to_eval.evaluate()); } pub trait Evaluate { fn evaluate(&self) -> Object; } impl Evaluate for Object { /// `evaluate`, like most operations on `Object`s, involves /// deconstructing `self` into an `ExpandedObject` and then /// calling `evaluate` on that. fn
(&self) -> Object { info!("Evaluating {}.", self); (*self)?; let mut o = self.expand_quiet().evaluate(); if !should_eval_to_reference() { while let Some(r) = Reference::maybe_from(o) { o = *r; } } o } } impl Evaluate for ExpandedObject { /// Floats, `Immediate`s, `Function`s and `Namespace`s are all /// self-evaluating. `Reference`s evaluate to the value they /// dereference to. `HeapObject`s evaluate by dereferencing and /// evaluating themselves. `Symbol`s are looked up. `Cons`es are /// the only `Object`s with a serious, beefy `evaluate` /// implementation. fn evaluate(&self) -> Object { match *self { ExpandedObject::Float(n) => Object::from(n), ExpandedObject::Immediate(i) => Object::from(i), ExpandedObject::Reference(ref r) => **r, ExpandedObject::Symbol(s) => s.evaluate(), ExpandedObject::Function(f) => Object::from(f), ExpandedObject::Cons(c) => c.evaluate(), ExpandedObject::Namespace(n) => Object::from(n), ExpandedObject::HeapObject(h) => (**h).evaluate(), ExpandedObject::QuietError(e) => Object::quiet_error(e), } } }
evaluate
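The completion is the method name `evaluate`. The replace/restore dance that `eval_to_reference` performs on the `EVAL_TO_REFERENCE` thread-local generalizes to a scoped helper; a sketch of that pattern (hypothetical names, and like the original it is not panic-safe: if the closure panics, the old value is never restored):

use std::cell::Cell;

thread_local! {
    static FLAG: Cell<bool> = Cell::new(false);
}

fn with_flag<R>(value: bool, f: impl FnOnce() -> R) -> R {
    let old = FLAG.with(|c| c.replace(value)); // save the old value, install the new one
    let result = f();
    FLAG.with(|c| c.set(old)); // restore on the way out
    result
}

fn main() {
    assert!(!FLAG.with(|c| c.get()));
    let seen = with_flag(true, || FLAG.with(|c| c.get()));
    assert!(seen);                    // the flag was set inside the closure
    assert!(!FLAG.with(|c| c.get())); // and restored afterwards
}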
test_path.py
""" test_path.py - Test the path module. This only runs on Posix and NT right now. I would like to have more tests. You can help! Just add appropriate pathnames for your platform (os.name) in each place where the p() function is called. Then send me the result. If you can't get the test to run at all on your platform, there's probably a bug in path.py -- please let me know! TempDirTestCase.testTouch() takes a while to run. It sleeps a few seconds to allow some time to pass between calls to check the modify time on files. URL: http://www.jorendorff.com/articles/python/path Author: Jason Orendorff <[email protected]> Date: 7 Mar 2004 """ import unittest import codecs, os, random, shutil, tempfile, time, stat, sys from scriptine._path import path, __version__ as path_version # This should match the version of path.py being tested. __version__ = '2.2' if sys.platform in ('cygwin', 'win32'): is_windows = True else: is_windows = False def p(**choices): """ Choose a value from several possible values, based on os.name """ return choices[os.name] class BasicTestCase(unittest.TestCase): def testRelpath(self): root = path(p(nt='C:\\', posix='/')) foo = root / 'foo' quux = foo / 'quux' bar = foo / 'bar' boz = bar / 'Baz' / 'Boz' up = path(os.pardir) # basics self.assertEqual(root.relpathto(boz), path('foo') / 'bar' / 'Baz' / 'Boz') self.assertEqual(bar.relpathto(boz), path('Baz') / 'Boz') self.assertEqual(quux.relpathto(boz), up / 'bar' / 'Baz' / 'Boz') self.assertEqual(boz.relpathto(quux), up / up / up / 'quux') self.assertEqual(boz.relpathto(bar), up / up) # x.relpathto(x) == curdir self.assertEqual(root.relpathto(root), os.curdir) self.assertEqual(boz.relpathto(boz), os.curdir) # Make sure case is properly noted (or ignored) self.assertEqual(boz.relpathto(boz.normcase()), os.curdir) # relpath() cwd = path(os.getcwd()) self.assertEqual(boz.relpath(), cwd.relpathto(boz)) if os.name == 'nt': # Check relpath across drives. d = path('D:\\') self.assertEqual(d.relpathto(boz), boz) def testStringCompatibility(self): """ Test compatibility with ordinary strings. """ x = path('xyzzy') self.assert_(x == 'xyzzy') self.assert_(x == u'xyzzy') # sorting items = [path('fhj'), path('fgh'), 'E', path('d'), 'A', path('B'), 'c'] items.sort() self.assert_(items == ['A', 'B', 'E', 'c', 'd', 'fgh', 'fhj']) # Test p1/p1. p1 = path("foo") p2 = path("bar") self.assertEqual(p1 / p2, p(nt='foo\\bar', posix='foo/bar')) def testProperties(self): # Create sample path object. f = p(nt='C:\\Program Files\\Python\\Lib\\xyzzy.py', posix='/usr/local/python/lib/xyzzy.py') f = path(f) # .parent self.assertEqual(f.parent, p(nt='C:\\Program Files\\Python\\Lib', posix='/usr/local/python/lib')) # .name self.assertEqual(f.name, 'xyzzy.py') self.assertEqual(f.parent.name, p(nt='Lib', posix='lib')) # .ext self.assertEqual(f.ext, '.py') self.assertEqual(f.parent.ext, '') # .drive self.assertEqual(f.drive, p(nt='C:', posix='')) def testMethods(self): # .abspath() self.assertEqual(path(os.curdir).abspath(), os.getcwd()) # .getcwd() cwd = path.cwd() self.assert_(isinstance(cwd, path)) self.assertEqual(cwd, os.getcwd()) def testUNC(self): if hasattr(os.path, 'splitunc'): p = path(r'\\python1\share1\dir1\file1.txt') self.assert_(p.uncshare == r'\\python1\share1') self.assert_(p.splitunc() == os.path.splitunc(str(p))) class TempDirTestCase(unittest.TestCase): def setUp(self): # Create a temporary directory. 
f = tempfile.mktemp() system_tmp_dir = os.path.dirname(f) my_dir = 'testpath_tempdir_' + str(random.random())[2:] self.tempdir = os.path.join(system_tmp_dir, my_dir) os.mkdir(self.tempdir) def tearDown(self): shutil.rmtree(self.tempdir) def testTouch(self): # NOTE: This test takes a long time to run (~10 seconds). # It sleeps several seconds because on Windows, the resolution # of a file's mtime and ctime is about 2 seconds. # # atime isn't tested because on Windows the resolution of atime # is something like 24 hours. sleep_time = 2.0 if is_windows: sleep_time = 5.0 d = path(self.tempdir) f = d / 'test.txt' t0 = time.time() - sleep_time / 2 f.touch() t1 = time.time() + sleep_time / 2 try: self.assert_(f.exists()) self.assert_(f.isfile()) self.assertEqual(f.size(), 0) self.assert_(t0 <= f.mtime() <= t1) if hasattr(os.path, 'getctime'): ct = f.ctime() self.assert_(t0 <= ct <= t1) time.sleep(sleep_time) fobj = open(f, 'ab') fobj.write(b'some bytes') fobj.close() time.sleep(sleep_time) f_new = d / 'test_new.txt' f_new.touch() self.assert_(f_new.newer(f)) t2 = time.time() - sleep_time / 2 f.touch() t3 = time.time() + sleep_time / 2 assert t0 <= t1 < t2 <= t3 # sanity check self.assert_(f.exists()) self.assert_(f.isfile()) self.assertEqual(f.size(), 10) self.assert_(t2 <= f.mtime() <= t3) if hasattr(os.path, 'getctime'): ct2 = f.ctime() if os.name == 'nt': # On Windows, "ctime" is CREATION time self.assertEqual(ct, ct2) self.assert_(ct2 < t2) else: # On other systems, it might be the CHANGE time # (especially on Unix, time of inode changes) self.failUnless(ct == ct2 or ct2 == f.mtime()) finally: f.remove() def testListing(self): d = path(self.tempdir) self.assertEqual(d.listdir(), []) f = 'testfile.txt' af = d / f self.assertEqual(af, os.path.join(d, f)) af.touch() try: self.assert_(af.exists()) self.assertEqual(d.listdir(), [af]) # .glob() self.assertEqual(d.glob('testfile.txt'), [af]) self.assertEqual(d.glob('test*.txt'), [af]) self.assertEqual(d.glob('*.txt'), [af]) self.assertEqual(d.glob('*txt'), [af]) self.assertEqual(d.glob('*'), [af]) self.assertEqual(d.glob('*.html'), []) self.assertEqual(d.glob('testfile'), []) finally: af.remove() # Try a test with 20 files files = [d / ('%d.txt' % i) for i in range(20)] for f in files: fobj = open(f, 'w') fobj.write('some text\n') fobj.close() try: files2 = d.listdir() files.sort() files2.sort() self.assertEqual(files, files2) finally: for f in files: try: f.remove() except: pass def testMakeDirs(self): d = path(self.tempdir) # Placeholder file so that when removedirs() is called, # it doesn't remove the temporary directory itself. tempf = d / 'temp.txt' tempf.touch() try: foo = d / 'foo' boz = foo / 'bar' / 'baz' / 'boz' boz.makedirs() try: self.assert_(boz.isdir()) finally: boz.removedirs() self.failIf(foo.exists()) self.assert_(d.exists()) foo.mkdir(0o750) boz.makedirs(0o700) try: self.assert_(boz.isdir()) finally: boz.removedirs() self.failIf(foo.exists()) self.assert_(d.exists()) finally: os.remove(tempf) def assertSetsEqual(self, a, b): ad = {} for i in a: ad[i] = None bd = {} for i in b: bd[i] = None self.assertEqual(ad, bd) def testShutil(self): # Note: This only tests the methods exist and do roughly what # they should, neglecting the details as they are shutil's # responsibility. 
        d = path(self.tempdir)
        testDir = d / 'testdir'
        testFile = testDir / 'testfile.txt'
        testA = testDir / 'A'
        testCopy = testA / 'testcopy.txt'
        testLink = testA / 'testlink.txt'
        testInstall = testA / 'testinstall.txt'
        testB = testDir / 'B'
        testC = testB / 'C'
        testCopyOfLink = testC / testA.relpathto(testLink)

        # Create test dirs and a file
        testDir.mkdir()
f = open(testFile, 'w') f.write('x' * 10000) f.close() # Test simple file copying. testFile.copyfile(testCopy) self.assert_(testCopy.isfile()) self.assert_(testFile.bytes() == testCopy.bytes()) # Test copying into a directory. testCopy2 = testA / testFile.name testFile.copy(testA) self.assert_(testCopy2.isfile()) self.assert_(testFile.bytes() == testCopy2.bytes()) # Test install file testFile.install(testInstall, chmod=0o740) stats = stat.S_IMODE(testInstall.stat().st_mode) self.assert_(stats == stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP) testInstall.unlink() # Make a link for the next test to use. if hasattr(os, 'symlink'): testFile.symlink(testLink) else: testFile.copy(testLink) # fallback # Test copying directory tree. testA.copytree(testC) self.assert_(testC.isdir()) self.assertSetsEqual( testC.listdir(), [testC / testCopy.name, testC / testFile.name, testCopyOfLink]) self.assert_(not testCopyOfLink.islink()) # Clean up for another try. testC.rmtree() self.assert_(not testC.exists()) # Copy again, preserving symlinks. testA.copytree(testC, True) self.assert_(testC.isdir()) self.assertSetsEqual( testC.listdir(), [testC / testCopy.name, testC / testFile.name, testCopyOfLink]) if hasattr(os, 'symlink'): self.assert_(testCopyOfLink.islink()) self.assert_(testCopyOfLink.readlink() == testFile) # Clean up. testDir.rmtree() self.assert_(not testDir.exists()) self.assertList(d.listdir(), []) def assertList(self, listing, expected): listing = list(listing) listing.sort() expected = list(expected) expected.sort() self.assertEqual(listing, expected) def testPatterns(self): d = path(self.tempdir) names = ['x.tmp', 'x.xtmp', 'x2g', 'x22', 'x.txt'] dirs = [d, d / 'xdir', d / 'xdir.tmp', d / 'xdir.tmp' / 'xsubdir'] for e in dirs: if not e.isdir(): e.makedirs() for name in names: (e / name).touch() self.assertList(d.listdir('*.tmp'), [d / 'x.tmp', d / 'xdir.tmp']) self.assertList(d.files('*.tmp'), [d / 'x.tmp']) self.assertList(d.dirs('*.tmp'), [d / 'xdir.tmp']) self.assertList(d.walk(), [e for e in dirs if e != d] + [e / n for e in dirs for n in names]) self.assertList(d.walk('*.tmp'), [e / 'x.tmp' for e in dirs] + [d / 'xdir.tmp']) self.assertList(d.walkfiles('*.tmp'), [e / 'x.tmp' for e in dirs]) self.assertList(d.walkdirs('*.tmp'), [d / 'xdir.tmp']) def testUnicode(self): d = path(self.tempdir) p = d / 'unicode.txt' def test(enc): """ Test that path works with the specified encoding, which must be capable of representing the entire range of Unicode codepoints. """ given = (u'Hello world\n' u'\u0d0a\u0a0d\u0d15\u0a15\r\n' u'\u0d0a\u0a0d\u0d15\u0a15\x85' u'\u0d0a\u0a0d\u0d15\u0a15\u2028' u'\r' u'hanging') clean = (u'Hello world\n' u'\u0d0a\u0a0d\u0d15\u0a15\n' u'\u0d0a\u0a0d\u0d15\u0a15\n' u'\u0d0a\u0a0d\u0d15\u0a15\n' u'\n' u'hanging') givenLines = [ u'Hello world\n', u'\u0d0a\u0a0d\u0d15\u0a15\r\n', u'\u0d0a\u0a0d\u0d15\u0a15\x85', u'\u0d0a\u0a0d\u0d15\u0a15\u2028', u'\r', u'hanging'] expectedLines = [ u'Hello world\n', u'\u0d0a\u0a0d\u0d15\u0a15\n', u'\u0d0a\u0a0d\u0d15\u0a15\n', u'\u0d0a\u0a0d\u0d15\u0a15\n', u'\n', u'hanging'] expectedLines2 = [ u'Hello world', u'\u0d0a\u0a0d\u0d15\u0a15', u'\u0d0a\u0a0d\u0d15\u0a15', u'\u0d0a\u0a0d\u0d15\u0a15', u'', u'hanging'] # write bytes manually to file f = codecs.open(p, 'w', enc) f.write(given) f.close() # test all 3 path read-fully functions, including # path.lines() in unicode mode. 
self.assertEqual(p.bytes(), given.encode(enc)) self.assertEqual(p.text(enc), clean) self.assertEqual(p.lines(enc), expectedLines) self.assertEqual(p.lines(enc, retain=False), expectedLines2) # If this is UTF-16, that's enough. # The rest of these will unfortunately fail because append=True mode # causes an extra BOM to be written in the middle of the file. # UTF-16 is the only encoding that has this problem. if enc == 'UTF-16': return # Write Unicode to file using path.write_text(). cleanNoHanging = clean + u'\n' # This test doesn't work with a hanging line. p.write_text(cleanNoHanging, enc) p.write_text(cleanNoHanging, enc, append=True) # Check the result. expectedBytes = 2 * cleanNoHanging.replace('\n', os.linesep).encode(enc) expectedLinesNoHanging = expectedLines[:] expectedLinesNoHanging[-1] += '\n' self.assertEqual(p.bytes(), expectedBytes) self.assertEqual(p.text(enc), 2 * cleanNoHanging) self.assertEqual(p.lines(enc), 2 * expectedLinesNoHanging) self.assertEqual(p.lines(enc, retain=False), 2 * expectedLines2) # Write Unicode to file using path.write_lines(). # The output in the file should be exactly the same as last time. p.write_lines(expectedLines, enc) p.write_lines(expectedLines2, enc, append=True) # Check the result. self.assertEqual(p.bytes(), expectedBytes) # Now: same test, but using various newline sequences. # If linesep is being properly applied, these will be converted # to the platform standard newline sequence. p.write_lines(givenLines, enc) p.write_lines(givenLines, enc, append=True) # Check the result. self.assertEqual(p.bytes(), expectedBytes) # Same test, using newline sequences that are different # from the platform default. def testLinesep(eol): p.write_lines(givenLines, enc, linesep=eol) p.write_lines(givenLines, enc, linesep=eol, append=True) expected = 2 * cleanNoHanging.replace(u'\n', eol).encode(enc) self.assertEqual(p.bytes(), expected) testLinesep(u'\n') testLinesep(u'\r') testLinesep(u'\r\n') testLinesep(u'\x0d\x85') # Again, but with linesep=None. p.write_lines(givenLines, enc, linesep=None) p.write_lines(givenLines, enc, linesep=None, append=True) # Check the result. expectedBytes = 2 * given.encode(enc) self.assertEqual(p.bytes(), expectedBytes) self.assertEqual(p.text(enc), 2 * clean) expectedResultLines = expectedLines[:] expectedResultLines[-1] += expectedLines[0] expectedResultLines += expectedLines[1:] self.assertEqual(p.lines(enc), expectedResultLines) test('UTF-8') test('UTF-16BE') test('UTF-16LE') test('UTF-16') if __name__ == '__main__': if __version__ != path_version: print("Version mismatch: test_path.py version %s, path version %s" % (__version__, path_version)) unittest.main()
        testA.mkdir()
        testB.mkdir()
xmlprocess.go
package backend import ( "encoding/xml" "fmt" ) type XmlProcess struct { XMLName xml.Name `xml:"process"` Name string `xml:"name,attr"` InputChannels []XmlInChannel `xml:"input-channel"` OutputChannels []XmlOutChannel `xml:"output-channel"` } func XmlProcessNew(name string) *XmlProcess { return &XmlProcess{xml.Name{freespNamespace, "process"}, name, nil, nil} } func (p *XmlProcess) Read(data []byte) (cnt int, err error) { err = xml.Unmarshal(data, p) if err != nil
cnt = len(data)
	return
}

func (p *XmlProcess) Write() (data []byte, err error) {
	data, err = xml.MarshalIndent(p, "", "    ")
	if err != nil {
		err = fmt.Errorf("XmlProcess.Write error: %v", err)
	}
	return
}
{ err = fmt.Errorf("XmlProcess.Read error: %v", err) }
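The Read/Write pair above is a thin wrapper around encoding/xml. Below is a self-contained sketch of the same round trip; the `process` struct and the "filter" value are illustrative stand-ins, not the real freesp backend types.

package main

import (
	"encoding/xml"
	"fmt"
)

// process is a stand-in for XmlProcess with just enough fields to
// show the Marshal/Unmarshal round trip.
type process struct {
	XMLName xml.Name `xml:"process"`
	Name    string   `xml:"name,attr"`
}

func main() {
	// Write: MarshalIndent serializes the struct to XML bytes.
	in := process{Name: "filter"}
	data, err := xml.MarshalIndent(in, "", "    ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // <process name="filter"></process>

	// Read: Unmarshal restores an equivalent value from those bytes.
	var out process
	if err := xml.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Name) // filter
}

Wrapping both directions behind Read/Write, as the row above does, keeps the namespace handling and error formatting in one place.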
pkcs7_padder.go
package s3crypto // Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Portions Licensed under the MIT License. Copyright (c) 2016 Carl Jackson import ( "bytes" "crypto/subtle" "github.com/pendo-io/aws-sdk-go/aws/awserr" ) const ( pkcs7MaxPaddingSize = 255 ) type pkcs7Padder struct { blockSize int } // NewPKCS7Padder follows the RFC 2315: https://www.ietf.org/rfc/rfc2315.txt // PKCS7 padding is subject to side-channel attacks and timing attacks. For // the most secure data, use an authenticated crypto algorithm. func NewPKCS7Padder(blockSize int) Padder
var errPKCS7Padding = awserr.New("InvalidPadding", "invalid padding", nil) // Pad will pad the data relative to how many bytes have been read. // Pad follows the PKCS7 standard. func (padder pkcs7Padder) Pad(buf []byte, n int) ([]byte, error) { if padder.blockSize < 1 || padder.blockSize > pkcs7MaxPaddingSize { return nil, awserr.New("InvalidBlockSize", "block size must be between 1 and 255", nil) } size := padder.blockSize - (n % padder.blockSize) pad := bytes.Repeat([]byte{byte(size)}, size) buf = append(buf, pad...) return buf, nil } // Unpad will unpad the correct amount of bytes based off // of the PKCS7 standard func (padder pkcs7Padder) Unpad(buf []byte) ([]byte, error) { if len(buf) == 0 { return nil, errPKCS7Padding } // Here be dragons. We're attempting to check the padding in constant // time. The only piece of information here which is public is len(buf). // This code is modeled loosely after tls1_cbc_remove_padding from // OpenSSL. padLen := buf[len(buf)-1] toCheck := pkcs7MaxPaddingSize good := 1 if toCheck > len(buf) { toCheck = len(buf) } for i := 0; i < toCheck; i++ { b := buf[len(buf)-1-i] outOfRange := subtle.ConstantTimeLessOrEq(int(padLen), i) equal := subtle.ConstantTimeByteEq(padLen, b) good &= subtle.ConstantTimeSelect(outOfRange, 1, equal) } good &= subtle.ConstantTimeLessOrEq(1, int(padLen)) good &= subtle.ConstantTimeLessOrEq(int(padLen), len(buf)) if good != 1 { return nil, errPKCS7Padding } return buf[:len(buf)-int(padLen)], nil } func (padder pkcs7Padder) Name() string { return "PKCS7Padding" }
{ return pkcs7Padder{blockSize} }
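A minimal sketch of the arithmetic Pad relies on, using nothing beyond the standard library: the pad length is blockSize - (n % blockSize), so it is always between 1 and blockSize, and already-aligned input gains a full extra block. That rule is what makes Unpad unambiguous, since the last byte always encodes how many bytes to strip.

package main

import (
	"bytes"
	"fmt"
)

// pkcs7Pad mirrors the sizing rule used by Pad above: append `size`
// copies of the byte `size`, where size is between 1 and blockSize.
func pkcs7Pad(buf []byte, blockSize int) []byte {
	size := blockSize - (len(buf) % blockSize)
	return append(buf, bytes.Repeat([]byte{byte(size)}, size)...)
}

func main() {
	fmt.Printf("%v\n", pkcs7Pad([]byte("YELLOW"), 8))
	// [89 69 76 76 79 87 2 2] - two bytes of 0x02 complete the block

	fmt.Printf("%v\n", pkcs7Pad([]byte("12345678"), 8))
	// an aligned input grows by a whole block of 0x08 bytes
}

The constant-time loop in Unpad then scans a fixed pkcs7MaxPaddingSize window regardless of the actual pad length, so a forged ciphertext cannot be distinguished from a valid one by timing alone.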
init_target_cluster_test.go
// Copyright (c) 2017-2020 VMware, Inc. or its affiliates // SPDX-License-Identifier: Apache-2.0 package hub import ( "database/sql/driver" "fmt" "os" "os/exec" "reflect" "strings" "testing" sqlmock "github.com/DATA-DOG/go-sqlmock" "github.com/greenplum-db/gp-common-go-libs/dbconn" "github.com/greenplum-db/gp-common-go-libs/testhelper" "golang.org/x/xerrors" "github.com/greenplum-db/gpupgrade/greenplum" "github.com/greenplum-db/gpupgrade/step" "github.com/greenplum-db/gpupgrade/testutils/exectest" "github.com/greenplum-db/gpupgrade/utils" ) func gpinitsystem() {} func
() { os.Stdout.WriteString("[WARN]:-Master open file limit is 256 should be >= 65535") os.Exit(1) } func init() { exectest.RegisterMains( gpinitsystem, gpinitsystem_Exits1, ) } func TestCreateInitialInitsystemConfig(t *testing.T) { t.Run("successfully get initial gpinitsystem config array", func(t *testing.T) { utils.System.Hostname = func() (string, error) { return "mdw", nil } actualConfig, err := CreateInitialInitsystemConfig("/data/qddir/seg.AAAAAAAAAAA.-1") if err != nil { t.Fatalf("got %#v, want nil", err) } expectedConfig := []string{ `ARRAY_NAME="gp_upgrade cluster"`, "SEG_PREFIX=seg.AAAAAAAAAAA.", "TRUSTED_SHELL=ssh", } if !reflect.DeepEqual(actualConfig, expectedConfig) { t.Errorf("got %v, want %v", actualConfig, expectedConfig) } }) } func TestGetCheckpointSegmentsAndEncoding(t *testing.T) { t.Run("successfully get the GUC values", func(t *testing.T) { dbConn, sqlMock := testhelper.CreateAndConnectMockDB(1) checkpointRow := sqlmock.NewRows([]string{"string"}).AddRow(driver.Value("8")) encodingRow := sqlmock.NewRows([]string{"string"}).AddRow(driver.Value("UNICODE")) sqlMock.ExpectQuery("SELECT .*checkpoint.*").WillReturnRows(checkpointRow) sqlMock.ExpectQuery("SELECT .*server.*").WillReturnRows(encodingRow) actualConfig, err := GetCheckpointSegmentsAndEncoding([]string{}, dbConn) if err != nil { t.Fatalf("got %#v, want nil", err) } expectedConfig := []string{"CHECK_POINT_SEGMENTS=8", "ENCODING=UNICODE"} if !reflect.DeepEqual(actualConfig, expectedConfig) { t.Errorf("got %v, want %v", actualConfig, expectedConfig) } }) } func TestWriteSegmentArray(t *testing.T) { test := func(t *testing.T, initializeConfig InitializeConfig, expected []string) { t.Helper() actual, err := WriteSegmentArray([]string{}, initializeConfig) if err != nil { t.Errorf("got %#v", err) } if !reflect.DeepEqual(actual, expected) { // Help developers see differences between the lines. 
pretty := func(lines []string) string {
				b := new(strings.Builder)

				fmt.Fprintln(b, "[")
				for _, l := range lines {
					fmt.Fprintf(b, "  %q\n", l)
				}
				fmt.Fprint(b, "]")

				return b.String()
			}

			t.Errorf("got %v, want %v", pretty(actual), pretty(expected))
		}
	}

	t.Run("renders the config file as expected", func(t *testing.T) {
		config := InitializeConfig{
			Master: greenplum.SegConfig{ContentID: -1, DbID: 1, Hostname: "mdw", DataDir: "/data/qddir_upgrade/seg-1", Role: "p", Port: 15433},
			Primaries: []greenplum.SegConfig{
				{ContentID: 0, DbID: 2, Hostname: "sdw1", DataDir: "/data/dbfast1_upgrade/seg1", Role: "p", Port: 15434},
				{ContentID: 1, DbID: 3, Hostname: "sdw2", DataDir: "/data/dbfast2_upgrade/seg2", Role: "p", Port: 15434},
			},
		}

		test(t, config, []string{
			"QD_PRIMARY_ARRAY=mdw~15433~/data/qddir_upgrade/seg-1~1~-1",
			"declare -a PRIMARY_ARRAY=(",
			"\tsdw1~15434~/data/dbfast1_upgrade/seg1~2~0",
			"\tsdw2~15434~/data/dbfast2_upgrade/seg2~3~1",
			")",
		})
	})

	t.Run("errors when source cluster contains no master segment", func(t *testing.T) {
		_, err := WriteSegmentArray([]string{}, InitializeConfig{})
		if err == nil {
			t.Errorf("expected error got nil")
		}
	})
}

func TestRunInitsystemForTargetCluster(t *testing.T) {
	cluster6X := &greenplum.Cluster{
		BinDir:  "/target/bin",
		Version: dbconn.NewVersion("6.0.0"),
	}

	cluster7X := &greenplum.Cluster{
		BinDir:  "/target/bin",
		Version: dbconn.NewVersion("7.0.0"),
	}

	gpinitsystemConfigPath := "/dir/.gpupgrade/gpinitsystem_config"

	execCommand = nil
	defer func() { execCommand = nil }()

	t.Run("does not use --ignore-warnings when upgrading to GPDB7 or higher", func(t *testing.T) {
		execCommand = exectest.NewCommandWithVerifier(gpinitsystem,
			func(path string, args ...string) {
				if path != "bash" {
					t.Errorf("executed %q, want bash", path)
				}

				expected := []string{"-c", "source /target/greenplum_path.sh && " +
					"/target/bin/gpinitsystem -a -I /dir/.gpupgrade/gpinitsystem_config"}
				if !reflect.DeepEqual(args, expected) {
					t.Errorf("args %q, want %q", args, expected)
				}
			})

		err := RunInitsystemForTargetCluster(step.DevNullStream, cluster7X, gpinitsystemConfigPath)
		if err != nil {
			t.Error("gpinitsystem failed")
		}
	})

	t.Run("only uses --ignore-warnings when upgrading to GPDB6", func(t *testing.T) {
		execCommand = exectest.NewCommandWithVerifier(gpinitsystem,
			func(path string, args ...string) {
				if path != "bash" {
					t.Errorf("executed %q, want bash", path)
				}

				expected := []string{"-c", "source /target/greenplum_path.sh && " +
					"/target/bin/gpinitsystem -a -I /dir/.gpupgrade/gpinitsystem_config --ignore-warnings"}
				if !reflect.DeepEqual(args, expected) {
					t.Errorf("args %q, want %q", args, expected)
				}
			})

		err := RunInitsystemForTargetCluster(step.DevNullStream, cluster6X, gpinitsystemConfigPath)
		if err != nil {
			t.Error("gpinitsystem failed")
		}
	})

	t.Run("should use executables in the target's bindir even if bindir has a trailing slash", func(t *testing.T) {
		execCommand = exectest.NewCommandWithVerifier(gpinitsystem,
			func(path string, args ...string) {
				if path != "bash" {
					t.Errorf("executed %q, want bash", path)
				}

				expected := []string{"-c", "source /target/greenplum_path.sh && " +
					"/target/bin/gpinitsystem -a -I /dir/.gpupgrade/gpinitsystem_config"}
				if !reflect.DeepEqual(args, expected) {
					t.Errorf("args %q, want %q", args, expected)
				}
			})

		cluster7X.BinDir += "/"
		err := RunInitsystemForTargetCluster(step.DevNullStream, cluster7X, gpinitsystemConfigPath)
		if err != nil {
			t.Error("gpinitsystem failed")
		}
	})

	t.Run("returns an error when gpinitsystem fails with --ignore-warnings when upgrading to GPDB6", func(t *testing.T) {
execCommand = exectest.NewCommand(gpinitsystem_Exits1) err := RunInitsystemForTargetCluster(step.DevNullStream, cluster6X, gpinitsystemConfigPath) var actual *exec.ExitError if !xerrors.As(err, &actual) { t.Fatalf("got %#v, want ExitError", err) } if actual.ExitCode() != 1 { t.Errorf("got %d, want 1 ", actual.ExitCode()) } }) t.Run("returns an error when gpinitsystem errors when upgrading to GPDB7 or higher", func(t *testing.T) { execCommand = exectest.NewCommand(gpinitsystem_Exits1) err := RunInitsystemForTargetCluster(step.DevNullStream, cluster7X, gpinitsystemConfigPath) var actual *exec.ExitError if !xerrors.As(err, &actual) { t.Fatalf("got %#v, want ExitError", err) } if actual.ExitCode() != 1 { t.Errorf("got %d, want 1", actual.ExitCode()) } }) } func TestGetMasterSegPrefix(t *testing.T) { t.Run("returns a valid seg prefix given", func(t *testing.T) { cases := []struct { desc string MasterDataDir string }{ {"an absolute path", "/data/master/gpseg-1"}, {"a relative path", "../master/gpseg-1"}, {"a implicitly relative path", "gpseg-1"}, } for _, c := range cases { actual, err := GetMasterSegPrefix(c.MasterDataDir) if err != nil { t.Fatalf("got %#v, want nil", err) } expected := "gpseg" if actual != expected { t.Errorf("got %q, want %q", actual, expected) } } }) t.Run("returns errors when given", func(t *testing.T) { cases := []struct { desc string MasterDataDir string }{ {"the empty string", ""}, {"a path without a content identifier", "/opt/myseg"}, {"a path with a segment content identifier", "/opt/myseg2"}, {"a path that is only a content identifier", "-1"}, {"a path that ends in only a content identifier", "///-1"}, } for _, c := range cases { _, err := GetMasterSegPrefix(c.MasterDataDir) if err == nil { t.Fatalf("got nil, want err") } } }) }
gpinitsystem_Exits1
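exectest is gpupgrade's own helper, but the pattern it wraps is the standard library's "helper process" trick: the test binary re-executes itself and runs a registered main in place of the real command. A rough, hypothetical sketch of that underlying technique follows; fakeExecCommand and TestHelperProcess are illustrative names, not the exectest API.

package hub

import (
	"os"
	"os/exec"
	"testing"
)

// fakeExecCommand builds an *exec.Cmd that re-runs this test binary,
// selecting TestHelperProcess instead of launching the named program.
func fakeExecCommand(name string, args ...string) *exec.Cmd {
	cs := append([]string{"-test.run=TestHelperProcess", "--", name}, args...)
	cmd := exec.Command(os.Args[0], cs...)
	cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1")
	return cmd
}

// TestHelperProcess is not a real test: it only runs when the fake
// command re-invokes the binary, and it plays the faked subprocess.
func TestHelperProcess(t *testing.T) {
	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
		return
	}
	// Behave like gpinitsystem_Exits1 above: warn and exit nonzero.
	os.Stdout.WriteString("[WARN]:-Master open file limit is 256 should be >= 65535")
	os.Exit(1)
}

Registering mains up front, as exectest.RegisterMains does in the row above, is a tidier variant of the same idea: each registered function becomes a selectable fake subprocess.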
workflow.go
package cancelactivity

import (
	"fmt"
	"time"

	"go.temporal.io/sdk/workflow"
)

/**
 * This is the cancel activity workflow sample.
 */

// Workflow is the cancel activity workflow definition.
func Workflow(ctx workflow.Context) error {
	ao := workflow.ActivityOptions{
		StartToCloseTimeout: 30 * time.Minute,
		HeartbeatTimeout:    5 * time.Second,
		WaitForCancellation: true,
	}
	ctx = workflow.WithActivityOptions(ctx, ao)
	logger := workflow.GetLogger(ctx)
	logger.Info("cancel workflow started")
	var a *Activities // Used to call activities by function pointer
	defer func() {
		// When the workflow is canceled, it has to get a new disconnected context to execute any activities
		newCtx, _ := workflow.NewDisconnectedContext(ctx)
		err := workflow.ExecuteActivity(newCtx, a.CleanupActivity).Get(ctx, nil)
		if err != nil
}() var result string err := workflow.ExecuteActivity(ctx, a.ActivityToBeCanceled).Get(ctx, &result) logger.Info(fmt.Sprintf("activityToBeCanceled returns %v, %v", result, err)) err = workflow.ExecuteActivity(ctx, a.ActivityToBeSkipped).Get(ctx, nil) logger.Error("Error from activityToBeSkipped", "Error", err) logger.Info("Workflow completed.") return nil }
{ logger.Error("Cleanup activity failed", "Error", err) }
test_create_backup.py
# Copyright 2011 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_utils import timeutils import webob from nova.api.openstack import common from nova.api.openstack.compute import create_backup \ as create_backup_v21 from nova.compute import api from nova.compute import utils as compute_utils from nova import exception from nova import test from nova.tests.unit.api.openstack.compute import admin_only_action_common from nova.tests.unit.api.openstack import fakes from nova.tests.unit import fake_instance class CreateBackupTestsV21(admin_only_action_common.CommonMixin, test.NoDBTestCase): create_backup = create_backup_v21 controller_name = 'CreateBackupController' validation_error = exception.ValidationError def setUp(self): super(CreateBackupTestsV21, self).setUp() self.controller = getattr(self.create_backup, self.controller_name)() self.compute_api = self.controller.compute_api patch_get = mock.patch.object(self.compute_api, 'get') self.mock_get = patch_get.start() self.addCleanup(patch_get.stop) @mock.patch.object(common, 'check_img_metadata_properties_quota') @mock.patch.object(api.API, 'backup') def test_create_backup_with_metadata(self, mock_backup, mock_check_image): metadata = {'123': 'asdf'} body = { 'createBackup': { 'name': 'Backup 1', 'backup_type': 'daily', 'rotation': 1, 'metadata': metadata, }, } image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1', properties=metadata) instance = fake_instance.fake_instance_obj(self.context) self.mock_get.return_value = instance mock_backup.return_value = image res = self.controller._create_backup(self.req, instance.uuid, body=body) mock_check_image.assert_called_once_with(self.context, metadata) mock_backup.assert_called_once_with(self.context, instance, 'Backup 1', 'daily', 1, extra_properties=metadata) self.assertEqual(202, res.status_int) self.assertIn('fake-image-id', res.headers['Location']) def test_create_backup_no_name(self): # Name is required for backups. 
body = { 'createBackup': { 'backup_type': 'daily', 'rotation': 1, }, } self.assertRaises(self.validation_error, self.controller._create_backup, self.req, fakes.FAKE_UUID, body=body) def test_create_backup_name_with_leading_trailing_spaces(self): body = { 'createBackup': { 'name': ' test ', 'backup_type': 'daily', 'rotation': 1, }, } self.assertRaises(self.validation_error, self.controller._create_backup, self.req, fakes.FAKE_UUID, body=body) @mock.patch.object(common, 'check_img_metadata_properties_quota') @mock.patch.object(api.API, 'backup') def test_create_backup_name_with_leading_trailing_spaces_compat_mode( self, mock_backup, mock_check_image): body = { 'createBackup': { 'name': ' test ', 'backup_type': 'daily', 'rotation': 1, }, } image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1', properties={}) instance = fake_instance.fake_instance_obj(self.context) self.mock_get.return_value = instance mock_backup.return_value = image self.req.set_legacy_v2() self.controller._create_backup(self.req, instance.uuid, body=body) mock_check_image.assert_called_once_with(self.context, {}) mock_backup.assert_called_once_with(self.context, instance, 'test', 'daily', 1, extra_properties={}) def test_create_backup_no_rotation(self): # Rotation is required for backup requests. body = { 'createBackup': { 'name': 'Backup 1', 'backup_type': 'daily', }, } self.assertRaises(self.validation_error, self.controller._create_backup, self.req, fakes.FAKE_UUID, body=body) def test_create_backup_negative_rotation(self): """Rotation must be greater than or equal to zero for backup requests """ body = { 'createBackup': { 'name': 'Backup 1', 'backup_type': 'daily', 'rotation': -1, }, } self.assertRaises(self.validation_error, self.controller._create_backup, self.req, fakes.FAKE_UUID, body=body) def test_create_backup_negative_rotation_with_string_number(self): body = { 'createBackup': { 'name': 'Backup 1', 'backup_type': 'daily', 'rotation': '-1', }, } self.assertRaises(self.validation_error, self.controller._create_backup, self.req, fakes.FAKE_UUID, body=body) def test_create_backup_rotation_with_empty_string(self): body = { 'createBackup': { 'name': 'Backup 1', 'backup_type': 'daily', 'rotation': '', }, } self.assertRaises(self.validation_error, self.controller._create_backup, self.req, fakes.FAKE_UUID, body=body) def test_create_backup_no_backup_type(self): # Backup Type (daily or weekly) is required for backup requests. body = { 'createBackup': { 'name': 'Backup 1', 'rotation': 1, }, } self.assertRaises(self.validation_error, self.controller._create_backup, self.req, fakes.FAKE_UUID, body=body) def test_create_backup_non_dict_metadata(self): body = { 'createBackup': { 'name': 'Backup 1', 'backup_type': 'daily', 'rotation': 1, 'metadata': 'non_dict', }, } self.assertRaises(self.validation_error, self.controller._create_backup, self.req, fakes.FAKE_UUID, body=body) def test_create_backup_bad_entity(self): body = {'createBackup': 'go'} self.assertRaises(self.validation_error, self.controller._create_backup, self.req, fakes.FAKE_UUID, body=body) @mock.patch.object(common, 'check_img_metadata_properties_quota') @mock.patch.object(api.API, 'backup') def test_create_backup_rotation_is_zero(self, mock_backup, mock_check_image): # The happy path for creating backups if rotation is zero. 
body = { 'createBackup': { 'name': 'Backup 1', 'backup_type': 'daily', 'rotation': 0, }, } image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1', properties={}) instance = fake_instance.fake_instance_obj(self.context) self.mock_get.return_value = instance mock_backup.return_value = image res = self.controller._create_backup(self.req, instance.uuid, body=body) mock_check_image.assert_called_once_with(self.context, {}) mock_backup.assert_called_once_with(self.context, instance, 'Backup 1', 'daily', 0, extra_properties={}) self.assertEqual(202, res.status_int) self.assertNotIn('Location', res.headers) @mock.patch.object(common, 'check_img_metadata_properties_quota') @mock.patch.object(api.API, 'backup') def test_create_backup_rotation_is_positive(self, mock_backup, mock_check_image): # The happy path for creating backups if rotation is positive. body = { 'createBackup': { 'name': 'Backup 1', 'backup_type': 'daily', 'rotation': 1, }, } image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1', properties={}) instance = fake_instance.fake_instance_obj(self.context) self.mock_get.return_value = instance mock_backup.return_value = image res = self.controller._create_backup(self.req, instance.uuid, body=body) mock_check_image.assert_called_once_with(self.context, {}) mock_backup.assert_called_once_with(self.context, instance, 'Backup 1', 'daily', 1, extra_properties={}) self.assertEqual(202, res.status_int) self.assertIn('fake-image-id', res.headers['Location']) @mock.patch.object(common, 'check_img_metadata_properties_quota') @mock.patch.object(api.API, 'backup') def
( self, mock_backup, mock_check_image): body = { 'createBackup': { 'name': 'Backup 1', 'backup_type': 'daily', 'rotation': '1', }, } image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1', properties={}) instance = fake_instance.fake_instance_obj(self.context) self.mock_get.return_value = instance mock_backup.return_value = image res = self.controller._create_backup(self.req, instance['uuid'], body=body) mock_check_image.assert_called_once_with(self.context, {}) mock_backup.assert_called_once_with(self.context, instance, 'Backup 1', 'daily', 1, extra_properties={}) self.assertEqual(202, res.status_int) self.assertIn('fake-image-id', res.headers['Location']) @mock.patch.object(common, 'check_img_metadata_properties_quota') @mock.patch.object(api.API, 'backup', return_value=dict( id='fake-image-id', status='ACTIVE', name='Backup 1', properties={})) def test_create_backup_v2_45(self, mock_backup, mock_check_image): """Tests the 2.45 microversion to ensure the Location header is not in the response. """ body = { 'createBackup': { 'name': 'Backup 1', 'backup_type': 'daily', 'rotation': '1', }, } instance = fake_instance.fake_instance_obj(self.context) self.mock_get.return_value = instance req = fakes.HTTPRequest.blank('', version='2.45') res = self.controller._create_backup(req, instance['uuid'], body=body) self.assertIsInstance(res, dict) self.assertEqual('fake-image-id', res['image_id']) @mock.patch.object(common, 'check_img_metadata_properties_quota') @mock.patch.object(api.API, 'backup') def test_create_backup_raises_conflict_on_invalid_state(self, mock_backup, mock_check_image): body_map = { 'createBackup': { 'name': 'Backup 1', 'backup_type': 'daily', 'rotation': 1, }, } instance = fake_instance.fake_instance_obj(self.context) self.mock_get.return_value = instance mock_backup.side_effect = exception.InstanceInvalidState( attr='vm_state', instance_uuid=instance.uuid, state='foo', method='backup') ex = self.assertRaises(webob.exc.HTTPConflict, self.controller._create_backup, self.req, instance.uuid, body=body_map) self.assertIn("Cannot 'createBackup' instance %(id)s" % {'id': instance.uuid}, ex.explanation) def test_create_backup_with_non_existed_instance(self): body_map = { 'createBackup': { 'name': 'Backup 1', 'backup_type': 'daily', 'rotation': 1, }, } uuid = fakes.FAKE_UUID self.mock_get.side_effect = exception.InstanceNotFound( instance_id=uuid) self.assertRaises(webob.exc.HTTPNotFound, self.controller._create_backup, self.req, uuid, body=body_map) def test_create_backup_with_invalid_create_backup(self): body = { 'createBackupup': { 'name': 'Backup 1', 'backup_type': 'daily', 'rotation': 1, }, } self.assertRaises(self.validation_error, self.controller._create_backup, self.req, fakes.FAKE_UUID, body=body) @mock.patch.object(common, 'check_img_metadata_properties_quota') @mock.patch.object(compute_utils, 'is_volume_backed_instance', return_value=True) def test_backup_volume_backed_instance(self, mock_is_volume_backed, mock_check_image): body = { 'createBackup': { 'name': 'BackupMe', 'backup_type': 'daily', 'rotation': 3 }, } updates = {'vm_state': 'active', 'task_state': None, 'launched_at': timeutils.utcnow()} instance = fake_instance.fake_instance_obj(self.context, **updates) instance.image_ref = None self.mock_get.return_value = instance ex = self.assertRaises(webob.exc.HTTPBadRequest, self.controller._create_backup, self.req, instance['uuid'], body=body) mock_check_image.assert_called_once_with(self.context, {}) mock_is_volume_backed.assert_called_once_with(self.context, 
instance)
        self.assertIn('Backup is not supported for volume-backed instances',
                      str(ex))


class CreateBackupTestsV239(test.NoDBTestCase):

    def setUp(self):
        super(CreateBackupTestsV239, self).setUp()
        self.controller = create_backup_v21.CreateBackupController()
        self.req = fakes.HTTPRequest.blank('', version='2.39')

    @mock.patch.object(common, 'check_img_metadata_properties_quota')
    @mock.patch.object(common, 'get_instance')
    def test_create_backup_no_quota_checks(self, mock_get_instance,
                                           mock_check_quotas):
        # 'mock_get_instance' helps to skip the whole logic of the action:
        # raising HTTPNotFound fails the request early, before any quota
        # check could run, which is exactly what this test asserts.
        mock_get_instance.side_effect = webob.exc.HTTPNotFound
        metadata = {'123': 'asdf'}
        body = {
            'createBackup': {
                'name': 'Backup 1',
                'backup_type': 'daily',
                'rotation': 1,
                'metadata': metadata,
            },
        }
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller._create_backup,
                          self.req, fakes.FAKE_UUID, body=body)
        # Starting from version 2.39, no quota checks are performed on the
        # Nova side for the 'createBackup' action, since the
        # 'image-metadata' proxy API was removed.
        mock_check_quotas.assert_not_called()
test_create_backup_rotation_is_string_number
dropdown.component.ts
import { Component, Input, Output, EventEmitter, ElementRef, ContentChild, OnInit, ViewChild, AfterContentInit, HostListener, OnDestroy, HostBinding, TemplateRef, ApplicationRef, AfterViewInit } from "@angular/core"; import { NG_VALUE_ACCESSOR, ControlValueAccessor } from "@angular/forms"; // Observable import is required here so typescript can compile correctly import { Observable, of, Subscription } from "rxjs"; import { AbstractDropdownView } from "./abstract-dropdown-view.class"; import { I18n } from "./../i18n/index"; import { ListItem } from "./list-item.interface"; import { DropdownService } from "./dropdown.service"; import { ElementService } from "./../utils/utils.module"; import { hasScrollableParents } from "../utils"; /** * Drop-down lists enable users to select one or more items from a list. * * #### Opening behavior/List DOM placement * By default the dropdown will try to figure out the best placement for the dropdown list. * * If it's not contained within any scrolling elements, it will open inline, if it _is_ * contained within a scrolling container it will try to open in the body, or an `ibm-placeholder`. * * To control this behavior you can use the `appendInline` input: * - `[appendInline]="null"` is the default (auto detection) * - `[appendInline]="false"` will always append to the body/`ibm-placeholder` * - `[appendInline]="true"` will always append inline (next to the dropdown button) * * [See demo](../../?path=/story/dropdown--basic) * * <example-url>../../iframe.html?id=dropdown--basic</example-url> */ @Component({ selector: "ibm-dropdown", template: ` <label *ngIf="label" class="bx--label"> <ng-container *ngIf="!isTemplate(label)">{{label}}</ng-container> <ng-template *ngIf="isTemplate(label)" [ngTemplateOutlet]="label"></ng-template> </label> <div *ngIf="helperText" class="bx--form__helper-text"> <ng-container *ngIf="!isTemplate(helperText)">{{helperText}}</ng-container> <ng-template *ngIf="isTemplate(helperText)" [ngTemplateOutlet]="helperText"></ng-template> </div> <div [id]="id" class="bx--list-box" [ngClass]="{ 'bx--dropdown': type !== 'multi', 'bx--multiselect': type === 'multi', 'bx--multi-select--selected': type === 'multi' && getSelectedCount() > 0, 'bx--dropdown--light': theme === 'light', 'bx--list-box--light': theme === 'light', 'bx--list-box--inline': inline, 'bx--skeleton': skeleton, 'bx--dropdown--disabled bx--list-box--disabled': disabled, 'bx--dropdown--invalid': invalid, 'bx--list-box--up': dropUp, 'bx--dropdown--xl bx--list-box--xl': size === 'xl', 'bx--dropdown--sm bx--list-box--sm': size === 'sm', 'bx--list-box--expanded': !menuIsClosed }"> <button #dropdownButton type="button" class="bx--list-box__field" [ngClass]="{'a': !menuIsClosed}" [attr.aria-expanded]="!menuIsClosed" [attr.aria-disabled]="disabled" aria-haspopup="listbox" (click)="disabled ? $event.stopPropagation() : toggleMenu()" (blur)="onBlur()" [attr.disabled]="disabled ? 
true : null"> <div (click)="clearSelected()" (keydown.enter)="clearSelected()" *ngIf="type === 'multi' && getSelectedCount() > 0" class="bx--list-box__selection bx--tag--filter bx--list-box__selection--multi" tabindex="0" [title]="clearText"> {{getSelectedCount()}} <svg focusable="false" preserveAspectRatio="xMidYMid meet" style="will-change: transform;" role="img" xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" aria-hidden="true"> <path d="M12 4.7l-.7-.7L8 7.3 4.7 4l-.7.7L7.3 8 4 11.3l.7.7L8 8.7l3.3 3.3.7-.7L8.7 8z"></path> </svg> </div> <span *ngIf="isRenderString()" class="bx--list-box__label">{{getDisplayStringValue() | async}}</span> <ng-template *ngIf="!isRenderString()" [ngTemplateOutletContext]="getRenderTemplateContext()" [ngTemplateOutlet]="displayValue"> </ng-template> <svg ibmIconWarningFilled size="16" *ngIf="invalid" class="bx--dropdown__invalid-icon"> </svg> <ibm-icon-chevron-down size="16" *ngIf="!skeleton" class="bx--list-box__menu-icon" [attr.aria-label]="menuButtonLabel" [ngClass]="{'bx--list-box__menu-icon--open': !menuIsClosed }"> </ibm-icon-chevron-down> </button> <div #dropdownMenu [ngClass]="{ 'drop-up': dropUp }"> <ng-content *ngIf="!menuIsClosed"></ng-content> </div> </div> <div *ngIf="invalid"> <div *ngIf="!isTemplate(invalidText)" class="bx--form-requirement">{{ invalidText }}</div> <ng-template *ngIf="isTemplate(invalidText)" [ngTemplateOutlet]="invalidText"></ng-template> </div> `, providers: [ { provide: NG_VALUE_ACCESSOR, useExisting: Dropdown, multi: true } ] }) export class Dropdown implements OnInit, AfterContentInit, AfterViewInit, OnDestroy, ControlValueAccessor { static dropdownCount = 0; @Input() id = `dropdown-${Dropdown.dropdownCount++}`; /** * Label for the dropdown. */ @Input() label: string | TemplateRef<any>; /** * Sets the optional helper text. */ @Input() helperText: string | TemplateRef<any>; /** * Value displayed if no item is selected. */ @Input() placeholder = ""; /** * The selected value from the `Dropdown`. Can be a string or template. */ @Input() displayValue: string | TemplateRef<any> = ""; /** * Sets the optional clear button tooltip text. */ @Input() clearText: string = this.i18n.get().DROPDOWN.CLEAR; /** * Size to render the dropdown field. * * @deprecated since v4 */ @Input() size: "sm" | "md" | "xl" = "md"; /** * Defines whether or not the `Dropdown` supports selecting multiple items as opposed to single * item selection. */ @Input() type: "single" | "multi" = "single"; /** * `light` or `dark` dropdown theme */ @Input() theme: "light" | "dark" = "dark"; /** * Set to `true` to disable the dropdown. */ @Input() disabled = false; /** * Set to `true` for a loading dropdown. */ @Input() skeleton = false; /** * Set to `true` for an inline dropdown. */ @Input() inline = false; /** * Set to `true` for a dropdown without arrow key activation. */ @Input() disableArrowKeys = false; /** * Set to `true` for invalid state. */ @Input() invalid = false; /** * Value displayed if dropdown is in invalid state. */ @Input() invalidText: string | TemplateRef<any>; /** * set to `true` to place the dropdown view inline with the component */ @Input() appendInline: boolean = null; /** * Query string for the element that contains the `Dropdown`. * Used to trigger closing the dropdown if it scrolls outside of the viewport of the `scrollableContainer`. */ @Input() scrollableContainer: string; /** * Deprecated. Use `itemValueKey` instead. 
	 * Specifies the property to be used as the return value to `ngModel`
	 * @deprecated since v4 use itemValueKey instead
	 */
	@Input() set value (newValue: string) {
		console.warn("Dropdown `value` property has been deprecated. Use `itemValueKey` instead");
		this.itemValueKey = newValue;
	}

	get value() {
		return this.itemValueKey;
	}

	/**
	 * Specifies the property to be used as the return value to `ngModel`
	 */
	@Input() itemValueKey: string;

	/**
	 * Specify feedback (mode) of the selection.
	 * `top`: selected item jumps to top
	 * `fixed`: selected item stays at its position
	 * `top-after-reopen`: selected item jumps to top after the dropdown is reopened
	 */
	@Input() selectionFeedback: "top" | "fixed" | "top-after-reopen" = "top-after-reopen";

	/**
	 * Accessible label for the button that opens the dropdown list.
	 * Defaults to the `DROPDOWN.OPEN` value from the i18n service.
	 */
	@Input() menuButtonLabel = this.i18n.get().DROPDOWN.OPEN;

	/**
	 * Provides the label for the "# selected" text.
	 * Defaults to the `DROPDOWN.SELECTED` value from the i18n service.
	 */
	@Input() selectedLabel = this.i18n.get().DROPDOWN.SELECTED;

	/**
	 * Emits selection events.
	 */
	@Output() selected: EventEmitter<Object> = new EventEmitter<Object>();

	/**
	 * Emits an event notifying other classes that the `Dropdown` has been closed (collapsed).
	 */
	@Output() onClose: EventEmitter<any> = new EventEmitter<any>();

	/**
	 * Emits an event notifying other classes that the `Dropdown` has been closed (collapsed).
	 */
	@Output() close: EventEmitter<any> = new EventEmitter<any>();

	/**
	 * Maintains a reference to the `AbstractDropdownView` object within the content DOM.
	 */
	// @ts-ignore
	@ContentChild(AbstractDropdownView, { static: true }) view: AbstractDropdownView;

	/**
	 * Maintains a reference to the view DOM element of the `Dropdown` button.
	 */
	// @ts-ignore
	@ViewChild("dropdownButton", { static: false }) dropdownButton;

	/**
	 * ViewChild of the dropdown view.
	 */
	// @ts-ignore
	@ViewChild("dropdownMenu", { static: false }) dropdownMenu;

	@HostBinding("class.bx--dropdown__wrapper") hostClass = true;

	/**
	 * Set to `true` if the dropdown is closed (not expanded).
	 */
	menuIsClosed = true;

	/**
	 * Controls whether the `drop-up` class is applied.
	 */
	dropUp = false;

	// .bind creates a new function, so we declare the methods below
	// but .bind them up here
	noop = this._noop.bind(this);
	outsideClick = this._outsideClick.bind(this);
	outsideKey = this._outsideKey.bind(this);
	keyboardNav = this._keyboardNav.bind(this);

	protected visibilitySubscription = new Subscription();

	protected onTouchedCallback: () => void = this._noop;

	// primarily used to capture and propagate input to `writeValue` before the content is available
	protected writtenValue = [];

	/**
	 * Creates an instance of Dropdown.
	 */
	constructor(
		protected elementRef: ElementRef,
		protected i18n: I18n,
		protected dropdownService: DropdownService,
		protected appRef: ApplicationRef,
		protected elementService: ElementService) {}

	/**
	 * Updates the `type` property in the `@ContentChild`.
	 * The `type` property specifies whether the `Dropdown` allows single selection or multi selection.
	 */
	ngOnInit() {
		if (this.view) {
			this.view.type = this.type;
		}
	}

	/**
	 * Initializes classes and subscribes to events for single or multi selection.
*/ ngAfterContentInit() { if (!this.view) { return; } if (this.writtenValue && this.writtenValue.length) { this.writeValue(this.writtenValue); } this.view.type = this.type; this.view.size = this.size; this.view.select.subscribe(event => { if (this.type === "multi") { // if we have a `value` selector and selected items map them appropriately if (this.itemValueKey && this.view.getSelected()) { const values = this.view.getSelected().map(item => item[this.itemValueKey]); this.propagateChange(values); // otherwise just pass up the values from `getSelected` } else { this.propagateChange(this.view.getSelected()); } } else { this.closeMenu(); if (event.item && event.item.selected) { if (this.itemValueKey) { this.propagateChange(event.item[this.itemValueKey]); } else { this.propagateChange(event.item); } } else { this.propagateChange(null); } } // only emit selected for "organic" selections if (event && !event.isUpdate) { this.checkForReorder(); this.selected.emit(event); } // manually tick the app so the view picks up any changes this.appRef.tick(); }); } ngAfterViewInit() { // if appendInline is default valued (null) we should: // 1. if there are scrollable parents (not including body) don't append inline // this should also cover the case where the dropdown is in a modal // (where we _do_ want to append to the placeholder) if (this.appendInline === null && hasScrollableParents(this.elementRef.nativeElement)) { this.appendInline = false; // 2. otherwise we should append inline } else if (this.appendInline === null) { this.appendInline = true; } this.checkForReorder(); } /** * Removing the `Dropdown` from the body if it is appended to the body. */ ngOnDestroy() { if (!this.appendInline) { this._appendToDropdown(); } } /** * Propagates the injected `value`. */ writeValue(value: any) { // cache the written value so we can use it in `AfterContentInit` this.writtenValue = value; this.view.onItemsReady(() => { // propagate null/falsey as an array (deselect everything) if (!value) { this.view.propagateSelected([value]); } else if (this.type === "single") { if (this.itemValueKey) { // clone the specified item and update its state const newValue = Object.assign({}, this.view.getListItems().find(item => item[this.itemValueKey] === value)); newValue.selected = true; this.view.propagateSelected([newValue]); } else { // pass the singular value as an array of ListItem this.view.propagateSelected([value]); } } else { if (this.itemValueKey) { // clone the items and update their state based on the received value array // this way we don't lose any additional metadata that may be passed in via the `items` Input let newValues = []; for (const v of value) { for (const item of this.view.getListItems()) { if (item[this.itemValueKey] === v) { newValues.push(Object.assign({}, item, { selected: true })); } } } this.view.propagateSelected(newValues); } else { // we can safely assume we're passing an array of `ListItem`s this.view.propagateSelected(value); } } this.checkForReorder(); }); } onBlur() { this.onTouchedCallback(); } registerOnChange(fn: any) { this.propagateChange = fn; } /** * Registering the function injected to control the touch use of the `Dropdown`. */ registerOnTouched(fn: any) { this.onTouchedCallback = fn; } /** * function passed in by `registerOnChange` */ propagateChange = (_: any) => {}; /** * `ControlValueAccessor` method to programmatically disable the dropdown. 
* * ex: `this.formGroup.get("myDropdown").disable();` * * @param isDisabled `true` to disable the input */ setDisabledState(isDisabled: boolean) { this.disabled = isDisabled; } /** * Adds keyboard functionality for navigation, selection and closing of the `Dropdown`. */ @HostListener("keydown", ["$event"]) // "Esc", "Spacebar", "Down", and "Up" are IE specific values onKeyDown(event: KeyboardEvent) { if ((event.key === "Escape" || event.key === "Esc") && !this.menuIsClosed) { event.stopImmediatePropagation(); // don't unintentionally close other widgets that listen for Escape } if (event.key === "Escape" || event.key === "Esc") { event.preventDefault(); this.closeMenu(); this.dropdownButton.nativeElement.focus(); } else if (this.menuIsClosed && (event.key === " " || event.key === "ArrowDown" || event.key === "ArrowUp" || event.key === "Spacebar" || event.key === "Down" || event.key === "Up")) { if (this.disableArrowKeys && (event.key === "ArrowDown" || event.key === "ArrowUp" || event.key === "Down" || event.key === "Up")) { return; } event.preventDefault(); this.openMenu(); } if (!this.menuIsClosed && event.key === "Tab" && this.dropdownMenu.nativeElement.contains(event.target as Node)) { this.closeMenu(); } if (!this.menuIsClosed && event.key === "Tab" && event.shiftKey) { this.closeMenu(); }
} } closedDropdownNavigation(event) { // "Down", and "Up" are IE specific values if (event.key === "ArrowDown" || event.key === "Down") { event.preventDefault(); this.view.getCurrentItem().selected = false; let item = this.view.getNextItem(); if (item) { item.selected = true; } } else if (event.key === "ArrowUp" || event.key === "Up") { event.preventDefault(); this.view.getCurrentItem().selected = false; let item = this.view.getPrevItem(); if (item) { item.selected = true; } } } /** * Returns the display value if there is a selection and displayValue is set, * if there is just a selection the ListItem content property will be returned, * otherwise the placeholder will be returned. */ getDisplayStringValue(): Observable<string> { if (!this.view) { return; } let selected = this.view.getSelected(); if (selected.length && (!this.displayValue || !this.isRenderString())) { if (this.type === "multi") { return of(this.placeholder); } else { return of(selected[0].content); } } else if (selected.length && this.isRenderString()) { return of(this.displayValue as string); } return of(this.placeholder); } isRenderString(): boolean { return typeof this.displayValue === "string"; } getRenderTemplateContext() { if (!this.view) { return; } let selected = this.view.getSelected(); if (this.type === "multi") { return {items: selected}; } else if (selected && selected.length > 0) { return {item: selected[0]}; // this is to be compatible with the dropdown-list template } else { return {}; } } getSelectedCount(): number { if (this.view.getSelected()) { return this.view.getSelected().length; } } clearSelected() { if (this.disabled) { return; } for (const item of this.view.getListItems()) { item.selected = false; } this.selected.emit([]); this.propagateChange([]); } /** * Returns `true` if there is a value selected. */ valueSelected(): boolean { if (this.view.getSelected()) { return true; } return false; } _noop() {} /** * Handles clicks outside of the `Dropdown`. */ _outsideClick(event) { if (!this.elementRef.nativeElement.contains(event.target) && // if we're appendToBody the list isn't within the _elementRef, // so we've got to check if our target is possibly in there too. !this.dropdownMenu.nativeElement.contains(event.target)) { this.closeMenu(); } } _outsideKey(event) { if (!this.menuIsClosed && event.key === "Tab" && this.dropdownMenu.nativeElement.contains(event.target as Node)) { this.closeMenu(); } } /** * Handles keyboard events so users are controlling the `Dropdown` instead of unintentionally controlling outside elements. */ _keyboardNav(event: KeyboardEvent) { // "Esc" is an IE specific value if ((event.key === "Escape" || event.key === "Esc") && !this.menuIsClosed) { event.stopImmediatePropagation(); // don't unintentionally close modal if inside of it } if (event.key === "Escape" || event.key === "Esc") { event.preventDefault(); this.closeMenu(); this.dropdownButton.nativeElement.focus(); } else if (!this.menuIsClosed && event.key === "Tab") { // this way focus will start on the next focusable item from the dropdown // not the top of the body! this.dropdownButton.nativeElement.focus(); this.dropdownButton.nativeElement.dispatchEvent(new KeyboardEvent("keydown", {bubbles: true, cancelable: true, key: "Tab"})); this.closeMenu(); } } /** * Creates the `Dropdown` list appending it to the dropdown parent object instead of the body. 
*/ _appendToDropdown() { this.dropdownService.appendToDropdown(this.elementRef.nativeElement); this.dropdownMenu.nativeElement.removeEventListener("keydown", this.keyboardNav, true); } /** * Creates the `Dropdown` list as an element that is appended to the DOM body. */ _appendToBody() { const lightClass = this.theme === "light" ? " bx--list-box--light" : ""; const expandedClass = !this.menuIsClosed ? " bx--list-box--expanded" : ""; this.dropdownService.appendToBody( this.dropdownButton.nativeElement, this.dropdownMenu.nativeElement, `${this.elementRef.nativeElement.className}${lightClass}${expandedClass}`); this.dropdownMenu.nativeElement.addEventListener("keydown", this.keyboardNav, true); } /** * Expands the dropdown menu in the view. */ openMenu() { // prevents the dropdown from opening when list of items is empty if (this.view.getListItems().length === 0) { return; } this.menuIsClosed = false; // move the dropdown list to the body if we're not appending inline // and position it relative to the dropdown wrapper if (!this.appendInline) { const target = this.dropdownButton.nativeElement; const parent = this.elementRef.nativeElement; this.visibilitySubscription = this.elementService .visibility(target, parent) .subscribe(value => { if (!value.visible) { this.closeMenu(); } } ); this._appendToBody(); } // set the dropdown menu to drop up if it's near the bottom of the screen // setTimeout lets us measure after it's visible in the DOM setTimeout(() => { const button = this.dropdownButton.nativeElement; const boundingClientRect = button.getBoundingClientRect(); // +100 to give the dropUp some buffer if ((boundingClientRect.bottom + 100) > window.innerHeight) { this.dropUp = true; } else { this.dropUp = false; } }, 0); // we bind noop to document.body.firstElementChild to allow safari to fire events // from document. Then we unbind everything later to keep things light. document.body.firstElementChild.addEventListener("click", this.noop, true); document.body.firstElementChild.addEventListener("keydown", this.noop, true); document.addEventListener("click", this.outsideClick, true); document.addEventListener("keydown", this.outsideKey, true); setTimeout(() => this.view.initFocus(), 0); } /** * Collapsing the dropdown menu and removing unnecessary `EventListeners`. */ closeMenu() { // return early if the menu is already closed if (this.menuIsClosed) { return; } this.menuIsClosed = true; this.checkForReorder(); this.onClose.emit(); this.close.emit(); // focus the trigger button when we close ... this.dropdownButton.nativeElement.focus(); // remove the conditional once this api is settled and part of abstract-dropdown-view.class if (this.view["disableScroll"]) { this.view["disableScroll"](); } // move the list back in the component on close if (!this.appendInline) { this.visibilitySubscription.unsubscribe(); this._appendToDropdown(); } document.body.firstElementChild.removeEventListener("click", this.noop, true); document.body.firstElementChild.removeEventListener("keydown", this.noop, true); document.removeEventListener("click", this.outsideClick, true); document.removeEventListener("keydown", this.outsideKey, true); } /** * Controls toggling menu states between open/expanded and closed/collapsed. 
*/ toggleMenu() { if (this.menuIsClosed) { this.openMenu(); } else { this.closeMenu(); } } public isTemplate(value) { return value instanceof TemplateRef; } /** * Controls when it's needed to apply the selection feedback */ protected checkForReorder() { const topAfterReopen = this.menuIsClosed && this.selectionFeedback === "top-after-reopen"; if ((this.type === "multi") && (topAfterReopen || this.selectionFeedback === "top")) { this.view.reorderSelected(this.selectionFeedback === "top"); } } }
if (this.type === "multi") { return; } if (this.menuIsClosed) { this.closedDropdownNavigation(event);