encoding.go

// Copied from github.com/hashicorp/terraform/internal/lang/funcs
package funcs
import (
"bytes"
"compress/gzip"
"encoding/base64"
"fmt"
"log"
"net/url"
"unicode/utf8"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/function"
"golang.org/x/text/encoding/ianaindex"
)
// Base64DecodeFunc constructs a function that decodes a string containing a base64 sequence.
var Base64DecodeFunc = function.New(&function.Spec{
Params: []function.Parameter{
{
Name: "str",
Type: cty.String,
},
},
Type: function.StaticReturnType(cty.String),
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
s := args[0].AsString()
sDec, err := base64.StdEncoding.DecodeString(s)
if err != nil {
return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode base64 data '%s'", s)
}
if !utf8.Valid(sDec) {
log.Printf("[DEBUG] the result of decoding the provided string is not valid UTF-8: %s", sDec)
return cty.UnknownVal(cty.String), fmt.Errorf("the result of decoding the provided string is not valid UTF-8")
}
return cty.StringVal(string(sDec)), nil
},
})
// Base64EncodeFunc constructs a function that encodes a string to a base64 sequence.
var Base64EncodeFunc = function.New(&function.Spec{
Params: []function.Parameter{
{
Name: "str",
Type: cty.String,
},
},
Type: function.StaticReturnType(cty.String),
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
return cty.StringVal(base64.StdEncoding.EncodeToString([]byte(args[0].AsString()))), nil
},
})
// TextEncodeBase64Func constructs a function that encodes a string to a target encoding and then to a base64 sequence.
var TextEncodeBase64Func = function.New(&function.Spec{
Params: []function.Parameter{
{
Name: "string",
Type: cty.String,
},
{
Name: "encoding",
Type: cty.String,
},
},
Type: function.StaticReturnType(cty.String),
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
encoding, err := ianaindex.IANA.Encoding(args[1].AsString())
if err != nil || encoding == nil {
return cty.UnknownVal(cty.String), function.NewArgErrorf(1, "%q is not a supported IANA encoding name or alias in this Terraform version", args[1].AsString())
}
encName, err := ianaindex.IANA.Name(encoding)
if err != nil { // would be weird, since we just read this encoding out
encName = args[1].AsString()
}
encoder := encoding.NewEncoder()
encodedInput, err := encoder.Bytes([]byte(args[0].AsString()))
if err != nil {
// The string representations of "err" disclose implementation
// details of the underlying library, and the main error we might
// like to return a special message for is unexported as
// golang.org/x/text/encoding/internal.RepertoireError, so this
// is just a generic error message for now.
//
// We also don't include the string itself in the message because
// it can typically be very large, contain newline characters,
// etc.
return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "the given string contains characters that cannot be represented in %s", encName)
}
return cty.StringVal(base64.StdEncoding.EncodeToString(encodedInput)), nil
},
})
// TextDecodeBase64Func constructs a function that decodes a base64 sequence to a target encoding.
var TextDecodeBase64Func = function.New(&function.Spec{
Params: []function.Parameter{
{
Name: "source",
Type: cty.String,
},
{
Name: "encoding",
Type: cty.String,
},
},
Type: function.StaticReturnType(cty.String),
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
encoding, err := ianaindex.IANA.Encoding(args[1].AsString())
if err != nil || encoding == nil {
return cty.UnknownVal(cty.String), function.NewArgErrorf(1, "%q is not a supported IANA encoding name or alias in this Terraform version", args[1].AsString())
}
encName, err := ianaindex.IANA.Name(encoding)
if err != nil { // would be weird, since we just read this encoding out
encName = args[1].AsString()
}
s := args[0].AsString()
sDec, err := base64.StdEncoding.DecodeString(s)
if err != nil {
switch err := err.(type) {
case base64.CorruptInputError:
return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "the given value is has an invalid base64 symbol at offset %d", int(err))
default:
return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "invalid source string: %T", err)
}
}
decoder := encoding.NewDecoder()
decoded, err := decoder.Bytes(sDec)
if err != nil || bytes.ContainsRune(decoded, '�') {
return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "the given string contains symbols that are not defined for %s", encName)
}
return cty.StringVal(string(decoded)), nil
},
})
// Base64GzipFunc constructs a function that compresses a string with gzip and then encodes the result in
// Base64 encoding.
var Base64GzipFunc = function.New(&function.Spec{
Params: []function.Parameter{
{
Name: "str",
Type: cty.String,
},
},
Type: function.StaticReturnType(cty.String),
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
s := args[0].AsString()
var b bytes.Buffer
gz := gzip.NewWriter(&b)
if _, err := gz.Write([]byte(s)); err != nil {
return cty.UnknownVal(cty.String), fmt.Errorf("failed to write gzip raw data: '%s'", s)
}
if err := gz.Flush(); err != nil {
return cty.UnknownVal(cty.String), fmt.Errorf("failed to flush gzip writer: '%s'", s)
}
if err := gz.Close(); err != nil {
return cty.UnknownVal(cty.String), fmt.Errorf("failed to close gzip writer: '%s'", s)
}
return cty.StringVal(base64.StdEncoding.EncodeToString(b.Bytes())), nil
},
})
// URLEncodeFunc constructs a function that applies URL encoding to a given string.
var URLEncodeFunc = function.New(&function.Spec{
Params: []function.Parameter{
{
Name: "str",
Type: cty.String,
},
},
Type: function.StaticReturnType(cty.String),
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
return cty.StringVal(url.QueryEscape(args[0].AsString())), nil
},
})
// Base64Decode decodes a string containing a base64 sequence.
//
// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
//
// Strings in the Terraform language are sequences of unicode characters rather
// than bytes, so this function will also interpret the resulting bytes as
// UTF-8. If the bytes after Base64 decoding are _not_ valid UTF-8, this function
// produces an error.
func Base64Decode(str cty.Value) (cty.Value, error) {
return Base64DecodeFunc.Call([]cty.Value{str})
}
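// Usage sketch for Base64Decode (added example; "SGVsbG8=" is the standard
// Base64 encoding of "Hello"):
//
//	v, err := Base64Decode(cty.StringVal("SGVsbG8="))
//	// on success: v.AsString() == "Hello", err == nil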
// Base64Encode applies Base64 encoding to a string.
//
// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
//
// Strings in the Terraform language are sequences of unicode characters rather
// than bytes, so this function will first encode the characters from the string
// as UTF-8, and then apply Base64 encoding to the result.
func Base64Encode(str cty.Value) (cty.Value, error) {
return Base64EncodeFunc.Call([]cty.Value{str})
}
// Base64Gzip compresses a string with gzip and then encodes the result in
// Base64 encoding.
//
// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
//
// Strings in the Terraform language are sequences of unicode characters rather
// than bytes, so this function will first encode the characters from the string
// as UTF-8, then apply gzip compression, and then finally apply Base64 encoding.
func Base64Gzip(str cty.Value) (cty.Value, error) {
return Base64GzipFunc.Call([]cty.Value{str})
}
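// Round-trip sketch for Base64Gzip (added example; the exact encoded output
// depends on gzip header bytes, so only the round-trip is shown):
//
//	enc, _ := Base64Gzip(cty.StringVal("hello"))
//	raw, _ := base64.StdEncoding.DecodeString(enc.AsString())
//	zr, _ := gzip.NewReader(bytes.NewReader(raw))
//	plain, _ := io.ReadAll(zr) // string(plain) == "hello"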
// URLEncode applies URL encoding to a given string.
//
// This function identifies characters in the given string that would have a
// special meaning when included as a query string argument in a URL and
// escapes them using RFC 3986 "percent encoding".
//
// If the given string contains non-ASCII characters, these are first encoded as
// UTF-8 and then percent encoding is applied separately to each UTF-8 byte.
func URLEncode(str cty.Value) (cty.Value, error) {
return URLEncodeFunc.Call([]cty.Value{str})
}
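// Usage sketch for URLEncode (added example): url.QueryEscape encodes a space
// as "+" and "&" as "%26".
//
//	v, _ := URLEncode(cty.StringVal("a b&c"))
//	// v.AsString() == "a+b%26c"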
// TextEncodeBase64 converts a string to a target character encoding and then applies Base64 encoding to the result.
//
// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
//
// The string is first converted from Terraform's internal UTF-8
// representation to the given target IANA encoding (e.g. UTF-16LE), and
// Base64 encoding is then applied to the resulting bytes.
func TextEncodeBase64(str, enc cty.Value) (cty.Value, error) {
return TextEncodeBase64Func.Call([]cty.Value{str, enc})
}
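// Usage sketch for TextEncodeBase64 (added example; assumes the x/text
// UTF-16LE encoder emits no byte-order mark): "abc" becomes the bytes
// 61 00 62 00 63 00, which Base64-encode to "YQBiAGMA".
//
//	v, _ := TextEncodeBase64(cty.StringVal("abc"), cty.StringVal("UTF-16LE"))
//	// v.AsString() == "YQBiAGMA"; TextDecodeBase64 reverses this.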
// TextDecodeBase64 decodes a Base64 string and then interprets the resulting bytes according to the given target encoding.
//
// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
//
// The decoded bytes are converted from the target encoding (e.g. UTF-16LE)
// back to the UTF-8 representation that the Terraform language uses for
// strings.
func TextDecodeBase64(str, enc cty.Value) (cty.Value, error) {
return TextDecodeBase64Func.Call([]cty.Value{str, enc})
}
proxy_test.go

/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rest
import (
"bytes"
"compress/gzip"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
"net/http/httputil"
"net/url"
"reflect"
"strconv"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/net/websocket"
"k8s.io/apimachinery/pkg/util/httpstream"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apiserver/pkg/features"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/apiserver/pkg/util/proxy"
)
const fakeStatusCode = 567
type fakeResponder struct {
t *testing.T
called bool
err error
// called chan error
w http.ResponseWriter
}
func (r *fakeResponder) Error(err error) {
if r.called {
r.t.Errorf("Error responder called again!\nprevious error: %v\nnew error: %v", r.err, err)
}
if r.w != nil {
r.w.WriteHeader(fakeStatusCode)
_, writeErr := r.w.Write([]byte(err.Error()))
assert.NoError(r.t, writeErr)
} else {
r.t.Logf("No ResponseWriter set")
}
r.called = true
r.err = err
}
type fakeConn struct {
err error // The error to return when io is performed over the connection.
}
func (f *fakeConn) Read([]byte) (int, error) { return 0, f.err }
func (f *fakeConn) Write([]byte) (int, error) { return 0, f.err }
func (f *fakeConn) Close() error { return nil }
func (fakeConn) LocalAddr() net.Addr { return nil }
func (fakeConn) RemoteAddr() net.Addr { return nil }
func (fakeConn) SetDeadline(t time.Time) error { return nil }
func (fakeConn) SetReadDeadline(t time.Time) error { return nil }
func (fakeConn) SetWriteDeadline(t time.Time) error { return nil }
type SimpleBackendHandler struct {
requestURL url.URL
requestHeader http.Header
requestBody []byte
requestMethod string
responseBody string
responseHeader map[string]string
t *testing.T
}
func (s *SimpleBackendHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
s.requestURL = *req.URL
s.requestHeader = req.Header
s.requestMethod = req.Method
var err error
s.requestBody, err = ioutil.ReadAll(req.Body)
if err != nil {
s.t.Errorf("Unexpected error: %v", err)
return
}
if s.responseHeader != nil {
for k, v := range s.responseHeader {
w.Header().Add(k, v)
}
}
w.Write([]byte(s.responseBody))
}
func validateParameters(t *testing.T, name string, actual url.Values, expected map[string]string) {
for k, v := range expected {
actualValue, ok := actual[k]
if !ok {
t.Errorf("%s: Expected parameter %s not received", name, k)
continue
}
if actualValue[0] != v {
t.Errorf("%s: Parameter %s values don't match. Actual: %#v, Expected: %s",
name, k, actualValue, v)
}
}
}
func validateHeaders(t *testing.T, name string, actual http.Header, expected map[string]string, notExpected []string) {
for k, v := range expected {
actualValue, ok := actual[k]
if !ok {
t.Errorf("%s: Expected header %s not received", name, k)
continue
}
if actualValue[0] != v {
t.Errorf("%s: Header %s values don't match. Actual: %s, Expected: %s",
name, k, actualValue, v)
}
}
if notExpected == nil {
return
}
for _, h := range notExpected {
if _, present := actual[h]; present {
t.Errorf("%s: unexpected header: %s", name, h)
}
}
}
func TestServeHTTP(t *testing.T) {
tests := []struct {
name string
method string
requestPath string
expectedPath string
requestBody string
requestParams map[string]string
requestHeader map[string]string
responseHeader map[string]string
expectedRespHeader map[string]string
notExpectedRespHeader []string
upgradeRequired bool
expectError func(err error) bool
}{
{
name: "root path, simple get",
method: "GET",
requestPath: "/",
expectedPath: "/",
},
{
name: "no upgrade header sent",
method: "GET",
requestPath: "/",
upgradeRequired: true,
expectError: func(err error) bool {
return err != nil && strings.Contains(err.Error(), "Upgrade request required")
},
},
{
name: "simple path, get",
method: "GET",
requestPath: "/path/to/test",
expectedPath: "/path/to/test",
},
{
name: "request params",
method: "POST",
requestPath: "/some/path/",
expectedPath: "/some/path/",
requestParams: map[string]string{"param1": "value/1", "param2": "value%2"},
requestBody: "test request body",
},
{
name: "request headers",
method: "PUT",
requestPath: "/some/path",
expectedPath: "/some/path",
requestHeader: map[string]string{"Header1": "value1", "Header2": "value2"},
},
{
name: "empty path - slash should be added",
method: "GET",
requestPath: "",
expectedPath: "/",
},
{
name: "remove CORS headers",
method: "GET",
requestPath: "/some/path",
expectedPath: "/some/path",
responseHeader: map[string]string{
"Header1": "value1",
"Access-Control-Allow-Origin": "some.server",
"Access-Control-Allow-Methods": "GET"},
expectedRespHeader: map[string]string{
"Header1": "value1",
},
notExpectedRespHeader: []string{
"Access-Control-Allow-Origin",
"Access-Control-Allow-Methods",
},
},
}
for i, test := range tests {
func() {
backendResponse := "<html><head></head><body><a href=\"/test/path\">Hello</a></body></html>"
backendResponseHeader := test.responseHeader
// Test a simple header if not specified in the test
if backendResponseHeader == nil && test.expectedRespHeader == nil {
backendResponseHeader = map[string]string{"Content-Type": "text/html"}
test.expectedRespHeader = map[string]string{"Content-Type": "text/html"}
}
backendHandler := &SimpleBackendHandler{
responseBody: backendResponse,
responseHeader: backendResponseHeader,
}
backendServer := httptest.NewServer(backendHandler)
defer backendServer.Close()
responder := &fakeResponder{t: t}
backendURL, _ := url.Parse(backendServer.URL)
backendURL.Path = test.requestPath
proxyHandler := &UpgradeAwareProxyHandler{
Location: backendURL,
Responder: responder,
UpgradeRequired: test.upgradeRequired,
}
proxyServer := httptest.NewServer(proxyHandler)
defer proxyServer.Close()
proxyURL, _ := url.Parse(proxyServer.URL)
proxyURL.Path = test.requestPath
paramValues := url.Values{}
for k, v := range test.requestParams {
paramValues[k] = []string{v}
}
proxyURL.RawQuery = paramValues.Encode()
var requestBody io.Reader
if test.requestBody != "" {
requestBody = bytes.NewBufferString(test.requestBody)
}
req, err := http.NewRequest(test.method, proxyURL.String(), requestBody)
if test.requestHeader != nil {
header := http.Header{}
for k, v := range test.requestHeader {
header.Add(k, v)
}
req.Header = header
}
if err != nil {
t.Errorf("Error creating client request: %v", err)
}
client := &http.Client{}
res, err := client.Do(req)
if err != nil {
t.Errorf("Error from proxy request: %v", err)
}
if test.expectError != nil {
if !responder.called {
t.Errorf("%d: responder was not invoked", i)
return
}
if !test.expectError(responder.err) {
t.Errorf("%d: unexpected error: %v", i, responder.err)
}
return
}
// Validate backend request
// Method
if backendHandler.requestMethod != test.method {
t.Errorf("Unexpected request method: %s. Expected: %s",
backendHandler.requestMethod, test.method)
}
// Body
if string(backendHandler.requestBody) != test.requestBody {
t.Errorf("Unexpected request body: %s. Expected: %s",
string(backendHandler.requestBody), test.requestBody)
}
// Path
if backendHandler.requestURL.Path != test.expectedPath {
t.Errorf("Unexpected request path: %s", backendHandler.requestURL.Path)
}
// Parameters
validateParameters(t, test.name, backendHandler.requestURL.Query(), test.requestParams)
// Headers
validateHeaders(t, test.name+" backend request", backendHandler.requestHeader,
test.requestHeader, nil)
// Validate proxy response
// Response Headers
validateHeaders(t, test.name+" backend headers", res.Header, test.expectedRespHeader, test.notExpectedRespHeader)
// Validate Body
responseBody, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Errorf("Unexpected error reading response body: %v", err)
}
if rb := string(responseBody); rb != backendResponse {
t.Errorf("Did not get expected response body: %s. Expected: %s", rb, backendResponse)
}
// Error
if responder.called {
t.Errorf("Unexpected proxy handler error: %v", responder.err)
}
}()
}
}
func TestProxyUpgrade(t *testing.T) {
localhostPool := x509.NewCertPool()
if !localhostPool.AppendCertsFromPEM(localhostCert) {
t.Errorf("error setting up localhostCert pool")
}
testcases := map[string]struct {
ServerFunc func(http.Handler) *httptest.Server
ProxyTransport http.RoundTripper
}{
"http": {
ServerFunc: httptest.NewServer,
ProxyTransport: nil,
},
"https (invalid hostname + InsecureSkipVerify)": {
ServerFunc: func(h http.Handler) *httptest.Server {
cert, err := tls.X509KeyPair(exampleCert, exampleKey)
if err != nil {
t.Errorf("https (invalid hostname): proxy_test: %v", err)
}
ts := httptest.NewUnstartedServer(h)
ts.TLS = &tls.Config{
Certificates: []tls.Certificate{cert},
}
ts.StartTLS()
return ts
},
ProxyTransport: utilnet.SetTransportDefaults(&http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}),
},
"https (valid hostname + RootCAs)": {
ServerFunc: func(h http.Handler) *httptest.Server {
cert, err := tls.X509KeyPair(localhostCert, localhostKey)
if err != nil {
t.Errorf("https (valid hostname): proxy_test: %v", err)
}
ts := httptest.NewUnstartedServer(h)
ts.TLS = &tls.Config{
Certificates: []tls.Certificate{cert},
}
ts.StartTLS()
return ts
},
ProxyTransport: utilnet.SetTransportDefaults(&http.Transport{TLSClientConfig: &tls.Config{RootCAs: localhostPool}}),
},
"https (valid hostname + RootCAs + custom dialer)": {
ServerFunc: func(h http.Handler) *httptest.Server {
cert, err := tls.X509KeyPair(localhostCert, localhostKey)
if err != nil {
t.Errorf("https (valid hostname): proxy_test: %v", err)
}
ts := httptest.NewUnstartedServer(h)
ts.TLS = &tls.Config{
Certificates: []tls.Certificate{cert},
}
ts.StartTLS()
return ts
},
ProxyTransport: utilnet.SetTransportDefaults(&http.Transport{Dial: net.Dial, TLSClientConfig: &tls.Config{RootCAs: localhostPool}}),
},
}
// Enable StreamingProxyRedirects for test.
utilfeature.DefaultFeatureGate.Set(string(features.StreamingProxyRedirects) + "=true")
for k, tc := range testcases {
for _, redirect := range []bool{false, true} {
tcName := k
backendPath := "/hello"
if redirect {
tcName += " with redirect"
backendPath = "/redirect"
}
func() { // Cleanup after each test case.
backend := http.NewServeMux()
backend.Handle("/hello", websocket.Handler(func(ws *websocket.Conn) {
defer ws.Close()
body := make([]byte, 5)
ws.Read(body)
ws.Write([]byte("hello " + string(body)))
}))
backend.Handle("/redirect", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "/hello", http.StatusFound)
}))
backendServer := tc.ServerFunc(backend)
defer backendServer.Close()
serverURL, _ := url.Parse(backendServer.URL)
serverURL.Path = backendPath
proxyHandler := &UpgradeAwareProxyHandler{
Location: serverURL,
Transport: tc.ProxyTransport,
InterceptRedirects: redirect,
Responder: &noErrorsAllowed{t: t},
}
proxy := httptest.NewServer(proxyHandler)
defer proxy.Close()
ws, err := websocket.Dial("ws://"+proxy.Listener.Addr().String()+"/some/path", "", "http://127.0.0.1/")
if err != nil {
t.Fatalf("%s: websocket dial err: %s", tcName, err)
}
defer ws.Close()
if _, err := ws.Write([]byte("world")); err != nil {
t.Fatalf("%s: write err: %s", tcName, err)
}
response := make([]byte, 20)
n, err := ws.Read(response)
if err != nil {
t.Fatalf("%s: read err: %s", tcName, err)
}
if e, a := "hello world", string(response[0:n]); e != a {
t.Fatalf("%s: expected '%#v', got '%#v'", tcName, e, a)
}
}()
}
}
}
type noErrorsAllowed struct {
t *testing.T
}
func (r *noErrorsAllowed) Error(err error) {
r.t.Error(err)
}
func TestProxyUpgradeErrorResponse(t *testing.T) {
var (
responder *fakeResponder
expectedErr = errors.New("EXPECTED")
)
proxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
transport := http.DefaultTransport.(*http.Transport)
transport.Dial = func(network, addr string) (net.Conn, error) {
return &fakeConn{err: expectedErr}, nil
}
responder = &fakeResponder{t: t, w: w}
proxyHandler := &UpgradeAwareProxyHandler{
Location: &url.URL{
Host: "fake-backend",
},
UpgradeRequired: true,
Responder: responder,
Transport: transport,
}
proxyHandler.ServeHTTP(w, r)
}))
defer proxy.Close()
// Send request to proxy server.
req, err := http.NewRequest("POST", "http://"+proxy.Listener.Addr().String()+"/some/path", nil)
require.NoError(t, err)
req.Header.Set(httpstream.HeaderConnection, httpstream.HeaderUpgrade)
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
// Expect error response.
assert.True(t, responder.called)
assert.Equal(t, fakeStatusCode, resp.StatusCode)
msg, err := ioutil.ReadAll(resp.Body)
require.NoError(t, err)
assert.Contains(t, string(msg), expectedErr.Error())
}
func TestDefaultProxyTransport(t *testing.T) {
tests := []struct {
name,
url,
location,
expectedScheme,
expectedHost,
expectedPathPrepend string
}{
{
name: "simple path",
url: "http://test.server:8080/a/test/location",
location: "http://localhost/location",
expectedScheme: "http",
expectedHost: "test.server:8080",
expectedPathPrepend: "/a/test",
},
{
name: "empty path",
url: "http://test.server:8080/a/test/",
location: "http://localhost",
expectedScheme: "http",
expectedHost: "test.server:8080",
expectedPathPrepend: "/a/test",
},
{
name: "location ending in slash",
url: "http://test.server:8080/a/test/",
location: "http://localhost/",
expectedScheme: "http",
expectedHost: "test.server:8080",
expectedPathPrepend: "/a/test",
},
}
for _, test := range tests {
locURL, _ := url.Parse(test.location)
URL, _ := url.Parse(test.url)
h := UpgradeAwareProxyHandler{
Location: locURL,
}
result := h.defaultProxyTransport(URL, nil)
transport := result.(*corsRemovingTransport).RoundTripper.(*proxy.Transport)
if transport.Scheme != test.expectedScheme {
t.Errorf("%s: unexpected scheme. Actual: %s, Expected: %s", test.name, transport.Scheme, test.expectedScheme)
}
if transport.Host != test.expectedHost {
t.Errorf("%s: unexpected host. Actual: %s, Expected: %s", test.name, transport.Host, test.expectedHost)
}
if transport.PathPrepend != test.expectedPathPrepend {
t.Errorf("%s: unexpected path prepend. Actual: %s, Expected: %s", test.name, transport.PathPrepend, test.expectedPathPrepend)
}
}
}
func TestProxyRequestContentLengthAndTransferEncoding(t *testing.T) {
chunk := func(data []byte) []byte {
out := &bytes.Buffer{}
chunker := httputil.NewChunkedWriter(out)
for _, b := range data {
if _, err := chunker.Write([]byte{b}); err != nil {
panic(err)
}
}
chunker.Close()
out.Write([]byte("\r\n"))
return out.Bytes()
}
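// Wire-format note (added comment): because the helper writes one byte per
// chunk, chunk([]byte("ab")) produces "1\r\na\r\n1\r\nb\r\n0\r\n\r\n", i.e.
// per-byte chunks, the zero-length terminating chunk written by Close, and
// the final CRLF appended manually because NewChunkedWriter does not emit it.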
zip := func(data []byte) []byte {
out := &bytes.Buffer{}
zipper := gzip.NewWriter(out)
if _, err := zipper.Write(data); err != nil {
panic(err)
}
zipper.Close()
return out.Bytes()
}
sampleData := []byte("abcde")
table := map[string]struct {
reqHeaders http.Header
reqBody []byte
expectedHeaders http.Header
expectedBody []byte
}{
"content-length": {
reqHeaders: http.Header{
"Content-Length": []string{"5"},
},
reqBody: sampleData,
expectedHeaders: http.Header{
"Content-Length": []string{"5"},
"Content-Encoding": nil, // none set
"Transfer-Encoding": nil, // none set
},
expectedBody: sampleData,
},
"content-length + identity transfer-encoding": {
reqHeaders: http.Header{
"Content-Length": []string{"5"},
"Transfer-Encoding": []string{"identity"},
},
reqBody: sampleData,
expectedHeaders: http.Header{
"Content-Length": []string{"5"},
"Content-Encoding": nil, // none set
"Transfer-Encoding": nil, // gets removed
},
expectedBody: sampleData,
},
"content-length + gzip content-encoding": {
reqHeaders: http.Header{
"Content-Length": []string{strconv.Itoa(len(zip(sampleData)))},
"Content-Encoding": []string{"gzip"},
},
reqBody: zip(sampleData),
expectedHeaders: http.Header{
"Content-Length": []string{strconv.Itoa(len(zip(sampleData)))},
"Content-Encoding": []string{"gzip"},
"Transfer-Encoding": nil, // none set
},
expectedBody: zip(sampleData),
},
"chunked transfer-encoding": {
reqHeaders: http.Header{
"Transfer-Encoding": []string{"chunked"},
},
reqBody: chunk(sampleData),
expectedHeaders: http.Header{
"Content-Length": nil, // none set
"Content-Encoding": nil, // none set
"Transfer-Encoding": nil, // Transfer-Encoding gets removed
},
expectedBody: sampleData, // sample data is unchunked
},
"chunked transfer-encoding + gzip content-encoding": {
reqHeaders: http.Header{
"Content-Encoding": []string{"gzip"},
"Transfer-Encoding": []string{"chunked"},
},
reqBody: chunk(zip(sampleData)),
expectedHeaders: http.Header{
"Content-Length": nil, // none set
"Content-Encoding": []string{"gzip"},
"Transfer-Encoding": nil, // gets removed
},
expectedBody: zip(sampleData), // sample data is unchunked, but content-encoding is preserved
},
// "Transfer-Encoding: gzip" is not supported by go
// See http/transfer.go#fixTransferEncoding (https://golang.org/src/net/http/transfer.go#L427)
// Once it is supported, this test case should succeed
//
// "gzip+chunked transfer-encoding": {
// reqHeaders: http.Header{
// "Transfer-Encoding": []string{"chunked,gzip"},
// },
// reqBody: chunk(zip(sampleData)),
//
// expectedHeaders: http.Header{
// "Content-Length": nil, // no content-length headers
// "Transfer-Encoding": nil, // Transfer-Encoding gets removed
// },
// expectedBody: sampleData,
// },
}
successfulResponse := "backend passed tests"
for k, item := range table {
// Start the downstream server
downstreamServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
// Verify headers
for header, v := range item.expectedHeaders {
if !reflect.DeepEqual(v, req.Header[header]) {
t.Errorf("%s: Expected headers for %s to be %v, got %v", k, header, v, req.Header[header])
}
}
// Read body
body, err := ioutil.ReadAll(req.Body)
if err != nil {
t.Errorf("%s: unexpected error %v", k, err)
}
req.Body.Close()
// Verify length
if req.ContentLength > 0 && req.ContentLength != int64(len(body)) {
t.Errorf("%s: ContentLength was %d, len(data) was %d", k, req.ContentLength, len(body))
}
// Verify content
if !bytes.Equal(item.expectedBody, body) {
t.Errorf("%s: Expected %q, got %q", k, string(item.expectedBody), string(body))
}
// Write successful response
w.Write([]byte(successfulResponse))
}))
defer downstreamServer.Close()
responder := &fakeResponder{t: t}
backendURL, _ := url.Parse(downstreamServer.URL)
proxyHandler := &UpgradeAwareProxyHandler{
Location: backendURL,
Responder: responder,
UpgradeRequired: false,
}
proxyServer := httptest.NewServer(proxyHandler)
defer proxyServer.Close()
// Dial the proxy server
conn, err := net.Dial(proxyServer.Listener.Addr().Network(), proxyServer.Listener.Addr().String())
if err != nil {
t.Errorf("unexpected error %v", err)
continue
}
defer conn.Close()
// Add standard http 1.1 headers
if item.reqHeaders == nil {
item.reqHeaders = http.Header{}
}
item.reqHeaders.Add("Connection", "close")
item.reqHeaders.Add("Host", proxyServer.Listener.Addr().String())
// Write the request headers
if _, err := fmt.Fprint(conn, "POST / HTTP/1.1\r\n"); err != nil {
t.Fatalf("%s unexpected error %v", k, err)
}
for header, values := range item.reqHeaders {
for _, value := range values {
if _, err := fmt.Fprintf(conn, "%s: %s\r\n", header, value); err != nil {
t.Fatalf("%s: unexpected error %v", k, err)
}
}
}
// Header separator
if _, err := fmt.Fprint(conn, "\r\n"); err != nil {
t.Fatalf("%s: unexpected error %v", k, err)
}
// Body
if _, err := conn.Write(item.reqBody); err != nil {
t.Fatalf("%s: unexpected error %v", k, err)
}
// Read response
response, err := ioutil.ReadAll(conn)
if err != nil {
t.Errorf("%s: unexpected error %v", k, err)
continue
}
if !strings.HasSuffix(string(response), successfulResponse) {
t.Errorf("%s: Did not get successful response: %s", k, string(response))
continue
}
}
}
// exampleCert was generated from crypto/tls/generate_cert.go with the following command:
// go run generate_cert.go --rsa-bits 512 --host example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h
var exampleCert = []byte(`-----BEGIN CERTIFICATE-----
MIIBcjCCAR6gAwIBAgIQBOUTYowZaENkZi0faI9DgTALBgkqhkiG9w0BAQswEjEQ
MA4GA1UEChMHQWNtZSBDbzAgFw03MDAxMDEwMDAwMDBaGA8yMDg0MDEyOTE2MDAw
MFowEjEQMA4GA1UEChMHQWNtZSBDbzBcMA0GCSqGSIb3DQEBAQUAA0sAMEgCQQCZ
xfR3sgeHBraGFfF/24tTn4PRVAHOf2UOOxSQRs+aYjNqimFqf/SRIblQgeXdBJDR
gVK5F1Js2zwlehw0bHxRAgMBAAGjUDBOMA4GA1UdDwEB/wQEAwIApDATBgNVHSUE
DDAKBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MBYGA1UdEQQPMA2CC2V4YW1w
bGUuY29tMAsGCSqGSIb3DQEBCwNBAI/mfBB8dm33IpUl+acSyWfL6gX5Wc0FFyVj
dKeesE1XBuPX1My/rzU6Oy/YwX7LOL4FaeNUS6bbL4axSLPKYSs=
-----END CERTIFICATE-----`)
var exampleKey = []byte(`-----BEGIN RSA PRIVATE KEY-----
MIIBOgIBAAJBAJnF9HeyB4cGtoYV8X/bi1Ofg9FUAc5/ZQ47FJBGz5piM2qKYWp/
9JEhuVCB5d0EkNGBUrkXUmzbPCV6HDRsfFECAwEAAQJBAJLH9yPuButniACTn5L5
IJQw1mWQt6zBw9eCo41YWkA0866EgjC53aPZaRjXMp0uNJGdIsys2V5rCOOLWN2C
ODECIQDICHsi8QQQ9wpuJy8X5l8MAfxHL+DIqI84wQTeVM91FQIhAMTME8A18/7h
1Ad6drdnxAkuC0tX6Sx0LDozrmen+HFNAiAlcEDrt0RVkIcpOrg7tuhPLQf0oudl
Zvb3Xlj069awSQIgcT15E/43w2+RASifzVNhQ2MCTr1sSA8lL+xzK+REmnUCIBhQ
j4139pf8Re1J50zBxS/JlQfgDQi9sO9pYeiHIxNs
-----END RSA PRIVATE KEY-----`)
// localhostCert was generated from crypto/tls/generate_cert.go with the following command:
// go run generate_cert.go --rsa-bits 512 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h
var localhostCert = []byte(`-----BEGIN CERTIFICATE-----
MIIBdzCCASOgAwIBAgIBADALBgkqhkiG9w0BAQUwEjEQMA4GA1UEChMHQWNtZSBD
bzAeFw03MDAxMDEwMDAwMDBaFw00OTEyMzEyMzU5NTlaMBIxEDAOBgNVBAoTB0Fj
bWUgQ28wWjALBgkqhkiG9w0BAQEDSwAwSAJBAN55NcYKZeInyTuhcCwFMhDHCmwa
IUSdtXdcbItRB/yfXGBhiex00IaLXQnSU+QZPRZWYqeTEbFSgihqi1PUDy8CAwEA
AaNoMGYwDgYDVR0PAQH/BAQDAgCkMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1Ud
EwEB/wQFMAMBAf8wLgYDVR0RBCcwJYILZXhhbXBsZS5jb22HBH8AAAGHEAAAAAAA
AAAAAAAAAAAAAAEwCwYJKoZIhvcNAQEFA0EAAoQn/ytgqpiLcZu9XKbCJsJcvkgk
Se6AbGXgSlq+ZCEVo0qIwSgeBqmsJxUu7NCSOwVJLYNEBO2DtIxoYVk+MA==
-----END CERTIFICATE-----`)
// localhostKey is the private key for localhostCert.
var localhostKey = []byte(`-----BEGIN RSA PRIVATE KEY-----
MIIBPAIBAAJBAN55NcYKZeInyTuhcCwFMhDHCmwaIUSdtXdcbItRB/yfXGBhiex0
0IaLXQnSU+QZPRZWYqeTEbFSgihqi1PUDy8CAwEAAQJBAQdUx66rfh8sYsgfdcvV
NoafYpnEcB5s4m/vSVe6SU7dCK6eYec9f9wpT353ljhDUHq3EbmE4foNzJngh35d
AekCIQDhRQG5Li0Wj8TM4obOnnXUXf1jRv0UkzE9AHWLG5q3AwIhAPzSjpYUDjVW
MCUXgckTpKCuGwbJk7424Nb8bLzf3kllAiA5mUBgjfr/WtFSJdWcPQ4Zt9KTMNKD
EUO0ukpTwEIl6wIhAMbGqZK3zAAFdq8DD2jPx+UJXnh0rnOkZBzDtJ6/iN69AiEA
1Aq8MJgTaYsDQWyU/hDq5YkDJc9e9DSCvUIzqxQWMQE=
-----END RSA PRIVATE KEY-----`)
codec.rs

use std::{io, str};
use bincode::{self, Infinite, SizeLimit, deserialize_from, serialize};
use bytes::{Buf, Bytes, BytesMut, IntoBuf};
use futures::{Poll, Sink, StartSend, Stream};
use serde::{Deserialize, Serialize};
use tokio_serde::{Deserializer, FramedRead, FramedWrite, Serializer};
use codec::{Encoder, Decoder};
#[derive(Debug, Error)]
pub enum Error {
Io(io::Error),
Serde(bincode::Error),
}
pub struct Bincode<L> {
size_limit: L,
}
impl<L: SizeLimit> Bincode<L> {
pub fn new(size_limit: L) -> Bincode<L> {
Bincode { size_limit }
}
}
impl<T, L> Deserializer<T> for Bincode<L>
where
for<'de> T: Deserialize<'de>,
L: SizeLimit + Clone,
{
type Error = Error;
fn deserialize(&mut self, src: &Bytes) -> Result<T, Error> {
deserialize_from(&mut src.into_buf().reader(), self.size_limit.clone())
.map_err(Error::Serde)
}
}
impl<T, L> Serializer<T> for Bincode<L>
where
T: Serialize,
L: SizeLimit + Clone,
{
type Error = io::Error;
fn serialize(&mut self, item: &T) -> Result<BytesMut, io::Error> {
serialize(item, self.size_limit.clone())
.map(Into::into)
.map_err(|error| io::Error::new(io::ErrorKind::Other, error))
}
}
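// Hypothetical usage sketch (added; `byte_sink` is a placeholder, and this
// assumes the tokio-serde 0.1-era constructor signature):
//
// let framed = FramedWrite::new(byte_sink, Bincode::new(Infinite));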
app.controller.spec.ts

import { Test, TestingModule } from '@nestjs/testing';
import { AppController } from './app.controller';
import { AppService } from './app.service';
describe('AppController', () => {
let appController: AppController;
beforeEach(async () => {
const app: TestingModule = await Test.createTestingModule({
controllers: [AppController],
providers: [AppService],
}).compile();
appController = app.get<AppController>(AppController);
});
describe('root', () => {
test('should return "Hello World!"', () => {
expect(appController.getHello()).toBe('Hello World!');
});
});
});
lib.rs

// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Cryptography related types, constants, traits and functions. The functions
//! in this library are used for key generation, hashing, signing and signature
//! verification.
//!
//! The Crypto library makes it possible to potentially change the type of
//! cryptography applied in the system and add abstractions best
//! suited for Exonum.
#![warn(
missing_debug_implementations,
missing_docs,
unsafe_code,
bare_trait_objects
)]
#![warn(clippy::pedantic, clippy::nursery)]
#![allow(
// Next `cast_*` lints don't give alternatives.
clippy::cast_possible_wrap, clippy::cast_possible_truncation, clippy::cast_sign_loss,
// Next lints produce too much noise/false positives.
clippy::module_name_repetitions, clippy::similar_names, clippy::must_use_candidate,
clippy::pub_enum_variant_names,
// '... may panic' lints.
clippy::indexing_slicing,
// Too much work to fix.
clippy::missing_errors_doc, clippy::missing_const_for_fn
)]
#[macro_use]
extern crate serde_derive; // Required for Protobuf.
#[doc(inline)]
pub use self::crypto_impl::{
HASH_SIZE, PUBLIC_KEY_LENGTH, SECRET_KEY_LENGTH, SEED_LENGTH, SIGNATURE_LENGTH,
};
#[cfg(feature = "sodiumoxide-crypto")]
pub use self::crypto_lib::sodiumoxide::x25519;
#[cfg(feature = "with-protobuf")]
#[doc(hidden)]
pub mod proto;
use hex::{encode as encode_hex, FromHex, FromHexError, ToHex};
use serde::{
de::{self, Deserialize, Deserializer, Visitor},
Serialize, Serializer,
};
use std::{
default::Default,
fmt,
ops::{Index, Range, RangeFrom, RangeFull, RangeTo},
};
// A way to set an active cryptographic backend is to export it as `crypto_impl`.
#[cfg(feature = "sodiumoxide-crypto")]
use self::crypto_lib::sodiumoxide as crypto_impl;
#[macro_use]
mod macros;
pub(crate) mod crypto_lib;
/// The number of bytes shown before a value is cropped in debug messages.
const BYTES_IN_DEBUG: usize = 4;
/// The length of the ellipsis in debug messages.
const BYTES_IN_ELLIPSIS: usize = 3;
fn write_short_hex(f: &mut impl fmt::Write, slice: &[u8]) -> fmt::Result {
for byte in slice.iter().take(BYTES_IN_DEBUG) {
write!(f, "{:02x}", byte)?;
}
if slice.len() > BYTES_IN_DEBUG {
write!(f, "...")?;
}
Ok(())
}
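// Output-shape example (added comment): for a 32-byte slice of 0x0f bytes this
// writes "0f0f0f0f...", i.e. the first four bytes in hex followed by an
// ellipsis, matching the Debug output asserted in the tests below.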
/// Signs a slice of bytes using the signer's secret key and returns the
/// resulting `Signature`.
///
/// # Examples
///
/// The example below generates a pair of secret and public keys, indicates
/// certain data, signs the data using the secret key and with the help of
/// the public key verifies that the data have been signed with the corresponding
/// secret key.
///
/// ```
/// # exonum_crypto::init();
/// let (public_key, secret_key) = exonum_crypto::gen_keypair();
/// let data = [1, 2, 3];
/// let signature = exonum_crypto::sign(&data, &secret_key);
/// assert!(exonum_crypto::verify(&signature, &data, &public_key));
/// ```
pub fn sign(data: &[u8], secret_key: &SecretKey) -> Signature {
let impl_signature = crypto_impl::sign(data, &secret_key.0);
Signature(impl_signature)
}
/// Computes a secret key and a corresponding public key from a `Seed`.
///
/// # Examples
///
/// The example below generates a keypair that depends on the indicated seed.
/// Indicating the same seed value always results in the same keypair.
///
/// ```
/// use exonum_crypto::{SEED_LENGTH, Seed};
///
/// # exonum_crypto::init();
/// let (public_key, secret_key) = exonum_crypto::gen_keypair_from_seed(&Seed::new([1; SEED_LENGTH]));
/// ```
pub fn gen_keypair_from_seed(seed: &Seed) -> (PublicKey, SecretKey) {
let (impl_pub_key, impl_secret_key) = crypto_impl::gen_keypair_from_seed(&seed.0);
(PublicKey(impl_pub_key), SecretKey(impl_secret_key))
}
/// Generates a secret key and a corresponding public key using a cryptographically secure
/// pseudo-random number generator.
///
/// # Examples
///
/// The example below generates a unique keypair.
///
/// ```
/// # exonum_crypto::init();
/// let (public_key, secret_key) = exonum_crypto::gen_keypair();
/// ```
pub fn gen_keypair() -> (PublicKey, SecretKey) {
let (pubkey, secret_key) = crypto_impl::gen_keypair();
(PublicKey(pubkey), SecretKey(secret_key))
}
/// Verifies that `data` is signed with a secret key corresponding to the
/// given public key.
///
/// # Examples
///
/// The example below generates a pair of secret and public keys, indicates
/// certain data, signs the data using the secret key and with the help of the public key
/// verifies that the data have been signed with the corresponding secret key.
///
/// ```
/// # exonum_crypto::init();
/// let (public_key, secret_key) = exonum_crypto::gen_keypair();
/// let data = [1, 2, 3];
/// let signature = exonum_crypto::sign(&data, &secret_key);
/// assert!(exonum_crypto::verify(&signature, &data, &public_key));
/// ```
pub fn verify(sig: &Signature, data: &[u8], pubkey: &PublicKey) -> bool {
crypto_impl::verify(&sig.0, data, &pubkey.0)
}
/// Calculates a hash of a bytes slice.
///
/// Type of a hash depends on a chosen crypto backend (via `...-crypto` cargo feature).
///
/// # Examples
///
/// The example below calculates the hash of the indicated data.
///
/// ```
/// # exonum_crypto::init();
/// let data = [1, 2, 3];
/// let hash = exonum_crypto::hash(&data);
/// ```
pub fn hash(data: &[u8]) -> Hash {
let dig = crypto_impl::hash(data);
Hash(dig)
}
/// Initializes the cryptographic backend.
///
/// # Panics
///
/// Panics if backend initialization fails.
///
/// # Examples
///
/// ```
/// exonum_crypto::init();
/// ```
pub fn init() {
if !crypto_impl::init() {
panic!("Cryptographic library initialization failed.");
}
}
/// This structure allows a hash digest to be calculated incrementally
/// for a stream of data. Unlike the
/// [`Hash` structure](struct.Hash.html),
/// it lets the code process several data chunks without
/// the need to copy them into a single buffer.
///
/// # Examples
///
/// The example below indicates the data the code is working with; runs the
/// system hash update as many times as required to process all the data chunks
/// and calculates the resulting hash of the system.
///
/// ```rust
/// use exonum_crypto::HashStream;
///
/// let data: Vec<[u8; 5]> = vec![[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]];
/// let mut hash_stream = HashStream::new();
/// for chunk in data {
/// hash_stream = hash_stream.update(&chunk);
/// }
/// let _ = hash_stream.hash();
/// ```
#[derive(Debug, Default)]
pub struct HashStream(crypto_impl::HashState);
impl HashStream {
/// Creates a new instance of `HashStream`.
pub fn new() -> Self {
Self(crypto_impl::HashState::init())
}
/// Processes a chunk of stream and returns a `HashStream` with the updated internal state.
pub fn update(mut self, chunk: &[u8]) -> Self {
self.0.update(chunk);
self
}
/// Returns the resulting hash of the system calculated upon the commit
/// of currently supplied data.
pub fn hash(self) -> Hash {
let dig = self.0.finalize();
Hash(dig)
}
}
/// This structure allows digital signatures to be created and verified
/// for a stream of data. If the data is split into several
/// chunks, the chunks are added to the stream one by one and, once adding is
/// complete, the data is signed.
///
/// # Examples
///
/// The example below adds several data chunks to the system, generates a pair
/// of random public and secret keys, signs the data and verifies the signature.
///
/// ```rust
/// use exonum_crypto::{SignStream, gen_keypair};
///
/// let data: Vec<[u8; 5]> = vec![[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]];
/// let (public_key, secret_key) = gen_keypair();
/// let mut create_stream = SignStream::new();
/// let mut verify_stream = SignStream::new();
/// for chunk in data {
/// create_stream = create_stream.update(&chunk);
/// verify_stream = verify_stream.update(&chunk);
/// }
/// let file_sign = create_stream.sign(&secret_key);
/// assert!(verify_stream.verify(&file_sign, &public_key));
/// ```
#[derive(Debug, Default)]
pub struct SignStream(crypto_impl::SignState);
impl SignStream {
/// Creates a new instance of `SignStream`.
///
/// # Examples
///
/// ```
/// use exonum_crypto::SignStream;
///
/// let stream = SignStream::new();
/// ```
pub fn new() -> Self {
Self(crypto_impl::SignState::init())
}
/// Adds a new `chunk` to the message that will eventually be signed and/or verified.
///
/// # Examples
///
/// ```
/// use exonum_crypto::SignStream;
///
/// let mut stream = SignStream::new();
///
/// let data = &[[1, 2, 3], [4, 5, 6], [7, 8, 9]];
/// for chunk in data.iter() {
/// stream = stream.update(chunk);
/// }
/// ```
pub fn update(mut self, chunk: &[u8]) -> Self {
self.0.update(chunk);
self
}
/// Computes and returns a signature for the previously supplied message
/// using the given `secret_key`.
///
/// # Examples
///
/// ```
/// use exonum_crypto::{SignStream, gen_keypair};
///
/// let mut stream = SignStream::new();
///
/// let data = &[[1, 2, 3], [4, 5, 6], [7, 8, 9]];
/// for chunk in data.iter() {
/// stream = stream.update(chunk);
/// }
///
/// let (public_key, secret_key) = gen_keypair();
/// let signature = stream.sign(&secret_key);
/// ```
pub fn sign(&mut self, secret_key: &SecretKey) -> Signature {
Signature(self.0.finalize(&secret_key.0))
}
/// Verifies that `sig` is a valid signature for the previously supplied message
/// using the given `public_key`.
///
/// # Examples
///
/// ```
/// use exonum_crypto::{SignStream, gen_keypair};
///
/// let mut stream = SignStream::new();
/// let mut verify_stream = SignStream::new();
///
/// let data = &[[1, 2, 3], [4, 5, 6], [7, 8, 9]];
/// for chunk in data.iter() {
/// stream = stream.update(chunk);
/// verify_stream = verify_stream.update(chunk);
/// }
///
/// let (public_key, secret_key) = gen_keypair();
/// let signature = stream.sign(&secret_key);
/// assert!(verify_stream.verify(&signature, &public_key));
/// ```
pub fn verify(&mut self, sig: &Signature, public_key: &PublicKey) -> bool {
self.0.verify(&sig.0, &public_key.0)
}
}
implement_public_crypto_wrapper! {
/// Ed25519 public key used to verify digital signatures.
///
/// In public-key cryptography, the system uses a mathematically related pair
/// of keys: a public key, which is openly distributed, and a secret key,
/// which should remain confidential. For more information, refer to
/// [Public-key cryptography](https://en.wikipedia.org/wiki/Public-key_cryptography).
///
/// Ed25519 is a signature system that ensures fast signing and key generation,
/// as well as security and collision resilience.
///
/// # Examples
///
/// In the example below, the function generates a pair of random public and
/// secret keys.
///
/// ```
/// # exonum_crypto::init();
/// let (public_key, _) = exonum_crypto::gen_keypair();
/// ```
struct PublicKey, PUBLIC_KEY_LENGTH
}
implement_private_crypto_wrapper! {
/// Ed25519 secret key used to create digital signatures over messages.
///
/// In public-key cryptography, the system uses a mathematically related pair
/// of keys: a public key, which is openly distributed, and a secret key,
/// which should remain confidential. For more information, refer to
/// [Public-key cryptography](https://en.wikipedia.org/wiki/Public-key_cryptography).
///
/// Ed25519 is a signature system that ensures fast signing and key generation,
/// as well as security and collision resilience.
///
/// # Examples
///
/// In the example below, the function generates a pair of random public and
/// secret keys.
///
/// ```
/// # exonum_crypto::init();
/// let (_, secret_key) = exonum_crypto::gen_keypair();
/// ```
struct SecretKey, SECRET_KEY_LENGTH
}
implement_public_crypto_wrapper! {
/// The result of applying the SHA-256 hash function to data.
///
/// This function splits the input data into blocks and runs each block
/// through a cycle of 64 iterations. The result of the function is a hash
/// 256 bits or 32 bytes in length.
///
/// # Examples
///
/// The example below generates the hash of the indicated data.
///
/// ```
/// use exonum_crypto::Hash;
///
/// let data = [1, 2, 3];
/// let hash_from_data = exonum_crypto::hash(&data);
/// let default_hash = Hash::default();
/// ```
struct Hash, HASH_SIZE
}
implement_public_crypto_wrapper! {
/// Ed25519 digital signature. This structure creates a signature over data
/// using a secret key. Later it is possible to verify, using the corresponding
/// public key, that the data have indeed been signed with that secret key.
///
/// Ed25519 is a signature system that ensures fast signing and key generation,
/// as well as security and collision resilience.
///
/// # Examples
///
/// The example below generates a pair of random public and secret keys,
/// adds certain data, signs the data using the secret key and verifies
/// that the data have been signed with that secret key.
///
/// ```
/// # exonum_crypto::init();
/// let (public_key, secret_key) = exonum_crypto::gen_keypair();
/// let data = [1, 2, 3];
/// let signature = exonum_crypto::sign(&data, &secret_key);
/// assert!(exonum_crypto::verify(&signature, &data, &public_key));
/// ```
struct Signature, SIGNATURE_LENGTH
}
implement_private_crypto_wrapper! {
/// Ed25519 seed representing a succession of bytes that can be used for
/// deterministic keypair generation. If the same seed is indicated in the
/// generator multiple times, the generated keys will be the same each time.
///
/// Note that this is not the seed added to Exonum transactions for additional
/// security, this is a separate entity. This structure is useful for testing,
/// to receive repeatable results. The seed in this structure is either set
/// manually or selected using the methods below.
///
/// # Examples
///
/// The example below generates a pair of public and secret keys taking
/// into account the selected seed. The same seed will always lead to
/// generation of the same keypair.
///
/// ```
/// use exonum_crypto::{SEED_LENGTH, Seed};
///
/// # exonum_crypto::init();
/// let (public_key, secret_key) = exonum_crypto::gen_keypair_from_seed(&Seed::new([1; SEED_LENGTH]));
/// ```
struct Seed, SEED_LENGTH
}
implement_serde! {Hash}
implement_serde! {PublicKey}
implement_serde! {SecretKey}
implement_serde! {Seed}
implement_serde! {Signature}
implement_index_traits! {Hash}
implement_index_traits! {PublicKey}
implement_index_traits! {SecretKey}
implement_index_traits! {Seed}
implement_index_traits! {Signature}
/// Pair of matching secret and public keys.
///
/// Prefer using this struct to `(PublicKey, SecretKey)`, since it asserts that public
/// and secret keys necessarily match.
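///
/// # Examples
///
/// A short usage sketch:
///
/// ```
/// # exonum_crypto::init();
/// use exonum_crypto::KeyPair;
///
/// let keys = KeyPair::random();
/// let public_key = keys.public_key();
/// ```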
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
pub struct KeyPair {
public_key: PublicKey,
secret_key: SecretKey,
}
impl KeyPair {
/// Creates a keypair from the provided keys, checking that they correspond to each other.
///
/// # Panics
///
/// - If the keys do not match.
pub fn from_keys(public_key: PublicKey, secret_key: SecretKey) -> Self {
assert!(
verify_keys_match(&public_key, &secret_key),
"Public key does not match the secret key."
);
Self {
public_key,
secret_key,
}
}
/// Generates a random keypair using the random number generator provided by the crypto backend.
pub fn random() -> Self {
let (public_key, secret_key) = gen_keypair();
Self {
public_key,
secret_key,
}
}
/// Generates a keypair from the provided seed.
pub fn from_seed(seed: &Seed) -> Self {
let (public_key, secret_key) = gen_keypair_from_seed(seed);
Self {
public_key,
secret_key,
}
}
/// Gets the public key.
pub fn public_key(&self) -> PublicKey {
self.public_key
}
/// Gets a reference to the secret key.
pub fn secret_key(&self) -> &SecretKey {
&self.secret_key
}
}
impl From<(PublicKey, SecretKey)> for KeyPair {
fn from(keys: (PublicKey, SecretKey)) -> Self {
Self::from_keys(keys.0, keys.1)
}
}
fn verify_keys_match(public_key: &PublicKey, secret_key: &SecretKey) -> bool {
crypto_impl::verify_keys_match(&public_key.0, &secret_key.0)
}
#[cfg(test)]
mod tests {
use super::{
fmt, gen_keypair, hash, Hash, HashStream, KeyPair, PublicKey, SecretKey, Seed, Serialize,
SignStream, Signature, HASH_SIZE, PUBLIC_KEY_LENGTH, SECRET_KEY_LENGTH, SEED_LENGTH,
SIGNATURE_LENGTH,
};
use hex::FromHex;
use serde::de::DeserializeOwned;
use std::str::FromStr;
#[test]
fn to_from_hex_hash() {
let original = hash(&[]);
let from_hex = Hash::from_hex(original.to_hex()).unwrap();
assert_eq!(original, from_hex);
}
#[test]
fn to_from_string_hash() {
let original = hash(&[]);
let from_hex = Hash::from_str(&original.to_string()).unwrap();
assert_eq!(original, from_hex);
}
#[test]
fn zero_hash() {
let hash = Hash::zero();
assert_eq!(hash.as_ref(), [0; HASH_SIZE]);
}
#[test]
fn to_from_hex_keys() {
let (p, s) = gen_keypair();
let ph = PublicKey::from_hex(p.to_hex()).unwrap();
assert_eq!(p, ph);
let sh = SecretKey::from_hex(s.to_hex()).unwrap();
assert_eq!(s, sh);
}
#[test]
fn to_from_string_public_key() {
let p = gen_keypair().0;
let ph = PublicKey::from_str(&p.to_string()).unwrap();
assert_eq!(p, ph);
}
#[test]
fn serialize_deserialize_hash() {
assert_serialize_deserialize(&Hash::new([207; HASH_SIZE]));
}
#[test]
fn serialize_deserialize_public_key() {
assert_serialize_deserialize(&PublicKey::new([208; PUBLIC_KEY_LENGTH]));
}
#[test]
fn serialize_deserialize_signature() {
assert_serialize_deserialize(&Signature::new([209; SIGNATURE_LENGTH]));
}
#[test]
fn serialize_deserialize_seed() {
assert_serialize_deserialize(&Seed::new([210; SEED_LENGTH]));
}
#[test]
fn serialize_deserialize_secret_key() {
assert_serialize_deserialize(&SecretKey::new([211; SECRET_KEY_LENGTH]));
}
#[test]
fn debug_format() {
// Check zero padding.
let hash = Hash::new([1; HASH_SIZE]);
assert_eq!(format!("{:?}", &hash), "Hash(\"01010101...\")");
let pk = PublicKey::new([15; PUBLIC_KEY_LENGTH]);
assert_eq!(format!("{:?}", &pk), "PublicKey(\"0f0f0f0f...\")");
let sk = SecretKey::new([8; SECRET_KEY_LENGTH]);
assert_eq!(format!("{:?}", &sk), "SecretKey(\"08080808...\")");
let signature = Signature::new([10; SIGNATURE_LENGTH]);
assert_eq!(format!("{:?}", &signature), "Signature(\"0a0a0a0a...\")");
let seed = Seed::new([4; SEED_LENGTH]);
assert_eq!(format!("{:?}", &seed), "Seed(\"04040404...\")");
// Check no padding.
let hash = Hash::new([128; HASH_SIZE]);
assert_eq!(format!("{:?}", &hash), "Hash(\"80808080...\")");
let sk = SecretKey::new([255; SECRET_KEY_LENGTH]);
assert_eq!(format!("{:?}", &sk), "SecretKey(\"ffffffff...\")");
}
// Note that only public values have Display impl.
#[test]
fn display_format() {
// Check zero padding.
let hash = Hash::new([1; HASH_SIZE]);
assert_eq!(
format!("{}", &hash),
"0101010101010101010101010101010101010101010101010101010101010101"
);
let pk = PublicKey::new([15; PUBLIC_KEY_LENGTH]);
assert_eq!(
format!("{}", &pk),
"0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f"
);
let signature = Signature::new([10; SIGNATURE_LENGTH]);
assert_eq!(
format!("{}", &signature),
"0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a\
0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"
);
// Check no padding.
let hash = Hash::new([128; HASH_SIZE]);
assert_eq!(
format!("{}", &hash),
"8080808080808080808080808080808080808080808080808080808080808080"
);
}
#[test]
fn hash_streaming_zero() {
let h1 = hash(&[]);
let state = HashStream::new();
let h2 = state.update(&[]).hash();
assert_eq!(h1, h2);
}
#[test]
fn hash_streaming_chunks() {
let data: [u8; 10] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0];
let h1 = hash(&data);
let state = HashStream::new();
let h2 = state.update(&data[..5]).update(&data[5..]).hash();
assert_eq!(h1, h2);
}
#[test]
fn sign_streaming_zero() {
let (pk, sk) = gen_keypair();
let mut creation_stream = SignStream::new().update(&[]);
let sig = creation_stream.sign(&sk);
let mut verified_stream = SignStream::new().update(&[]);
assert!(verified_stream.verify(&sig, &pk));
}
#[test]
fn sign_streaming_chunks() {
let data: [u8; 10] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0];
let (pk, sk) = gen_keypair();
let mut creation_stream = SignStream::new().update(&data[..5]).update(&data[5..]);
let sig = creation_stream.sign(&sk);
let mut verified_stream = SignStream::new().update(&data[..5]).update(&data[5..]);
assert!(verified_stream.verify(&sig, &pk));
}
fn assert_serialize_deserialize<T>(original_value: &T)
where
T: Serialize + DeserializeOwned + PartialEq + fmt::Debug,
{
let json = serde_json::to_string(original_value).unwrap();
let deserialized_value: T = serde_json::from_str(&json).unwrap();
assert_eq!(*original_value, deserialized_value);
}
#[test]
fn valid_keypair() {
let (pk, sk) = gen_keypair();
let _ = KeyPair::from_keys(pk, sk);
}
#[test]
#[should_panic]
fn not_valid_keypair() {
let (pk, _) = gen_keypair();
let (_, sk) = gen_keypair();
let _ = KeyPair::from_keys(pk, sk);
}
}
PythonEditor.py

##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import ast
import sys
import traceback
import imath
import IECore
import Gaffer
import GafferUI
from Qt import QtWidgets
from Qt import QtCore
## \todo Custom right click menu with script load, save, execute file, undo, redo etc.
## \todo Standard way for users to customise all menus
## \todo Tab completion and popup help. rlcompleter module should be useful for tab completion. Completer( dict ) constructs a completer
# that works in a specific namespace.
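## A rough sketch of that idea (hypothetical wiring, not part of this editor yet) :
# a Completer built over the execution dict yields candidate names one by one.
#
# import rlcompleter
# completer = rlcompleter.Completer( executionDict )
# matches = []
# state = 0
# while True :
# 	match = completer.complete( "Gaf", state )
# 	if match is None :
# 		break
# 	matches.append( match )
# 	state += 1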
class PythonEditor( GafferUI.Editor ) :
def __init__( self, scriptNode, **kw ) :
self.__splittable = GafferUI.SplitContainer( borderWidth = 2 )
GafferUI.Editor.__init__( self, self.__splittable, scriptNode, **kw )
self.__outputWidget = GafferUI.MultiLineTextWidget(
editable = False,
wrapMode = GafferUI.MultiLineTextWidget.WrapMode.None_,
role = GafferUI.MultiLineTextWidget.Role.Code,
)
self.__inputWidget = GafferUI.MultiLineTextWidget(
wrapMode = GafferUI.MultiLineTextWidget.WrapMode.None_,
role = GafferUI.MultiLineTextWidget.Role.Code,
)
self.__outputWidget._qtWidget().setProperty( "gafferTextRole", "output" )
self.__inputWidget._qtWidget().setProperty( "gafferTextRole", "input" )
self.__splittable.append( self.__outputWidget )
self.__splittable.append( self.__inputWidget )
self.__inputWidget.activatedSignal().connect( Gaffer.WeakMethod( self.__activated ), scoped = False )
self.__inputWidget.dropTextSignal().connect( Gaffer.WeakMethod( self.__dropText ), scoped = False )
self.__executionDict = {
"imath" : imath,
"IECore" : IECore,
"Gaffer" : Gaffer,
"GafferUI" : GafferUI,
"root" : scriptNode,
}
def inputWidget( self ) :
return self.__inputWidget
def outputWidget( self ) :
return self.__outputWidget
def execute( self ) :
# decide what to execute
haveSelection = True
toExecute = self.__inputWidget.selectedText()
if not toExecute :
haveSelection = False
toExecute = self.__inputWidget.getText()
# parse it first. this lets us give better error formatting
# for syntax errors, and also figure out whether we can eval()
# and display the result or must exec() only.
try :
parsed = ast.parse( toExecute )
except SyntaxError as e :
self.__outputWidget.appendHTML( self.__syntaxErrorToHTML( e ) )
return
# execute it
self.__outputWidget.appendHTML( self.__codeToHTML( toExecute ) )
with Gaffer.OutputRedirection( stdOut = Gaffer.WeakMethod( self.__redirectOutput ), stdErr = Gaffer.WeakMethod( self.__redirectOutput ) ) :
with _MessageHandler( self.__outputWidget ) :
with Gaffer.UndoScope( self.scriptNode() ) :
with self.getContext() :
try :
if len( parsed.body ) == 1 and isinstance( parsed.body[0], ast.Expr ) :
result = eval( toExecute, self.__executionDict, self.__executionDict )
if result is not None :
self.__outputWidget.appendText( str( result ) )
else :
exec( toExecute, self.__executionDict, self.__executionDict )
if not haveSelection :
self.__inputWidget.setText( "" )
except Exception as e :
self.__outputWidget.appendHTML( self.__exceptionToHTML() )
def __repr__( self ) :
return "GafferUI.PythonEditor( scriptNode )"
def __activated( self, widget ) :
self.execute()
return True
def __dropText( self, widget, dragData ) :
if isinstance( dragData, IECore.StringVectorData ) :
return repr( list( dragData ) )
elif isinstance( dragData, Gaffer.GraphComponent ) :
if self.scriptNode().isAncestorOf( dragData ) :
return "root['" + dragData.relativeName( self.scriptNode() ).replace( ".", "']['" ) + "']"
elif isinstance( dragData, Gaffer.Set ) :
if len( dragData ) == 1 :
return self.__dropText( widget, dragData[0] )
else :
return "[ " + ", ".join( [ self.__dropText( widget, d ) for d in dragData ] ) + " ]"
elif isinstance( dragData, IECore.CompoundData ) :
return repr( dragData )
elif isinstance( dragData, IECore.Data ) and hasattr( dragData, "value" ) :
return repr( dragData.value )
return None
def __codeToHTML( self, code ) :
		code = code.replace( "<", "&lt;" ).replace( ">", "&gt;" )
return "<pre>" + code + "</pre>"
def __syntaxErrorToHTML( self, syntaxError ) :
formatted = traceback.format_exception_only( SyntaxError, syntaxError )
lineNumber = formatted[0].rpartition( "," )[2].strip()
headingText = formatted[-1].replace( ":", " : " + lineNumber + " : ", 1 )
result = "<h1 class='ERROR'>%s</h1>" % headingText
result += "<br>" + self.__codeToHTML( "".join( formatted[1:-1] ) )
return result
def __exceptionToHTML( self ) :
t = traceback.extract_tb( sys.exc_info()[2] )
lineNumber = str( t[1][1] )
headingText = traceback.format_exception_only( *(sys.exc_info()[:2]) )[0].replace( ":", " : line " + lineNumber + " : ", 1 )
result = "<h1 class='ERROR'>%s</h1>" % headingText
if len( t ) > 2 :
result += "<br>" + self.__codeToHTML( "".join( traceback.format_list( t[2:] ) ) )
return result
def __redirectOutput( self, output ) :
if output != "\n" :
self.__outputWidget.appendText( output )
# update the gui so messages are output as they occur, rather than all getting queued
# up till the end.
QtWidgets.QApplication.instance().processEvents( QtCore.QEventLoop.ExcludeUserInputEvents )
GafferUI.Editor.registerType( "PythonEditor", PythonEditor )
class | ( IECore.MessageHandler ) :
def __init__( self, textWidget ) :
IECore.MessageHandler.__init__( self )
self.__textWidget = textWidget
def handle( self, level, context, message ) :
		html = "<h1 class='%s'>%s : %s </h1><pre class='message'>%s</pre><br>" % (
IECore.Msg.levelAsString( level ),
IECore.Msg.levelAsString( level ),
context,
message
)
self.__textWidget.appendHTML( html )
# update the gui so messages are output as they occur, rather than all getting queued
# up till the end.
QtWidgets.QApplication.instance().processEvents( QtCore.QEventLoop.ExcludeUserInputEvents )
| _MessageHandler |
tasks_chg.rs | #[doc = "Enable channel group.\n\nThis register you can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [en](en) module"]
pub type EN = crate::Reg<u32, _EN>; | pub struct _EN;
#[doc = "`write(|w| ..)` method takes [en::W](en::W) writer structure"]
impl crate::Writable for EN {}
#[doc = "Enable channel group."]
pub mod en;
#[doc = "Disable channel group.\n\nThis register you can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [dis](dis) module"]
pub type DIS = crate::Reg<u32, _DIS>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _DIS;
#[doc = "`write(|w| ..)` method takes [dis::W](dis::W) writer structure"]
impl crate::Writable for DIS {}
#[doc = "Disable channel group."]
pub mod dis; | #[allow(missing_docs)]
#[doc(hidden)] |
animation.js | /*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import {
useCallback,
useMemo,
useEffect,
useRef,
useDebouncedCallback,
shallowEqual,
} from '@googleforcreators/react';
import PropTypes from 'prop-types';
import { v4 as uuidv4 } from 'uuid';
import { __ } from '@googleforcreators/i18n';
import styled from 'styled-components';
import {
BACKGROUND_ANIMATION_EFFECTS,
BG_MAX_SCALE,
BG_MIN_SCALE,
DIRECTION,
SCALE_DIRECTION,
hasOffsets,
STORY_ANIMATION_STATE,
getAnimationEffectDefaults,
} from '@googleforcreators/animation';
import { progress } from '@googleforcreators/units';
/**
* Internal dependencies
*/
import StoryPropTypes, { AnimationPropType } from '../../../../types';
import { useStory } from '../../../../app';
import { DESIGN_COPY } from '../../../checklist';
import Warning from '../warning';
import { Row } from '../../../form';
import { SimplePanel } from '../../panel';
import { states, styles, useHighlights } from '../../../../app/highlights';
import EffectPanel, { getEffectName, getEffectDirection } from './effectPanel';
import { EffectChooserDropdown } from './effectChooserDropdown';
const ANIMATION_PROPERTY = 'animation';
const StyledRow = styled(Row)`
margin-bottom: -1px;
`;
const GroupWrapper = styled.div`
${({ hasAnimation, theme }) =>
hasAnimation &&
`
border: 1px solid ${theme.colors.border.defaultNormal};
border-radius: ${theme.borders.radius.small};
`}
margin-bottom: 16px;
`;
const backgroundAnimationTooltip = __(
'The background image is too small to animate. Double click on the bg & scale the image before applying the animation.',
'web-stories'
);
function AnimationPanel({
selectedElements,
selectedElementAnimations,
pushUpdateForObject,
updateAnimationState,
}) {
const isFirstPage = useStory(
({ state: { currentPageNumber } }) => currentPageNumber === 1
);
const playUpdatedAnimation = useRef(false);
const { highlight, resetHighlight } = useHighlights((state) => ({
highlight: state[states.ANIMATION],
resetHighlight: state.onFocusOut,
cancelHighlight: state.cancelEffect,
}));
| // Combining local element updates with the
// page level applied updates
const updated = selectedElements
.map((element) => element.animation)
.filter(Boolean);
return selectedElementAnimations
.map((anim) => ({
...(updated.find((a) => a.id === anim.id) || anim),
}))
.filter((a) => !a.delete);
}, [selectedElements, selectedElementAnimations]);
const handlePanelChange = useCallback(
(animation, submitArg = false) => {
if (shallowEqual(animation, updatedAnimations[0])) {
return;
}
pushUpdateForObject(ANIMATION_PROPERTY, animation, null, submitArg);
playUpdatedAnimation.current = true;
},
[pushUpdateForObject, updatedAnimations]
);
const handleAddOrUpdateElementEffect = useCallback(
({ animation, ...options }) => {
if (!animation) {
return;
}
const id = selectedElementAnimations[0]?.id || uuidv4();
const defaults = getAnimationEffectDefaults(animation);
const persisted =
selectedElementAnimations[0]?.type === animation
? {
duration: selectedElementAnimations[0]?.duration,
delay: selectedElementAnimations[0]?.delay,
}
: {};
pushUpdateForObject(
ANIMATION_PROPERTY,
{
id,
type: animation,
...defaults,
...persisted,
...options,
},
null,
true
);
// There's nothing unique to the animation data to signify that it
// was changed by the effect chooser, so we track it here.
playUpdatedAnimation.current = true;
},
[selectedElementAnimations, pushUpdateForObject]
);
// Play animation of selected elements when effect chooser signals
// that it has changed the data and the data comes back changed.
//
// Currently, animations get reset whenever the designInspector
// gains focus, adding this debouncer and scheduling it after
// the all the focus updates go through prevents the reset from
// overriding this play call.
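  // Reading the focused element during render makes the effect below
  // re-run whenever a render happens with a new focus target.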
const activeElement = document.activeElement;
const debouncedUpdateAnimationState = useDebouncedCallback(() => {
if (playUpdatedAnimation.current) {
updateAnimationState({
animationState: STORY_ANIMATION_STATE.PLAYING_SELECTED,
});
playUpdatedAnimation.current = false;
}
}, 300);
useEffect(debouncedUpdateAnimationState, [
selectedElementAnimations,
updateAnimationState,
activeElement,
debouncedUpdateAnimationState,
]);
const handleRemoveEffect = useCallback(() => {
pushUpdateForObject(
ANIMATION_PROPERTY,
{
...updatedAnimations[0],
delete: true,
},
null,
true
);
}, [pushUpdateForObject, updatedAnimations]);
// Figure out if any options are disabled
// for an animation type input
const disabledTypeOptionsMap = useMemo(() => {
if (selectedElements[0]?.isBackground) {
const hasOffset =
['media', 'image', 'video', 'gif'].includes(selectedElements[0].type) &&
hasOffsets({ element: selectedElements[0] });
const normalizedScale = progress(selectedElements[0]?.scale || 0, {
MIN: BG_MIN_SCALE,
MAX: BG_MAX_SCALE,
});
return {
[BACKGROUND_ANIMATION_EFFECTS.PAN.value]: {
tooltip: backgroundAnimationTooltip,
options: [
!hasOffset.bottom && DIRECTION.TOP_TO_BOTTOM,
!hasOffset.left && DIRECTION.RIGHT_TO_LEFT,
!hasOffset.top && DIRECTION.BOTTOM_TO_TOP,
!hasOffset.right && DIRECTION.LEFT_TO_RIGHT,
].filter(Boolean),
},
[BACKGROUND_ANIMATION_EFFECTS.ZOOM.value]: {
tooltip: backgroundAnimationTooltip,
options: [
normalizedScale <= 0.01 && SCALE_DIRECTION.SCALE_IN,
normalizedScale >= 0.99 && SCALE_DIRECTION.SCALE_OUT,
].filter(Boolean),
},
[BACKGROUND_ANIMATION_EFFECTS.PAN_AND_ZOOM.value]: {
tooltip: backgroundAnimationTooltip,
options: [
!hasOffset.bottom && DIRECTION.TOP_TO_BOTTOM,
!hasOffset.left && DIRECTION.RIGHT_TO_LEFT,
!hasOffset.top && DIRECTION.BOTTOM_TO_TOP,
!hasOffset.right && DIRECTION.LEFT_TO_RIGHT,
normalizedScale <= 0.01 && SCALE_DIRECTION.SCALE_IN,
normalizedScale >= 0.99 && SCALE_DIRECTION.SCALE_OUT,
].filter(Boolean),
},
};
}
return {};
}, [selectedElements]);
const selectedEffectTitle = getEffectName(updatedAnimations[0]?.type);
return selectedElements.length > 1 ? null : (
<SimplePanel
name="animation"
title={__('Animation', 'web-stories')}
css={highlight?.showEffect && styles.FLASH}
onAnimationEnd={() => resetHighlight()}
isPersistable={!highlight}
>
<GroupWrapper hasAnimation={selectedEffectTitle}>
<StyledRow>
<EffectChooserDropdown
ref={(node) => {
if (node && highlight?.focus && highlight?.showEffect) {
node.focus();
}
}}
onAnimationSelected={handleAddOrUpdateElementEffect}
onNoEffectSelected={handleRemoveEffect}
isBackgroundEffects={isBackground}
disabledTypeOptionsMap={disabledTypeOptionsMap}
direction={getEffectDirection(updatedAnimations[0])}
selectedEffectType={updatedAnimations[0]?.type}
selectButtonStylesOverride={highlight?.focus && styles.OUTLINE}
disabled={isFirstPage}
/>
</StyledRow>
{updatedAnimations[0] && (
<EffectPanel
animation={updatedAnimations[0]}
onChange={handlePanelChange}
disabledTypeOptionsMap={disabledTypeOptionsMap}
disabled={isFirstPage}
/>
)}
</GroupWrapper>
{isFirstPage && (
<Warning message={DESIGN_COPY.firstPageAnimation.animationPanel} />
)}
</SimplePanel>
);
}
AnimationPanel.propTypes = {
selectedElements: PropTypes.arrayOf(StoryPropTypes.element).isRequired,
selectedElementAnimations: PropTypes.arrayOf(AnimationPropType),
pushUpdateForObject: PropTypes.func.isRequired,
updateAnimationState: PropTypes.func,
};
export default AnimationPanel; | const isBackground =
selectedElements.length === 1 && selectedElements[0].isBackground;
const updatedAnimations = useMemo(() => { |
lachnospiraceaebacteriumnk4a144.py | """
This file offers the methods to automatically retrieve the graph Lachnospiraceae bacterium NK4A144.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def | (
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Lachnospiraceae bacterium NK4A144 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
    load_nodes: bool = True
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of the Lachnospiraceae bacterium NK4A144 graph.
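    Usage example
    -----------------------
    A minimal sketch; the first call downloads and preprocesses the edge list
    under `cache_path`, and later calls reuse the cached files:

    ```python
    graph = LachnospiraceaeBacteriumNk4a144(version="links.v11.5")
    print(graph)
    ```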
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="LachnospiraceaeBacteriumNk4a144",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| LachnospiraceaeBacteriumNk4a144 |
getOrDownload_test.go | package mongobin
import (
"testing"
"github.com/danbenn/memongo/memongolog"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestGetOrDownload(t *testing.T) {
afs = afero.Afero{Fs: afero.NewMemMapFs()}
spec := DownloadSpec{
Version: "4.0.5",
Platform: "osx",
SSLBuildNeeded: true,
Arch: "x86_64",
}
cacheDir, err := afs.TempDir("", "")
require.NoError(t, err) |
// First call should download the file
path, err := GetOrDownloadMongod(spec.GetDownloadURL(), cacheDir, memongolog.New(nil, memongolog.LogLevelDebug))
require.NoError(t, err)
assert.Equal(t, cacheDir+"/mongodb-osx-ssl-x86_64-4_0_5_tgz_d50ef2155b/mongod", path)
stat, err := afs.Stat(path)
require.NoError(t, err)
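	// The fetched binary should be non-trivially sized and have the
	// owner-execute bit set.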
assert.True(t, stat.Size() > 50000000)
assert.True(t, stat.Mode()&0100 != 0)
	// Second call should use the cached file
path2, err := GetOrDownloadMongod(spec.GetDownloadURL(), cacheDir, memongolog.New(nil, memongolog.LogLevelDebug))
require.NoError(t, err)
assert.Equal(t, path, path2)
stat2, err := afs.Stat(path2)
require.NoError(t, err)
assert.Equal(t, stat.ModTime(), stat2.ModTime())
} | |
reactive-array.js | var ReactiveArray,
extend = function(child, parent) {
for (var key in parent) {
if (hasProp.call(parent, key)) child[key] = parent[key];
}
function ctor() {
this.constructor = child;
}
ctor.prototype = parent.prototype;
child.prototype = new ctor();
child.__super__ = parent.prototype;
return child;
},
hasProp = {}.hasOwnProperty;
ReactiveArray = (function(superClass) {
var isArray;
extend(ReactiveArray, superClass);
isArray = function(obj) {
return obj instanceof Array;
};
function ReactiveArray(p1, p2) {
var dep, item, j, len, pause;
dep = null;
pause = false;
this.changed = function() {
if (dep && !pause) {
return dep.changed();
}
};
    this.depend = function() {
      // Guard like `changed` above: arrays built by `concat` have no dep.
      if (dep) {
        return dep.depend();
      }
    };
if (isArray(p1)) {
for (j = 0, len = p1.length; j < len; j++) {
item = p1[j];
this.push(item);
}
dep = p2;
} else {
dep = p1;
}
this.pause = function() {
return (pause = true);
};
this.resume = function() {
pause = false;
return this.changed();
};
}
ReactiveArray.prototype.array = function() {
this.depend();
return Array.prototype.slice.call(this);
};
ReactiveArray.prototype.list = function() {
this.depend();
return this;
};
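  // NOTE: every instance gets an own `depend` closure in the constructor,
  // which shadows this prototype method.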
ReactiveArray.prototype.depend = function() {
this.depend();
return this;
};
ReactiveArray.prototype.push = function() {
var item;
item = ReactiveArray.__super__.push.apply(this, arguments);
this.changed();
return item;
};
ReactiveArray.prototype.unshift = function() {
var item;
item = ReactiveArray.__super__.unshift.apply(this, arguments);
this.changed();
return item;
};
ReactiveArray.prototype.pop = function() {
var item;
item = ReactiveArray.__super__.pop.apply(this, arguments);
this.changed();
return item;
};
ReactiveArray.prototype.shift = function() {
var item;
item = ReactiveArray.__super__.shift.apply(this, arguments);
this.changed();
return item;
};
ReactiveArray.prototype.remove = function(valueOrPredicate) {
var i, predicate, removedValues, underlyingArray, value;
underlyingArray = this;
removedValues = [];
predicate =
typeof valueOrPredicate === "function"
? valueOrPredicate
: function(value) {
return value === valueOrPredicate;
};
i = 0;
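    // Splice in place; step the index back after a removal so the
    // element that slides into position i is not skipped.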
while (i < underlyingArray.length) {
value = underlyingArray[i];
if (predicate(value)) {
removedValues.push(value);
underlyingArray.splice(i, 1);
i--;
}
i++;
}
if (removedValues.length) {
this.changed();
}
return removedValues;
};
ReactiveArray.prototype.clear = function() {
while (this.length) {
this.pop();
}
this.changed();
return this;
};
ReactiveArray.prototype.concat = function() {
var a, j, len, ret;
ret = this.array();
for (j = 0, len = arguments.length; j < len; j++) {
a = arguments[j];
if (a instanceof ReactiveArray) {
ret = ret.concat(a.array());
} else {
ret = ret.concat(a);
}
}
return new ReactiveArray(ret);
};
ReactiveArray.prototype.indexOf = function() {
this.depend();
return ReactiveArray.__super__.indexOf.apply(this, arguments);
};
ReactiveArray.prototype.join = function() {
this.depend();
return ReactiveArray.__super__.join.apply(this, arguments);
};
ReactiveArray.prototype.lastIndexOf = function() {
this.depend();
return ReactiveArray.__super__.lastIndexOf.apply(this, arguments);
};
ReactiveArray.prototype.reverse = function() {
ReactiveArray.__super__.reverse.apply(this, arguments);
this.changed();
return this;
};
ReactiveArray.prototype.sort = function() {
ReactiveArray.__super__.sort.apply(this, arguments);
this.changed();
return this; | ret = ReactiveArray.__super__.splice.apply(this, arguments);
this.changed();
return ret;
};
return ReactiveArray;
})(Array);
export default ReactiveArray; | };
ReactiveArray.prototype.splice = function() {
var ret; |
task_run.go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package cli
import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/aws/copilot-cli/internal/pkg/docker/dockerengine"
"github.com/aws/aws-sdk-go/aws/arn"
awscloudformation "github.com/aws/copilot-cli/internal/pkg/aws/cloudformation"
"github.com/aws/copilot-cli/internal/pkg/describe"
"github.com/aws/copilot-cli/internal/pkg/logging"
"github.com/aws/copilot-cli/internal/pkg/term/prompt"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/copilot-cli/internal/pkg/aws/ec2"
"github.com/aws/copilot-cli/internal/pkg/aws/ecr"
awsecs "github.com/aws/copilot-cli/internal/pkg/aws/ecs"
"github.com/aws/copilot-cli/internal/pkg/aws/sessions"
"github.com/aws/copilot-cli/internal/pkg/config"
"github.com/aws/copilot-cli/internal/pkg/deploy"
"github.com/aws/copilot-cli/internal/pkg/deploy/cloudformation"
"github.com/aws/copilot-cli/internal/pkg/ecs"
"github.com/aws/copilot-cli/internal/pkg/exec"
"github.com/aws/copilot-cli/internal/pkg/repository"
"github.com/aws/copilot-cli/internal/pkg/task"
"github.com/aws/copilot-cli/internal/pkg/term/color"
"github.com/aws/copilot-cli/internal/pkg/term/log"
termprogress "github.com/aws/copilot-cli/internal/pkg/term/progress"
"github.com/aws/copilot-cli/internal/pkg/term/selector"
"github.com/dustin/go-humanize/english"
"github.com/google/shlex"
"github.com/spf13/afero"
"github.com/spf13/cobra"
)
const (
appEnvOptionNone = "None (run in default VPC)"
defaultDockerfilePath = "Dockerfile"
imageTagLatest = "latest"
shortTaskIDLength = 8
)
const (
workloadTypeJob = "job"
workloadTypeSvc = "svc"
workloadTypeInvalid = "invalid"
)
const (
fmtImageURI = "%s:%s"
)
var (
errNumNotPositive = errors.New("number of tasks must be positive")
errCPUNotPositive = errors.New("CPU units must be positive")
errMemNotPositive = errors.New("memory must be positive")
)
var (
taskRunAppPrompt = fmt.Sprintf("In which %s would you like to run this %s?", color.Emphasize("application"), color.Emphasize("task"))
taskRunEnvPrompt = fmt.Sprintf("In which %s would you like to run this %s?", color.Emphasize("environment"), color.Emphasize("task"))
taskRunAppPromptHelp = fmt.Sprintf(`Task will be deployed to the selected application.
Select %s to run the task in your default VPC instead of any existing application.`, color.Emphasize(appEnvOptionNone))
taskRunEnvPromptHelp = fmt.Sprintf(`Task will be deployed to the selected environment.
Select %s to run the task in your default VPC instead of any existing environment.`, color.Emphasize(appEnvOptionNone))
)
type runTaskVars struct {
count int
cpu int
memory int
groupName string
image string
dockerfilePath string
imageTag string
taskRole string
executionRole string
cluster string
subnets []string
securityGroups []string
env string
appName string
useDefaultSubnetsAndCluster bool
envVars map[string]string
secrets map[string]string
command string
entrypoint string
resourceTags map[string]string
follow bool
generateCommandTarget string
}
type runTaskOpts struct {
runTaskVars
isDockerfileSet bool
nFlag int
// Interfaces to interact with dependencies.
fs afero.Fs
store store
sel appEnvSelector
spinner progress
// Fields below are configured at runtime.
deployer taskDeployer
repository repositoryService
runner taskRunner
eventsWriter eventsWriter
defaultClusterGetter defaultClusterGetter
publicIPGetter publicIPGetter
provider sessionProvider
sess *session.Session
targetEnvironment *config.Environment
// Configurer functions.
configureRuntimeOpts func() error
configureRepository func() error
// NOTE: configureEventsWriter is only called when tailing logs (i.e. --follow is specified)
configureEventsWriter func(tasks []*task.Task)
configureECSServiceDescriber func(session *session.Session) ecs.ECSServiceDescriber
configureServiceDescriber func(session *session.Session) ecs.ServiceDescriber
configureJobDescriber func(session *session.Session) ecs.JobDescriber
// Functions to generate a task run command.
runTaskRequestFromECSService func(client ecs.ECSServiceDescriber, cluster, service string) (*ecs.RunTaskRequest, error)
runTaskRequestFromService func(client ecs.ServiceDescriber, app, env, svc string) (*ecs.RunTaskRequest, error)
runTaskRequestFromJob func(client ecs.JobDescriber, app, env, job string) (*ecs.RunTaskRequest, error)
}
func newTaskRunOpts(vars runTaskVars) (*runTaskOpts, error) {
store, err := config.NewStore()
if err != nil {
return nil, fmt.Errorf("new config store: %w", err)
}
opts := runTaskOpts{
runTaskVars: vars, | fs: &afero.Afero{Fs: afero.NewOsFs()},
store: store,
sel: selector.NewSelect(prompt.New(), store),
spinner: termprogress.NewSpinner(log.DiagnosticWriter),
provider: sessions.NewProvider(),
}
opts.configureRuntimeOpts = func() error {
opts.runner, err = opts.configureRunner()
if err != nil {
return fmt.Errorf("configure task runner: %w", err)
}
opts.deployer = cloudformation.New(opts.sess)
opts.defaultClusterGetter = awsecs.New(opts.sess)
opts.publicIPGetter = ec2.New(opts.sess)
return nil
}
opts.configureRepository = func() error {
repoName := fmt.Sprintf(deploy.FmtTaskECRRepoName, opts.groupName)
registry := ecr.New(opts.sess)
repo, err := repository.New(repoName, registry)
if err != nil {
return fmt.Errorf("initialize repository %s: %w", repoName, err)
}
opts.repository = repo
return nil
}
opts.configureEventsWriter = func(tasks []*task.Task) {
opts.eventsWriter = logging.NewTaskClient(opts.sess, opts.groupName, tasks)
}
opts.configureECSServiceDescriber = func(session *session.Session) ecs.ECSServiceDescriber {
return awsecs.New(session)
}
opts.configureServiceDescriber = func(session *session.Session) ecs.ServiceDescriber {
return ecs.New(session)
}
opts.configureJobDescriber = func(session *session.Session) ecs.JobDescriber {
return ecs.New(session)
}
opts.runTaskRequestFromECSService = ecs.RunTaskRequestFromECSService
opts.runTaskRequestFromService = ecs.RunTaskRequestFromService
opts.runTaskRequestFromJob = ecs.RunTaskRequestFromJob
return &opts, nil
}
func (o *runTaskOpts) configureRunner() (taskRunner, error) {
vpcGetter := ec2.New(o.sess)
ecsService := awsecs.New(o.sess)
if o.env != "" {
deployStore, err := deploy.NewStore(o.store)
if err != nil {
return nil, fmt.Errorf("connect to copilot deploy store: %w", err)
}
d, err := describe.NewEnvDescriber(describe.NewEnvDescriberConfig{
App: o.appName,
Env: o.env,
ConfigStore: o.store,
DeployStore: deployStore,
EnableResources: false, // We don't need to show detailed resources.
})
if err != nil {
return nil, fmt.Errorf("create describer for environment %s in application %s: %w", o.env, o.appName, err)
}
return &task.EnvRunner{
Count: o.count,
GroupName: o.groupName,
App: o.appName,
Env: o.env,
VPCGetter: vpcGetter,
ClusterGetter: ecs.New(o.sess),
Starter: ecsService,
EnvironmentDescriber: d,
}, nil
}
return &task.ConfigRunner{
Count: o.count,
GroupName: o.groupName,
Cluster: o.cluster,
Subnets: o.subnets,
SecurityGroups: o.securityGroups,
VPCGetter: vpcGetter,
ClusterGetter: ecsService,
Starter: ecsService,
}, nil
}
func (o *runTaskOpts) configureSessAndEnv() error {
var sess *session.Session
var env *config.Environment
if o.env != "" {
var err error
env, err = o.targetEnv()
if err != nil {
return err
}
sess, err = o.provider.FromRole(env.ManagerRoleARN, env.Region)
if err != nil {
return fmt.Errorf("get session from role %s and region %s: %w", env.ManagerRoleARN, env.Region, err)
}
} else {
var err error
sess, err = o.provider.Default()
if err != nil {
return fmt.Errorf("get default session: %w", err)
}
}
o.targetEnvironment = env
o.sess = sess
return nil
}
// Validate returns an error if the flag values passed by the user are invalid.
func (o *runTaskOpts) Validate() error {
if o.generateCommandTarget != "" {
if o.nFlag >= 2 {
return errors.New("cannot specify `--generate-cmd` with any other flag")
}
}
if o.count <= 0 {
return errNumNotPositive
}
if o.cpu <= 0 {
return errCPUNotPositive
}
if o.memory <= 0 {
return errMemNotPositive
}
if o.groupName != "" {
if err := basicNameValidation(o.groupName); err != nil {
return err
}
}
if o.image != "" && o.isDockerfileSet {
return errors.New("cannot specify both `--image` and `--dockerfile`")
}
if o.isDockerfileSet {
if _, err := o.fs.Stat(o.dockerfilePath); err != nil {
return err
}
}
if err := o.validateFlagsWithCluster(); err != nil {
return err
}
if err := o.validateFlagsWithDefaultCluster(); err != nil {
return err
}
if err := o.validateFlagsWithSubnets(); err != nil {
return err
}
if err := o.validateFlagsWithSecurityGroups(); err != nil {
return err
}
if o.appName != "" {
if err := o.validateAppName(); err != nil {
return err
}
}
if o.env != "" {
if err := o.validateEnvName(); err != nil {
return err
}
}
return nil
}
func (o *runTaskOpts) validateFlagsWithCluster() error {
if o.cluster == "" {
return nil
}
if o.appName != "" {
return fmt.Errorf("cannot specify both `--app` and `--cluster`")
}
if o.env != "" {
return fmt.Errorf("cannot specify both `--env` and `--cluster`")
}
if o.useDefaultSubnetsAndCluster {
return fmt.Errorf("cannot specify both `--default` and `--cluster`")
}
return nil
}
func (o *runTaskOpts) validateFlagsWithDefaultCluster() error {
if !o.useDefaultSubnetsAndCluster {
return nil
}
if o.subnets != nil {
return fmt.Errorf("cannot specify both `--subnets` and `--default`")
}
if o.appName != "" {
return fmt.Errorf("cannot specify both `--app` and `--default`")
}
if o.env != "" {
return fmt.Errorf("cannot specify both `--env` and `--default`")
}
return nil
}
func (o *runTaskOpts) validateFlagsWithSubnets() error {
if o.subnets == nil {
return nil
}
if o.useDefaultSubnetsAndCluster {
return fmt.Errorf("cannot specify both `--subnets` and `--default`")
}
if o.appName != "" {
return fmt.Errorf("cannot specify both `--subnets` and `--app`")
}
if o.env != "" {
return fmt.Errorf("cannot specify both `--subnets` and `--env`")
}
return nil
}
func (o *runTaskOpts) validateFlagsWithSecurityGroups() error {
if o.securityGroups == nil {
return nil
}
if o.appName != "" {
return fmt.Errorf("cannot specify both `--security-groups` and `--app`")
}
if o.env != "" {
return fmt.Errorf("cannot specify both `--security-groups` and `--env`")
}
return nil
}
// Ask prompts the user for any required or important fields that are not provided.
func (o *runTaskOpts) Ask() error {
if o.generateCommandTarget != "" {
return nil
}
if o.shouldPromptForAppEnv() {
if err := o.askAppName(); err != nil {
return err
}
if err := o.askEnvName(); err != nil {
return err
}
}
return nil
}
func (o *runTaskOpts) shouldPromptForAppEnv() bool {
// NOTE: if security groups are specified but subnets are not, then we use the default subnets with the
// specified security groups.
useDefault := o.useDefaultSubnetsAndCluster || (o.securityGroups != nil && o.subnets == nil && o.cluster == "")
useConfig := o.subnets != nil || o.cluster != ""
	// If the user hasn't asked for the default subnets and hasn't provided
	// specific subnets or a cluster of their own, then we prompt.
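	// For example, a bare `copilot task run` prompts for app/env, while
	// `--default`, `--subnets` or `--cluster` skip the prompts.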
return !useDefault && !useConfig
}
// Execute deploys and runs the task.
func (o *runTaskOpts) Execute() error {
if o.generateCommandTarget != "" {
return o.generateCommand()
}
if o.groupName == "" {
dir, err := os.Getwd()
if err != nil {
log.Errorf("Cannot retrieve working directory, please use --%s to specify a task group name.\n", taskGroupNameFlag)
return fmt.Errorf("get working directory: %v", err)
}
o.groupName = strings.ToLower(filepath.Base(dir))
}
// NOTE: all runtime options must be configured only after session is configured
if err := o.configureSessAndEnv(); err != nil {
return err
}
if err := o.configureRuntimeOpts(); err != nil {
return err
}
if o.env == "" && o.cluster == "" {
hasDefaultCluster, err := o.defaultClusterGetter.HasDefaultCluster()
if err != nil {
return fmt.Errorf(`find "default" cluster to deploy the task to: %v`, err)
}
if !hasDefaultCluster {
log.Errorf(
"Looks like there is no \"default\" cluster in your region!\nPlease run %s to create the cluster first, and then re-run %s.\n",
color.HighlightCode("aws ecs create-cluster"),
color.HighlightCode("copilot task run"),
)
return errors.New(`cannot find a "default" cluster to deploy the task to`)
}
}
if err := o.deployTaskResources(); err != nil {
return err
}
// NOTE: repository has to be configured only after task resources are deployed
if err := o.configureRepository(); err != nil {
return err
}
// NOTE: if image is not provided, then we build the image and push to ECR repo
if o.image == "" {
if err := o.buildAndPushImage(); err != nil {
return err
}
tag := imageTagLatest
if o.imageTag != "" {
tag = o.imageTag
}
o.image = fmt.Sprintf(fmtImageURI, o.repository.URI(), tag)
if err := o.updateTaskResources(); err != nil {
return err
}
}
tasks, err := o.runTask()
if err != nil {
return err
}
o.showPublicIPs(tasks)
if o.follow {
o.configureEventsWriter(tasks)
if err := o.displayLogStream(); err != nil {
return err
}
}
return nil
}
func (o *runTaskOpts) generateCommand() error {
command, err := o.runTaskCommand()
if err != nil {
return err
}
log.Infoln(command.CLIString())
return nil
}
func (o *runTaskOpts) runTaskCommand() (cliStringer, error) {
var cmd cliStringer
sess, err := o.provider.Default()
if err != nil {
return nil, fmt.Errorf("get default session: %s", err)
}
if arn.IsARN(o.generateCommandTarget) {
clusterName, serviceName, err := o.parseARN()
if err != nil {
return nil, err
}
return o.runTaskCommandFromECSService(sess, clusterName, serviceName)
}
parts := strings.Split(o.generateCommandTarget, "/")
switch len(parts) {
case 2:
clusterName, serviceName := parts[0], parts[1]
cmd, err = o.runTaskCommandFromECSService(sess, clusterName, serviceName)
if err != nil {
return nil, err
}
case 3:
appName, envName, workloadName := parts[0], parts[1], parts[2]
cmd, err = o.runTaskCommandFromWorkload(sess, appName, envName, workloadName)
if err != nil {
return nil, err
}
default:
return nil, errors.New("invalid input to --generate-cmd: must be of format <cluster>/<service> or <app>/<env>/<workload>")
}
return cmd, nil
}
func (o *runTaskOpts) parseARN() (string, string, error) {
svcARN := awsecs.ServiceArn(o.generateCommandTarget)
clusterName, err := svcARN.ClusterName()
if err != nil {
return "", "", fmt.Errorf("extract cluster name from arn %s: %w", svcARN, err)
}
serviceName, err := svcARN.ServiceName()
if err != nil {
return "", "", fmt.Errorf("extract service name from arn %s: %w", svcARN, err)
}
return clusterName, serviceName, nil
}
func (o *runTaskOpts) runTaskCommandFromECSService(sess *session.Session, clusterName, serviceName string) (cliStringer, error) {
cmd, err := o.runTaskRequestFromECSService(o.configureECSServiceDescriber(sess), clusterName, serviceName)
if err != nil {
var errMultipleContainers *ecs.ErrMultipleContainersInTaskDef
if errors.As(err, &errMultipleContainers) {
log.Errorln("`copilot task run` does not support running more than one container.")
}
return nil, fmt.Errorf("generate task run command from ECS service %s: %w", clusterName+"/"+serviceName, err)
}
return cmd, nil
}
func (o *runTaskOpts) runTaskCommandFromWorkload(sess *session.Session, appName, envName, workloadName string) (cliStringer, error) {
workloadType, err := o.workloadType(appName, workloadName)
if err != nil {
return nil, err
}
var cmd cliStringer
switch workloadType {
case workloadTypeJob:
cmd, err = o.runTaskRequestFromJob(o.configureJobDescriber(sess), appName, envName, workloadName)
if err != nil {
return nil, fmt.Errorf("generate task run command from job %s of application %s deployed in environment %s: %w", workloadName, appName, envName, err)
}
case workloadTypeSvc:
cmd, err = o.runTaskRequestFromService(o.configureServiceDescriber(sess), appName, envName, workloadName)
if err != nil {
return nil, fmt.Errorf("generate task run command from service %s of application %s deployed in environment %s: %w", workloadName, appName, envName, err)
}
}
return cmd, nil
}
func (o *runTaskOpts) workloadType(appName, workloadName string) (string, error) {
_, err := o.store.GetJob(appName, workloadName)
if err == nil {
return workloadTypeJob, nil
}
var errNoSuchJob *config.ErrNoSuchJob
if !errors.As(err, &errNoSuchJob) {
return "", fmt.Errorf("determine whether workload %s is a job: %w", workloadName, err)
}
_, err = o.store.GetService(appName, workloadName)
if err == nil {
return workloadTypeSvc, nil
}
var errNoSuchService *config.ErrNoSuchService
if !errors.As(err, &errNoSuchService) {
return "", fmt.Errorf("determine whether workload %s is a service: %w", workloadName, err)
}
return workloadTypeInvalid, fmt.Errorf("workload %s is neither a service nor a job", workloadName)
}
func (o *runTaskOpts) displayLogStream() error {
if err := o.eventsWriter.WriteEventsUntilStopped(); err != nil {
return fmt.Errorf("write events: %w", err)
}
log.Infof("%s %s stopped.\n",
english.PluralWord(o.count, "Task", ""),
english.PluralWord(o.count, "has", "have"))
return nil
}
func (o *runTaskOpts) runTask() ([]*task.Task, error) {
o.spinner.Start(fmt.Sprintf("Waiting for %s to be running for %s.", english.Plural(o.count, "task", ""), o.groupName))
tasks, err := o.runner.Run()
if err != nil {
o.spinner.Stop(log.Serrorf("Failed to run %s.\n\n", o.groupName))
return nil, fmt.Errorf("run task %s: %w", o.groupName, err)
}
o.spinner.Stop(log.Ssuccessf("%s %s %s running.\n\n", english.PluralWord(o.count, "Task", ""), o.groupName, english.PluralWord(o.count, "is", "are")))
return tasks, nil
}
func (o *runTaskOpts) showPublicIPs(tasks []*task.Task) {
publicIPs := make(map[string]string)
for _, t := range tasks {
if t.ENI == "" {
continue
}
ip, err := o.publicIPGetter.PublicIP(t.ENI) // We will just not show the ip address if an error occurs.
if err == nil {
publicIPs[t.TaskARN] = ip
}
}
if len(publicIPs) == 0 {
return
}
log.Infof("%s associated with the %s %s:\n",
english.PluralWord(len(publicIPs), "The public IP", "Public IPs"),
english.PluralWord(len(publicIPs), "task", "tasks"),
english.PluralWord(len(publicIPs), "is", "are"))
for taskARN, ip := range publicIPs {
if len(taskARN) >= shortTaskIDLength {
taskARN = taskARN[len(taskARN)-shortTaskIDLength:]
}
log.Infof("- %s (for %s)\n", ip, taskARN)
}
}
func (o *runTaskOpts) buildAndPushImage() error {
var additionalTags []string
if o.imageTag != "" {
additionalTags = append(additionalTags, o.imageTag)
}
if _, err := o.repository.BuildAndPush(dockerengine.New(exec.NewCmd()), &dockerengine.BuildArguments{
Dockerfile: o.dockerfilePath,
Context: filepath.Dir(o.dockerfilePath),
Tags: append([]string{imageTagLatest}, additionalTags...),
}); err != nil {
return fmt.Errorf("build and push image: %w", err)
}
return nil
}
func (o *runTaskOpts) deployTaskResources() error {
if err := o.deploy(); err != nil {
return fmt.Errorf("provision resources for task %s: %w", o.groupName, err)
}
return nil
}
func (o *runTaskOpts) updateTaskResources() error {
if err := o.deploy(); err != nil {
return fmt.Errorf("update resources for task %s: %w", o.groupName, err)
}
return nil
}
func (o *runTaskOpts) deploy() error {
var deployOpts []awscloudformation.StackOption
if o.env != "" {
deployOpts = []awscloudformation.StackOption{awscloudformation.WithRoleARN(o.targetEnvironment.ExecutionRoleARN)}
}
entrypoint, err := shlex.Split(o.entrypoint)
if err != nil {
return fmt.Errorf("split entrypoint %s into tokens using shell-style rules: %w", o.entrypoint, err)
}
command, err := shlex.Split(o.command)
if err != nil {
return fmt.Errorf("split command %s into tokens using shell-style rules: %w", o.command, err)
}
input := &deploy.CreateTaskResourcesInput{
Name: o.groupName,
CPU: o.cpu,
Memory: o.memory,
Image: o.image,
TaskRole: o.taskRole,
ExecutionRole: o.executionRole,
Command: command,
EntryPoint: entrypoint,
EnvVars: o.envVars,
Secrets: o.secrets,
App: o.appName,
Env: o.env,
AdditionalTags: o.resourceTags,
}
return o.deployer.DeployTask(os.Stderr, input, deployOpts...)
}
func (o *runTaskOpts) validateAppName() error {
if _, err := o.store.GetApplication(o.appName); err != nil {
return fmt.Errorf("get application: %w", err)
}
return nil
}
func (o *runTaskOpts) validateEnvName() error {
if o.appName != "" {
if _, err := o.targetEnv(); err != nil {
return err
}
} else {
return errNoAppInWorkspace
}
return nil
}
func (o *runTaskOpts) askAppName() error {
if o.appName != "" {
return nil
}
app, err := o.sel.Application(taskRunAppPrompt, taskRunAppPromptHelp, appEnvOptionNone)
if err != nil {
return fmt.Errorf("ask for application: %w", err)
}
if app == appEnvOptionNone {
return nil
}
o.appName = app
return nil
}
func (o *runTaskOpts) askEnvName() error {
if o.env != "" {
return nil
}
// If the application is empty then the user wants to run in the default VPC. Do not prompt for an environment name.
if o.appName == "" || o.subnets != nil {
return nil
}
env, err := o.sel.Environment(taskRunEnvPrompt, taskRunEnvPromptHelp, o.appName, appEnvOptionNone)
if err != nil {
return fmt.Errorf("ask for environment: %w", err)
}
if env == appEnvOptionNone {
return nil
}
o.env = env
return nil
}
func (o *runTaskOpts) targetEnv() (*config.Environment, error) {
env, err := o.store.GetEnvironment(o.appName, o.env)
if err != nil {
return nil, fmt.Errorf("get environment %s config: %w", o.env, err)
}
return env, nil
}
// BuildTaskRunCmd builds the command for running a new task.
func BuildTaskRunCmd() *cobra.Command {
vars := runTaskVars{}
cmd := &cobra.Command{
Use: "run",
Short: "Run a one-off task on Amazon ECS.",
Example: `
Run a task using your local Dockerfile and display log streams after the task is running.
You will be prompted to specify an environment for the tasks to run in.
/code $ copilot task run
Run a task named "db-migrate" in the "test" environment under the current workspace.
/code $ copilot task run -n db-migrate --env test
Run 4 tasks with 2GB memory, an existing image, and a custom task role.
/code $ copilot task run --count 4 --memory 2048 --image=rds-migrate --task-role migrate-role
Run a task with environment variables.
/code $ copilot task run --env-vars name=myName,user=myUser
Run a task using the current workspace with specific subnets and security groups.
/code $ copilot task run --subnets subnet-123,subnet-456 --security-groups sg-123,sg-456
Run a task with a command.
/code $ copilot task run --command "python migrate-script.py"`,
RunE: runCmdE(func(cmd *cobra.Command, args []string) error {
opts, err := newTaskRunOpts(vars)
if err != nil {
return err
}
opts.nFlag = cmd.Flags().NFlag()
if cmd.Flags().Changed(dockerFileFlag) {
opts.isDockerfileSet = true
}
return run(opts)
}),
}
cmd.Flags().IntVar(&vars.count, countFlag, 1, countFlagDescription)
cmd.Flags().IntVar(&vars.cpu, cpuFlag, 256, cpuFlagDescription)
cmd.Flags().IntVar(&vars.memory, memoryFlag, 512, memoryFlagDescription)
cmd.Flags().StringVarP(&vars.groupName, taskGroupNameFlag, nameFlagShort, "", taskGroupFlagDescription)
cmd.Flags().StringVarP(&vars.image, imageFlag, imageFlagShort, "", imageFlagDescription)
cmd.Flags().StringVar(&vars.dockerfilePath, dockerFileFlag, defaultDockerfilePath, dockerFileFlagDescription)
cmd.Flags().StringVar(&vars.imageTag, imageTagFlag, "", taskImageTagFlagDescription)
cmd.Flags().StringVar(&vars.taskRole, taskRoleFlag, "", taskRoleFlagDescription)
cmd.Flags().StringVar(&vars.executionRole, executionRoleFlag, "", executionRoleFlagDescription)
cmd.Flags().StringVar(&vars.appName, appFlag, "", taskAppFlagDescription)
cmd.Flags().StringVar(&vars.env, envFlag, "", taskEnvFlagDescription)
cmd.Flags().StringVar(&vars.cluster, clusterFlag, "", clusterFlagDescription)
cmd.Flags().StringSliceVar(&vars.subnets, subnetsFlag, nil, subnetsFlagDescription)
cmd.Flags().StringSliceVar(&vars.securityGroups, securityGroupsFlag, nil, securityGroupsFlagDescription)
cmd.Flags().BoolVar(&vars.useDefaultSubnetsAndCluster, taskDefaultFlag, false, taskRunDefaultFlagDescription)
cmd.Flags().StringToStringVar(&vars.envVars, envVarsFlag, nil, envVarsFlagDescription)
cmd.Flags().StringToStringVar(&vars.secrets, secretsFlag, nil, secretsFlagDescription)
cmd.Flags().StringVar(&vars.command, commandFlag, "", runCommandFlagDescription)
cmd.Flags().StringVar(&vars.entrypoint, entrypointFlag, "", entrypointFlagDescription)
cmd.Flags().StringToStringVar(&vars.resourceTags, resourceTagsFlag, nil, resourceTagsFlagDescription)
cmd.Flags().BoolVar(&vars.follow, followFlag, false, followFlagDescription)
cmd.Flags().StringVar(&vars.generateCommandTarget, generateCommandFlag, "", generateCommandFlagDescription)
return cmd
} | |
cluster_test.go | // Copyright 2016 TiKV Project Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cluster_test
import (
"context"
"fmt"
"sync"
"testing"
"time"
"github.com/coreos/go-semver/semver"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/kvproto/pkg/pdpb"
"github.com/pingcap/kvproto/pkg/replication_modepb"
"github.com/tikv/pd/pkg/dashboard"
"github.com/tikv/pd/pkg/mock/mockid"
"github.com/tikv/pd/pkg/testutil"
"github.com/tikv/pd/server"
"github.com/tikv/pd/server/cluster"
"github.com/tikv/pd/server/config"
"github.com/tikv/pd/server/core"
"github.com/tikv/pd/server/core/storelimit"
"github.com/tikv/pd/server/kv"
syncer "github.com/tikv/pd/server/region_syncer"
"github.com/tikv/pd/server/schedule/operator"
"github.com/tikv/pd/tests"
)
func Test(t *testing.T) {
TestingT(t)
}
const (
initEpochVersion uint64 = 1
initEpochConfVer uint64 = 1
testMetaStoreAddr = "127.0.0.1:12345"
testStoreAddr = "127.0.0.1:0"
)
var _ = Suite(&clusterTestSuite{})
type clusterTestSuite struct {
ctx context.Context
cancel context.CancelFunc
}
func (s *clusterTestSuite) SetUpSuite(c *C) {
s.ctx, s.cancel = context.WithCancel(context.Background())
server.EnableZap = true
	// Lengthen the dashboard check interval to prevent frequent GetStorage calls during the tests.
dashboard.SetCheckInterval(30 * time.Minute)
}
func (s *clusterTestSuite) TearDownSuite(c *C) {
s.cancel()
}
type testErrorKV struct {
kv.Base
}
func (kv *testErrorKV) Save(key, value string) error {
return errors.New("save failed")
}
func (s *clusterTestSuite) TestBootstrap(c *C) {
tc, err := tests.NewTestCluster(s.ctx, 1)
defer tc.Destroy()
c.Assert(err, IsNil)
err = tc.RunInitialServers()
c.Assert(err, IsNil)
tc.WaitLeader()
leaderServer := tc.GetServer(tc.GetLeader())
grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr())
clusterID := leaderServer.GetClusterID()
// IsBootstrapped returns false.
req := newIsBootstrapRequest(clusterID)
resp, err := grpcPDClient.IsBootstrapped(context.Background(), req)
c.Assert(err, IsNil)
c.Assert(resp, NotNil)
c.Assert(resp.GetBootstrapped(), IsFalse)
// Bootstrap the cluster.
bootstrapCluster(c, clusterID, grpcPDClient)
// IsBootstrapped returns true.
req = newIsBootstrapRequest(clusterID)
resp, err = grpcPDClient.IsBootstrapped(context.Background(), req)
c.Assert(err, IsNil)
c.Assert(resp.GetBootstrapped(), IsTrue)
// check bootstrapped error.
reqBoot := newBootstrapRequest(clusterID)
respBoot, err := grpcPDClient.Bootstrap(context.Background(), reqBoot)
c.Assert(err, IsNil)
c.Assert(respBoot.GetHeader().GetError(), NotNil)
c.Assert(respBoot.GetHeader().GetError().GetType(), Equals, pdpb.ErrorType_ALREADY_BOOTSTRAPPED)
}
func (s *clusterTestSuite) TestGetPutConfig(c *C) {
tc, err := tests.NewTestCluster(s.ctx, 1)
defer tc.Destroy()
c.Assert(err, IsNil)
err = tc.RunInitialServers()
c.Assert(err, IsNil)
tc.WaitLeader()
leaderServer := tc.GetServer(tc.GetLeader())
grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr())
clusterID := leaderServer.GetClusterID()
bootstrapCluster(c, clusterID, grpcPDClient)
rc := leaderServer.GetRaftCluster()
c.Assert(rc, NotNil)
// Get region.
region := getRegion(c, clusterID, grpcPDClient, []byte("abc"))
c.Assert(region.GetPeers(), HasLen, 1)
peer := region.GetPeers()[0]
// Get region by id.
regionByID := getRegionByID(c, clusterID, grpcPDClient, region.GetId())
c.Assert(region, DeepEquals, regionByID)
r := core.NewRegionInfo(region, region.Peers[0], core.SetApproximateSize(30))
err = tc.HandleRegionHeartbeat(r)
c.Assert(err, IsNil)
// Get store.
storeID := peer.GetStoreId()
store := getStore(c, clusterID, grpcPDClient, storeID)
// Update store.
store.Address = "127.0.0.1:1"
testPutStore(c, clusterID, rc, grpcPDClient, store)
// Remove store.
testRemoveStore(c, clusterID, rc, grpcPDClient, store)
// Update cluster config.
req := &pdpb.PutClusterConfigRequest{
Header: testutil.NewRequestHeader(clusterID),
Cluster: &metapb.Cluster{
Id: clusterID,
MaxPeerCount: 5,
},
}
resp, err := grpcPDClient.PutClusterConfig(context.Background(), req)
c.Assert(err, IsNil)
c.Assert(resp, NotNil)
meta := getClusterConfig(c, clusterID, grpcPDClient)
c.Assert(meta.GetMaxPeerCount(), Equals, uint32(5))
}
func testPutStore(c *C, clusterID uint64, rc *cluster.RaftCluster, grpcPDClient pdpb.PDClient, store *metapb.Store) {
// Update store.
_, err := putStore(grpcPDClient, clusterID, store)
c.Assert(err, IsNil)
updatedStore := getStore(c, clusterID, grpcPDClient, store.GetId())
c.Assert(updatedStore, DeepEquals, store)
// Update store again.
_, err = putStore(grpcPDClient, clusterID, store)
c.Assert(err, IsNil)
rc.AllocID()
id, err := rc.AllocID()
c.Assert(err, IsNil)
// Put new store with a duplicated address when old store is up will fail.
_, err = putStore(grpcPDClient, clusterID, newMetaStore(id, store.GetAddress(), "2.1.0", metapb.StoreState_Up, getTestDeployPath(id)))
c.Assert(err, NotNil)
id, err = rc.AllocID()
c.Assert(err, IsNil)
// Put new store with a duplicated address when old store is offline will fail.
resetStoreState(c, rc, store.GetId(), metapb.StoreState_Offline)
_, err = putStore(grpcPDClient, clusterID, newMetaStore(id, store.GetAddress(), "2.1.0", metapb.StoreState_Up, getTestDeployPath(id)))
c.Assert(err, NotNil)
id, err = rc.AllocID()
c.Assert(err, IsNil)
// Put new store with a duplicated address when old store is tombstone is OK.
resetStoreState(c, rc, store.GetId(), metapb.StoreState_Tombstone)
rc.GetStore(store.GetId())
_, err = putStore(grpcPDClient, clusterID, newMetaStore(id, store.GetAddress(), "2.1.0", metapb.StoreState_Up, getTestDeployPath(id)))
c.Assert(err, IsNil)
id, err = rc.AllocID()
c.Assert(err, IsNil)
deployPath := getTestDeployPath(id)
// Put a new store.
_, err = putStore(grpcPDClient, clusterID, newMetaStore(id, testMetaStoreAddr, "2.1.0", metapb.StoreState_Up, deployPath))
c.Assert(err, IsNil)
s := rc.GetStore(id).GetMeta()
c.Assert(s.DeployPath, Equals, deployPath)
deployPath = fmt.Sprintf("move/test/store%d", id)
_, err = putStore(grpcPDClient, clusterID, newMetaStore(id, testMetaStoreAddr, "2.1.0", metapb.StoreState_Up, deployPath))
c.Assert(err, IsNil)
s = rc.GetStore(id).GetMeta()
c.Assert(s.DeployPath, Equals, deployPath)
// Put an existed store with duplicated address with other old stores.
resetStoreState(c, rc, store.GetId(), metapb.StoreState_Up)
_, err = putStore(grpcPDClient, clusterID, newMetaStore(store.GetId(), testMetaStoreAddr, "2.1.0", metapb.StoreState_Up, getTestDeployPath(store.GetId())))
c.Assert(err, NotNil)
}
func getTestDeployPath(storeID uint64) string {
return fmt.Sprintf("test/store%d", storeID)
}
func | (c *C, rc *cluster.RaftCluster, storeID uint64, state metapb.StoreState) {
store := rc.GetStore(storeID)
c.Assert(store, NotNil)
newStore := store.Clone(core.OfflineStore(false))
if state == metapb.StoreState_Up {
newStore = newStore.Clone(core.UpStore())
} else if state == metapb.StoreState_Tombstone {
newStore = newStore.Clone(core.TombstoneStore())
}
rc.GetCacheCluster().PutStore(newStore)
if state == metapb.StoreState_Offline {
rc.SetStoreLimit(storeID, storelimit.RemovePeer, storelimit.Unlimited)
} else if state == metapb.StoreState_Tombstone {
rc.RemoveStoreLimit(storeID)
}
}
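// testStateAndLimit moves the store into beforeState, applies `run`, then checks
// the resulting store state and whether the per-store limits are kept or cleared.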
func testStateAndLimit(c *C, clusterID uint64, rc *cluster.RaftCluster, grpcPDClient pdpb.PDClient, store *metapb.Store, beforeState metapb.StoreState, run func(*cluster.RaftCluster) error, expectStates ...metapb.StoreState) {
// prepare
storeID := store.GetId()
oc := rc.GetOperatorController()
rc.SetStoreLimit(storeID, storelimit.AddPeer, 60)
rc.SetStoreLimit(storeID, storelimit.RemovePeer, 60)
op := operator.NewOperator("test", "test", 2, &metapb.RegionEpoch{}, operator.OpRegion, operator.AddPeer{ToStore: storeID, PeerID: 3})
oc.AddOperator(op)
op = operator.NewOperator("test", "test", 2, &metapb.RegionEpoch{}, operator.OpRegion, operator.RemovePeer{FromStore: storeID})
oc.AddOperator(op)
resetStoreState(c, rc, store.GetId(), beforeState)
_, isOKBefore := rc.GetAllStoresLimit()[storeID]
// run
err := run(rc)
// judge
_, isOKAfter := rc.GetAllStoresLimit()[storeID]
if len(expectStates) != 0 {
c.Assert(err, IsNil)
expectState := expectStates[0]
c.Assert(getStore(c, clusterID, grpcPDClient, storeID).GetState(), Equals, expectState)
if expectState == metapb.StoreState_Offline {
c.Assert(isOKAfter, IsTrue)
} else if expectState == metapb.StoreState_Tombstone {
c.Assert(isOKAfter, IsFalse)
}
} else {
c.Assert(err, NotNil)
c.Assert(isOKBefore, Equals, isOKAfter)
}
}
func testRemoveStore(c *C, clusterID uint64, rc *cluster.RaftCluster, grpcPDClient pdpb.PDClient, store *metapb.Store) {
{
beforeState := metapb.StoreState_Up // When store is up
// Case 1: RemoveStore should be OK;
testStateAndLimit(c, clusterID, rc, grpcPDClient, store, beforeState, func(cluster *cluster.RaftCluster) error {
return cluster.RemoveStore(store.GetId(), false)
}, metapb.StoreState_Offline)
// Case 2: RemoveStore with physically destroyed should be OK;
testStateAndLimit(c, clusterID, rc, grpcPDClient, store, beforeState, func(cluster *cluster.RaftCluster) error {
return cluster.RemoveStore(store.GetId(), true)
}, metapb.StoreState_Offline)
}
{
beforeState := metapb.StoreState_Offline // When store is offline
// Case 1: RemoveStore should be OK;
testStateAndLimit(c, clusterID, rc, grpcPDClient, store, beforeState, func(cluster *cluster.RaftCluster) error {
return cluster.RemoveStore(store.GetId(), false)
}, metapb.StoreState_Offline)
// Case 2: RemoveStore with physically destroyed should be OK;
testStateAndLimit(c, clusterID, rc, grpcPDClient, store, beforeState, func(cluster *cluster.RaftCluster) error {
return cluster.RemoveStore(store.GetId(), true)
}, metapb.StoreState_Offline)
}
{
beforeState := metapb.StoreState_Tombstone // When store is tombstone
// Case 1: RemoveStore should fail;
testStateAndLimit(c, clusterID, rc, grpcPDClient, store, beforeState, func(cluster *cluster.RaftCluster) error {
return cluster.RemoveStore(store.GetId(), false)
})
// Case 2: RemoveStore with physically destroyed should fail;
testStateAndLimit(c, clusterID, rc, grpcPDClient, store, beforeState, func(cluster *cluster.RaftCluster) error {
return cluster.RemoveStore(store.GetId(), true)
})
}
{
// Put after removed should return tombstone error.
resp, err := putStore(grpcPDClient, clusterID, store)
c.Assert(err, IsNil)
c.Assert(resp.GetHeader().GetError().GetType(), Equals, pdpb.ErrorType_STORE_TOMBSTONE)
}
{
// Update after removed should return tombstone error.
req := &pdpb.StoreHeartbeatRequest{
Header: testutil.NewRequestHeader(clusterID),
Stats: &pdpb.StoreStats{StoreId: store.GetId()},
}
resp, err := grpcPDClient.StoreHeartbeat(context.Background(), req)
c.Assert(err, IsNil)
c.Assert(resp.GetHeader().GetError().GetType(), Equals, pdpb.ErrorType_STORE_TOMBSTONE)
}
}
// Make sure PD will not panic if it starts and stops again and again.
func (s *clusterTestSuite) TestRaftClusterRestart(c *C) {
tc, err := tests.NewTestCluster(s.ctx, 1)
defer tc.Destroy()
c.Assert(err, IsNil)
err = tc.RunInitialServers()
c.Assert(err, IsNil)
tc.WaitLeader()
leaderServer := tc.GetServer(tc.GetLeader())
grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr())
clusterID := leaderServer.GetClusterID()
bootstrapCluster(c, clusterID, grpcPDClient)
rc := leaderServer.GetRaftCluster()
c.Assert(rc, NotNil)
rc.Stop()
err = rc.Start(leaderServer.GetServer())
c.Assert(err, IsNil)
rc = leaderServer.GetRaftCluster()
c.Assert(rc, NotNil)
rc.Stop()
}
// Make sure PD will not deadlock if it starts and stops again and again.
func (s *clusterTestSuite) TestRaftClusterMultipleRestart(c *C) {
tc, err := tests.NewTestCluster(s.ctx, 1)
defer tc.Destroy()
c.Assert(err, IsNil)
err = tc.RunInitialServers()
c.Assert(err, IsNil)
tc.WaitLeader()
leaderServer := tc.GetServer(tc.GetLeader())
grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr())
clusterID := leaderServer.GetClusterID()
bootstrapCluster(c, clusterID, grpcPDClient)
// add an offline store
storeID, err := leaderServer.GetAllocator().Alloc()
c.Assert(err, IsNil)
store := newMetaStore(storeID, "127.0.0.1:4", "2.1.0", metapb.StoreState_Offline, getTestDeployPath(storeID))
rc := leaderServer.GetRaftCluster()
c.Assert(rc, NotNil)
err = rc.PutStore(store)
c.Assert(err, IsNil)
c.Assert(tc, NotNil)
// let the job run at a small interval
c.Assert(failpoint.Enable("github.com/tikv/pd/server/highFrequencyClusterJobs", `return(true)`), IsNil)
for i := 0; i < 100; i++ {
err = rc.Start(leaderServer.GetServer())
c.Assert(err, IsNil)
time.Sleep(time.Millisecond)
rc = leaderServer.GetRaftCluster()
c.Assert(rc, NotNil)
rc.Stop()
}
}
func newMetaStore(storeID uint64, addr, version string, state metapb.StoreState, deployPath string) *metapb.Store {
return &metapb.Store{Id: storeID, Address: addr, Version: version, State: state, DeployPath: deployPath}
}
func (s *clusterTestSuite) TestGetPDMembers(c *C) {
tc, err := tests.NewTestCluster(s.ctx, 1)
defer tc.Destroy()
c.Assert(err, IsNil)
err = tc.RunInitialServers()
c.Assert(err, IsNil)
tc.WaitLeader()
leaderServer := tc.GetServer(tc.GetLeader())
grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr())
clusterID := leaderServer.GetClusterID()
req := &pdpb.GetMembersRequest{Header: testutil.NewRequestHeader(clusterID)}
resp, err := grpcPDClient.GetMembers(context.Background(), req)
c.Assert(err, IsNil)
// A stricter test can be found in api/member_test.go
c.Assert(len(resp.GetMembers()), Not(Equals), 0)
}
func (s *clusterTestSuite) TestStoreVersionChange(c *C) {
tc, err := tests.NewTestCluster(s.ctx, 1)
defer tc.Destroy()
c.Assert(err, IsNil)
err = tc.RunInitialServers()
c.Assert(err, IsNil)
tc.WaitLeader()
leaderServer := tc.GetServer(tc.GetLeader())
grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr())
clusterID := leaderServer.GetClusterID()
bootstrapCluster(c, clusterID, grpcPDClient)
svr := leaderServer.GetServer()
svr.SetClusterVersion("2.0.0")
storeID, err := leaderServer.GetAllocator().Alloc()
c.Assert(err, IsNil)
store := newMetaStore(storeID, "127.0.0.1:4", "2.1.0", metapb.StoreState_Up, getTestDeployPath(storeID))
var wg sync.WaitGroup
c.Assert(failpoint.Enable("github.com/tikv/pd/server/versionChangeConcurrency", `return(true)`), IsNil)
wg.Add(1)
go func() {
defer wg.Done()
_, err = putStore(grpcPDClient, clusterID, store)
c.Assert(err, IsNil)
}()
time.Sleep(100 * time.Millisecond)
svr.SetClusterVersion("1.0.0")
wg.Wait()
v, err := semver.NewVersion("1.0.0")
c.Assert(err, IsNil)
c.Assert(svr.GetClusterVersion(), Equals, *v)
c.Assert(failpoint.Disable("github.com/tikv/pd/server/versionChangeConcurrency"), IsNil)
}
func (s *clusterTestSuite) TestConcurrentHandleRegion(c *C) {
tc, err := tests.NewTestCluster(s.ctx, 1)
defer tc.Destroy()
c.Assert(err, IsNil)
err = tc.RunInitialServers()
c.Assert(err, IsNil)
tc.WaitLeader()
leaderServer := tc.GetServer(tc.GetLeader())
grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr())
clusterID := leaderServer.GetClusterID()
bootstrapCluster(c, clusterID, grpcPDClient)
storeAddrs := []string{"127.0.1.1:0", "127.0.1.1:1", "127.0.1.1:2"}
rc := leaderServer.GetRaftCluster()
c.Assert(rc, NotNil)
rc.SetStorage(core.NewStorage(kv.NewMemoryKV()))
stores := make([]*metapb.Store, 0, len(storeAddrs))
id := leaderServer.GetAllocator()
for _, addr := range storeAddrs {
storeID, err := id.Alloc()
c.Assert(err, IsNil)
store := newMetaStore(storeID, addr, "2.1.0", metapb.StoreState_Up, getTestDeployPath(storeID))
stores = append(stores, store)
_, err = putStore(grpcPDClient, clusterID, store)
c.Assert(err, IsNil)
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var wg sync.WaitGroup
// register store and bind stream
for i, store := range stores {
req := &pdpb.StoreHeartbeatRequest{
Header: testutil.NewRequestHeader(clusterID),
Stats: &pdpb.StoreStats{
StoreId: store.GetId(),
Capacity: 1000 * (1 << 20),
Available: 1000 * (1 << 20),
},
}
_, err := leaderServer.GetServer().StoreHeartbeat(context.TODO(), req)
c.Assert(err, IsNil)
stream, err := grpcPDClient.RegionHeartbeat(ctx)
c.Assert(err, IsNil)
peerID, err := id.Alloc()
c.Assert(err, IsNil)
regionID, err := id.Alloc()
c.Assert(err, IsNil)
peer := &metapb.Peer{Id: peerID, StoreId: store.GetId()}
regionReq := &pdpb.RegionHeartbeatRequest{
Header: testutil.NewRequestHeader(clusterID),
Region: &metapb.Region{
Id: regionID,
Peers: []*metapb.Peer{peer},
},
Leader: peer,
}
err = stream.Send(regionReq)
c.Assert(err, IsNil)
// make sure the first store can receive one response
if i == 0 {
wg.Add(1)
}
go func(isReceiver bool) {
if isReceiver {
_, err := stream.Recv()
c.Assert(err, IsNil)
wg.Done()
}
for {
select {
case <-ctx.Done():
return
default:
stream.Recv()
}
}
}(i == 0)
}
concurrent := 1000
for i := 0; i < concurrent; i++ {
peerID, err := id.Alloc()
c.Assert(err, IsNil)
regionID, err := id.Alloc()
c.Assert(err, IsNil)
region := &metapb.Region{
Id: regionID,
StartKey: []byte(fmt.Sprintf("%5d", i)),
EndKey: []byte(fmt.Sprintf("%5d", i+1)),
Peers: []*metapb.Peer{{Id: peerID, StoreId: stores[0].GetId()}},
RegionEpoch: &metapb.RegionEpoch{
ConfVer: initEpochConfVer,
Version: initEpochVersion,
},
}
if i == 0 {
region.StartKey = []byte("")
} else if i == concurrent-1 {
region.EndKey = []byte("")
}
wg.Add(1)
go func() {
defer wg.Done()
err := rc.HandleRegionHeartbeat(core.NewRegionInfo(region, region.Peers[0]))
c.Assert(err, IsNil)
}()
}
wg.Wait()
}
func (s *clusterTestSuite) TestSetScheduleOpt(c *C) {
// TODO: enable placement rules
tc, err := tests.NewTestCluster(s.ctx, 1, func(cfg *config.Config, svr string) { cfg.Replication.EnablePlacementRules = false })
defer tc.Destroy()
c.Assert(err, IsNil)
err = tc.RunInitialServers()
c.Assert(err, IsNil)
tc.WaitLeader()
leaderServer := tc.GetServer(tc.GetLeader())
grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr())
clusterID := leaderServer.GetClusterID()
bootstrapCluster(c, clusterID, grpcPDClient)
cfg := config.NewConfig()
cfg.Schedule.TolerantSizeRatio = 5
err = cfg.Adjust(nil, false)
c.Assert(err, IsNil)
opt := config.NewPersistOptions(cfg)
c.Assert(err, IsNil)
svr := leaderServer.GetServer()
scheduleCfg := opt.GetScheduleConfig()
replicationCfg := svr.GetReplicationConfig()
persistOptions := svr.GetPersistOptions()
pdServerCfg := persistOptions.GetPDServerConfig()
// PUT GET DELETE succeed
replicationCfg.MaxReplicas = 5
scheduleCfg.MaxSnapshotCount = 10
pdServerCfg.UseRegionStorage = true
typ, labelKey, labelValue := "testTyp", "testKey", "testValue"
c.Assert(svr.SetScheduleConfig(*scheduleCfg), IsNil)
c.Assert(svr.SetPDServerConfig(*pdServerCfg), IsNil)
c.Assert(svr.SetLabelProperty(typ, labelKey, labelValue), IsNil)
c.Assert(svr.SetReplicationConfig(*replicationCfg), IsNil)
c.Assert(persistOptions.GetMaxReplicas(), Equals, 5)
c.Assert(persistOptions.GetMaxSnapshotCount(), Equals, uint64(10))
c.Assert(persistOptions.IsUseRegionStorage(), Equals, true)
c.Assert(persistOptions.GetLabelPropertyConfig()[typ][0].Key, Equals, "testKey")
c.Assert(persistOptions.GetLabelPropertyConfig()[typ][0].Value, Equals, "testValue")
c.Assert(svr.DeleteLabelProperty(typ, labelKey, labelValue), IsNil)
c.Assert(len(persistOptions.GetLabelPropertyConfig()[typ]), Equals, 0)
// PUT GET failed
oldStorage := svr.GetStorage()
svr.SetStorage(core.NewStorage(&testErrorKV{}))
replicationCfg.MaxReplicas = 7
scheduleCfg.MaxSnapshotCount = 20
pdServerCfg.UseRegionStorage = false
c.Assert(svr.SetScheduleConfig(*scheduleCfg), NotNil)
c.Assert(svr.SetReplicationConfig(*replicationCfg), NotNil)
c.Assert(svr.SetPDServerConfig(*pdServerCfg), NotNil)
c.Assert(svr.SetLabelProperty(typ, labelKey, labelValue), NotNil)
c.Assert(persistOptions.GetMaxReplicas(), Equals, 5)
c.Assert(persistOptions.GetMaxSnapshotCount(), Equals, uint64(10))
c.Assert(persistOptions.GetPDServerConfig().UseRegionStorage, Equals, true)
c.Assert(len(persistOptions.GetLabelPropertyConfig()[typ]), Equals, 0)
// DELETE failed
svr.SetStorage(oldStorage)
c.Assert(svr.SetReplicationConfig(*replicationCfg), IsNil)
svr.SetStorage(core.NewStorage(&testErrorKV{}))
c.Assert(svr.DeleteLabelProperty(typ, labelKey, labelValue), NotNil)
c.Assert(persistOptions.GetLabelPropertyConfig()[typ][0].Key, Equals, "testKey")
c.Assert(persistOptions.GetLabelPropertyConfig()[typ][0].Value, Equals, "testValue")
svr.SetStorage(oldStorage)
}
func (s *clusterTestSuite) TestLoadClusterInfo(c *C) {
tc, err := tests.NewTestCluster(s.ctx, 1)
defer tc.Destroy()
c.Assert(err, IsNil)
err = tc.RunInitialServers()
c.Assert(err, IsNil)
tc.WaitLeader()
leaderServer := tc.GetServer(tc.GetLeader())
svr := leaderServer.GetServer()
rc := cluster.NewRaftCluster(s.ctx, svr.GetClusterRootPath(), svr.ClusterID(), syncer.NewRegionSyncer(svr), svr.GetClient(), svr.GetHTTPClient())
// Cluster is not bootstrapped.
rc.InitCluster(svr.GetAllocator(), svr.GetPersistOptions(), svr.GetStorage(), svr.GetBasicCluster())
raftCluster, err := rc.LoadClusterInfo()
c.Assert(err, IsNil)
c.Assert(raftCluster, IsNil)
storage := rc.GetStorage()
basicCluster := rc.GetCacheCluster()
opt := rc.GetOpts()
// Save meta, stores and regions.
n := 10
meta := &metapb.Cluster{Id: 123}
c.Assert(storage.SaveMeta(meta), IsNil)
stores := make([]*metapb.Store, 0, n)
for i := 0; i < n; i++ {
store := &metapb.Store{Id: uint64(i)}
stores = append(stores, store)
}
for _, store := range stores {
c.Assert(storage.SaveStore(store), IsNil)
}
regions := make([]*metapb.Region, 0, n)
for i := uint64(0); i < uint64(n); i++ {
region := &metapb.Region{
Id: i,
StartKey: []byte(fmt.Sprintf("%20d", i)),
EndKey: []byte(fmt.Sprintf("%20d", i+1)),
RegionEpoch: &metapb.RegionEpoch{Version: 1, ConfVer: 1},
}
regions = append(regions, region)
}
for _, region := range regions {
c.Assert(storage.SaveRegion(region), IsNil)
}
c.Assert(storage.Flush(), IsNil)
raftCluster = cluster.NewRaftCluster(s.ctx, svr.GetClusterRootPath(), svr.ClusterID(), syncer.NewRegionSyncer(svr), svr.GetClient(), svr.GetHTTPClient())
raftCluster.InitCluster(mockid.NewIDAllocator(), opt, storage, basicCluster)
raftCluster, err = raftCluster.LoadClusterInfo()
c.Assert(err, IsNil)
c.Assert(raftCluster, NotNil)
// Check meta, stores, and regions.
c.Assert(raftCluster.GetConfig(), DeepEquals, meta)
c.Assert(raftCluster.GetStoreCount(), Equals, n)
for _, store := range raftCluster.GetMetaStores() {
c.Assert(store, DeepEquals, stores[store.GetId()])
}
c.Assert(raftCluster.GetRegionCount(), Equals, n)
for _, region := range raftCluster.GetMetaRegions() {
c.Assert(region, DeepEquals, regions[region.GetId()])
}
m := 20
regions = make([]*metapb.Region, 0, m)
for i := uint64(0); i < uint64(m); i++ {
region := &metapb.Region{
Id: i,
StartKey: []byte(fmt.Sprintf("%20d", i)),
EndKey: []byte(fmt.Sprintf("%20d", i+1)),
RegionEpoch: &metapb.RegionEpoch{Version: 1, ConfVer: 1},
}
regions = append(regions, region)
}
for _, region := range regions {
c.Assert(storage.SaveRegion(region), IsNil)
}
raftCluster.GetStorage().LoadRegionsOnce(raftCluster.GetCacheCluster().PutRegion)
c.Assert(raftCluster.GetRegionCount(), Equals, n)
}
func (s *clusterTestSuite) TestTiFlashWithPlacementRules(c *C) {
tc, err := tests.NewTestCluster(s.ctx, 1, func(cfg *config.Config, name string) { cfg.Replication.EnablePlacementRules = false })
defer tc.Destroy()
c.Assert(err, IsNil)
err = tc.RunInitialServers()
c.Assert(err, IsNil)
tc.WaitLeader()
leaderServer := tc.GetServer(tc.GetLeader())
grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr())
clusterID := leaderServer.GetClusterID()
bootstrapCluster(c, clusterID, grpcPDClient)
tiflashStore := &metapb.Store{
Id: 11,
Address: "127.0.0.1:1",
Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tiflash"}},
Version: "v4.1.0",
}
// cannot put TiFlash node without placement rules
_, err = putStore(grpcPDClient, clusterID, tiflashStore)
c.Assert(err, NotNil)
rep := leaderServer.GetConfig().Replication
rep.EnablePlacementRules = true
svr := leaderServer.GetServer()
err = svr.SetReplicationConfig(rep)
c.Assert(err, IsNil)
_, err = putStore(grpcPDClient, clusterID, tiflashStore)
c.Assert(err, IsNil)
// test TiFlash store limit
expect := map[uint64]config.StoreLimitConfig{11: {AddPeer: 30, RemovePeer: 30}}
c.Assert(svr.GetScheduleConfig().StoreLimit, DeepEquals, expect)
// cannot disable placement rules with TiFlash nodes
rep.EnablePlacementRules = false
err = svr.SetReplicationConfig(rep)
c.Assert(err, NotNil)
err = svr.GetRaftCluster().RemoveStore(11, true)
c.Assert(err, IsNil)
err = svr.SetReplicationConfig(rep)
c.Assert(err, NotNil)
}
func (s *clusterTestSuite) TestReplicationModeStatus(c *C) {
tc, err := tests.NewTestCluster(s.ctx, 1, func(conf *config.Config, serverName string) {
conf.ReplicationMode.ReplicationMode = "dr-auto-sync"
})
defer tc.Destroy()
c.Assert(err, IsNil)
err = tc.RunInitialServers()
c.Assert(err, IsNil)
tc.WaitLeader()
leaderServer := tc.GetServer(tc.GetLeader())
grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr())
clusterID := leaderServer.GetClusterID()
req := newBootstrapRequest(clusterID)
res, err := grpcPDClient.Bootstrap(context.Background(), req)
c.Assert(err, IsNil)
c.Assert(res.GetReplicationStatus().GetMode(), Equals, replication_modepb.ReplicationMode_DR_AUTO_SYNC) // check status in bootstrap response
store := &metapb.Store{Id: 11, Address: "127.0.0.1:1", Version: "v4.1.0"}
putRes, err := putStore(grpcPDClient, clusterID, store)
c.Assert(err, IsNil)
c.Assert(putRes.GetReplicationStatus().GetMode(), Equals, replication_modepb.ReplicationMode_DR_AUTO_SYNC) // check status in putStore response
hbReq := &pdpb.StoreHeartbeatRequest{
Header: testutil.NewRequestHeader(clusterID),
Stats: &pdpb.StoreStats{StoreId: store.GetId()},
}
hbRes, err := grpcPDClient.StoreHeartbeat(context.Background(), hbReq)
c.Assert(err, IsNil)
c.Assert(hbRes.GetReplicationStatus().GetMode(), Equals, replication_modepb.ReplicationMode_DR_AUTO_SYNC) // check status in store heartbeat response
}
func newIsBootstrapRequest(clusterID uint64) *pdpb.IsBootstrappedRequest {
req := &pdpb.IsBootstrappedRequest{
Header: testutil.NewRequestHeader(clusterID),
}
return req
}
func newBootstrapRequest(clusterID uint64) *pdpb.BootstrapRequest {
req := &pdpb.BootstrapRequest{
Header: testutil.NewRequestHeader(clusterID),
Store: &metapb.Store{Id: 1, Address: testStoreAddr},
Region: &metapb.Region{Id: 2, Peers: []*metapb.Peer{{Id: 3, StoreId: 1, Role: metapb.PeerRole_Voter}}},
}
return req
}
// helper function to check and bootstrap.
func bootstrapCluster(c *C, clusterID uint64, grpcPDClient pdpb.PDClient) {
req := newBootstrapRequest(clusterID)
_, err := grpcPDClient.Bootstrap(context.Background(), req)
c.Assert(err, IsNil)
}
func putStore(grpcPDClient pdpb.PDClient, clusterID uint64, store *metapb.Store) (*pdpb.PutStoreResponse, error) {
req := &pdpb.PutStoreRequest{
Header: testutil.NewRequestHeader(clusterID),
Store: store,
}
resp, err := grpcPDClient.PutStore(context.Background(), req)
return resp, err
}
func getStore(c *C, clusterID uint64, grpcPDClient pdpb.PDClient, storeID uint64) *metapb.Store {
req := &pdpb.GetStoreRequest{
Header: testutil.NewRequestHeader(clusterID),
StoreId: storeID,
}
resp, err := grpcPDClient.GetStore(context.Background(), req)
c.Assert(err, IsNil)
c.Assert(resp.GetStore().GetId(), Equals, storeID)
return resp.GetStore()
}
func getRegion(c *C, clusterID uint64, grpcPDClient pdpb.PDClient, regionKey []byte) *metapb.Region {
req := &pdpb.GetRegionRequest{
Header: testutil.NewRequestHeader(clusterID),
RegionKey: regionKey,
}
resp, err := grpcPDClient.GetRegion(context.Background(), req)
c.Assert(err, IsNil)
c.Assert(resp.GetRegion(), NotNil)
return resp.GetRegion()
}
func getRegionByID(c *C, clusterID uint64, grpcPDClient pdpb.PDClient, regionID uint64) *metapb.Region {
req := &pdpb.GetRegionByIDRequest{
Header: testutil.NewRequestHeader(clusterID),
RegionId: regionID,
}
resp, err := grpcPDClient.GetRegionByID(context.Background(), req)
c.Assert(err, IsNil)
c.Assert(resp.GetRegion(), NotNil)
return resp.GetRegion()
}
func getClusterConfig(c *C, clusterID uint64, grpcPDClient pdpb.PDClient) *metapb.Cluster {
req := &pdpb.GetClusterConfigRequest{Header: testutil.NewRequestHeader(clusterID)}
resp, err := grpcPDClient.GetClusterConfig(context.Background(), req)
c.Assert(err, IsNil)
c.Assert(resp.GetCluster(), NotNil)
return resp.GetCluster()
}
func (s *clusterTestSuite) TestOfflineStoreLimit(c *C) {
tc, err := tests.NewTestCluster(s.ctx, 1)
defer tc.Destroy()
c.Assert(err, IsNil)
err = tc.RunInitialServers()
c.Assert(err, IsNil)
tc.WaitLeader()
leaderServer := tc.GetServer(tc.GetLeader())
grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr())
clusterID := leaderServer.GetClusterID()
bootstrapCluster(c, clusterID, grpcPDClient)
storeAddrs := []string{"127.0.1.1:0", "127.0.1.1:1"}
rc := leaderServer.GetRaftCluster()
c.Assert(rc, NotNil)
rc.SetStorage(core.NewStorage(kv.NewMemoryKV()))
id := leaderServer.GetAllocator()
for _, addr := range storeAddrs {
storeID, err := id.Alloc()
c.Assert(err, IsNil)
store := newMetaStore(storeID, addr, "4.0.0", metapb.StoreState_Up, getTestDeployPath(storeID))
_, err = putStore(grpcPDClient, clusterID, store)
c.Assert(err, IsNil)
}
for i := uint64(1); i <= 2; i++ {
r := &metapb.Region{
Id: i,
RegionEpoch: &metapb.RegionEpoch{
ConfVer: 1,
Version: 1,
},
StartKey: []byte{byte(i + 1)},
EndKey: []byte{byte(i + 2)},
Peers: []*metapb.Peer{{Id: i + 10, StoreId: i}},
}
region := core.NewRegionInfo(r, r.Peers[0], core.SetApproximateSize(10))
err = rc.HandleRegionHeartbeat(region)
c.Assert(err, IsNil)
}
oc := rc.GetOperatorController()
opt := rc.GetOpts()
opt.SetAllStoresLimit(storelimit.RemovePeer, 1)
// can only add 5 remove-peer operators on store 1
for i := uint64(1); i <= 5; i++ {
op := operator.NewOperator("test", "test", 1, &metapb.RegionEpoch{ConfVer: 1, Version: 1}, operator.OpRegion, operator.RemovePeer{FromStore: 1})
c.Assert(oc.AddOperator(op), IsTrue)
c.Assert(oc.RemoveOperator(op), IsTrue)
}
op := operator.NewOperator("test", "test", 1, &metapb.RegionEpoch{ConfVer: 1, Version: 1}, operator.OpRegion, operator.RemovePeer{FromStore: 1})
c.Assert(oc.AddOperator(op), IsFalse)
c.Assert(oc.RemoveOperator(op), IsFalse)
// can only add 5 remove-peer operators on store 2
for i := uint64(1); i <= 5; i++ {
op := operator.NewOperator("test", "test", 2, &metapb.RegionEpoch{ConfVer: 1, Version: 1}, operator.OpRegion, operator.RemovePeer{FromStore: 2})
c.Assert(oc.AddOperator(op), IsTrue)
c.Assert(oc.RemoveOperator(op), IsTrue)
}
op = operator.NewOperator("test", "test", 2, &metapb.RegionEpoch{ConfVer: 1, Version: 1}, operator.OpRegion, operator.RemovePeer{FromStore: 2})
c.Assert(oc.AddOperator(op), IsFalse)
c.Assert(oc.RemoveOperator(op), IsFalse)
// reset all store limits
opt.SetAllStoresLimit(storelimit.RemovePeer, 2)
// can only add 5 remove-peer operators on store 2
for i := uint64(1); i <= 5; i++ {
op := operator.NewOperator("test", "test", 2, &metapb.RegionEpoch{ConfVer: 1, Version: 1}, operator.OpRegion, operator.RemovePeer{FromStore: 2})
c.Assert(oc.AddOperator(op), IsTrue)
c.Assert(oc.RemoveOperator(op), IsTrue)
}
op = operator.NewOperator("test", "test", 2, &metapb.RegionEpoch{ConfVer: 1, Version: 1}, operator.OpRegion, operator.RemovePeer{FromStore: 2})
c.Assert(oc.AddOperator(op), IsFalse)
c.Assert(oc.RemoveOperator(op), IsFalse)
// offline store 1
rc.SetStoreLimit(1, storelimit.RemovePeer, storelimit.Unlimited)
rc.RemoveStore(1, false)
// can add unlimited remove-peer operators on store 1
for i := uint64(1); i <= 30; i++ {
op := operator.NewOperator("test", "test", 1, &metapb.RegionEpoch{ConfVer: 1, Version: 1}, operator.OpRegion, operator.RemovePeer{FromStore: 1})
c.Assert(oc.AddOperator(op), IsTrue)
c.Assert(oc.RemoveOperator(op), IsTrue)
}
}
func (s *clusterTestSuite) TestUpgradeStoreLimit(c *C) {
tc, err := tests.NewTestCluster(s.ctx, 1)
defer tc.Destroy()
c.Assert(err, IsNil)
err = tc.RunInitialServers()
c.Assert(err, IsNil)
tc.WaitLeader()
leaderServer := tc.GetServer(tc.GetLeader())
grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr())
clusterID := leaderServer.GetClusterID()
bootstrapCluster(c, clusterID, grpcPDClient)
rc := leaderServer.GetRaftCluster()
c.Assert(rc, NotNil)
rc.SetStorage(core.NewStorage(kv.NewMemoryKV()))
store := newMetaStore(1, "127.0.1.1:0", "4.0.0", metapb.StoreState_Up, "test/store1")
_, err = putStore(grpcPDClient, clusterID, store)
c.Assert(err, IsNil)
r := &metapb.Region{
Id: 1,
RegionEpoch: &metapb.RegionEpoch{
ConfVer: 1,
Version: 1,
},
StartKey: []byte{byte(2)},
EndKey: []byte{byte(3)},
Peers: []*metapb.Peer{{Id: 11, StoreId: uint64(1)}},
}
region := core.NewRegionInfo(r, r.Peers[0], core.SetApproximateSize(10))
err = rc.HandleRegionHeartbeat(region)
c.Assert(err, IsNil)
// restart PD
// Here we use an empty store limit to simulate the upgrade process.
opt := rc.GetOpts()
scheduleCfg := opt.GetScheduleConfig()
scheduleCfg.StoreLimit = map[uint64]config.StoreLimitConfig{}
c.Assert(leaderServer.GetServer().SetScheduleConfig(*scheduleCfg), IsNil)
err = leaderServer.Stop()
c.Assert(err, IsNil)
err = leaderServer.Run()
c.Assert(err, IsNil)
oc := rc.GetOperatorController()
// can only add 5 remove-peer operators on store 1
for i := uint64(1); i <= 5; i++ {
op := operator.NewOperator("test", "test", 1, &metapb.RegionEpoch{ConfVer: 1, Version: 1}, operator.OpRegion, operator.RemovePeer{FromStore: 1})
c.Assert(oc.AddOperator(op), IsTrue)
c.Assert(oc.RemoveOperator(op), IsTrue)
}
op := operator.NewOperator("test", "test", 1, &metapb.RegionEpoch{ConfVer: 1, Version: 1}, operator.OpRegion, operator.RemovePeer{FromStore: 1})
c.Assert(oc.AddOperator(op), IsFalse)
c.Assert(oc.RemoveOperator(op), IsFalse)
}
func (s *clusterTestSuite) TestStaleTermHeartbeat(c *C) {
tc, err := tests.NewTestCluster(s.ctx, 1)
c.Assert(err, IsNil)
defer tc.Destroy()
err = tc.RunInitialServers()
c.Assert(err, IsNil)
tc.WaitLeader()
leaderServer := tc.GetServer(tc.GetLeader())
grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr())
clusterID := leaderServer.GetClusterID()
bootstrapCluster(c, clusterID, grpcPDClient)
storeAddrs := []string{"127.0.1.1:0", "127.0.1.1:1", "127.0.1.1:2"}
rc := leaderServer.GetRaftCluster()
c.Assert(rc, NotNil)
rc.SetStorage(core.NewStorage(kv.NewMemoryKV()))
peers := make([]*metapb.Peer, 0, len(storeAddrs))
id := leaderServer.GetAllocator()
for _, addr := range storeAddrs {
storeID, err := id.Alloc()
c.Assert(err, IsNil)
peerID, err := id.Alloc()
c.Assert(err, IsNil)
store := newMetaStore(storeID, addr, "3.0.0", metapb.StoreState_Up, getTestDeployPath(storeID))
_, err = putStore(grpcPDClient, clusterID, store)
c.Assert(err, IsNil)
peers = append(peers, &metapb.Peer{
Id: peerID,
StoreId: storeID,
})
}
regionReq := &pdpb.RegionHeartbeatRequest{
Header: testutil.NewRequestHeader(clusterID),
Region: &metapb.Region{
Id: 1,
Peers: peers,
StartKey: []byte{byte(2)},
EndKey: []byte{byte(3)},
RegionEpoch: &metapb.RegionEpoch{
ConfVer: 1,
Version: 1,
},
},
Leader: peers[0],
Term: 5,
ApproximateSize: 10,
}
region := core.RegionFromHeartbeat(regionReq)
err = rc.HandleRegionHeartbeat(region)
c.Assert(err, IsNil)
// Transfer leader
regionReq.Term = 6
regionReq.Leader = peers[1]
region = core.RegionFromHeartbeat(regionReq)
err = rc.HandleRegionHeartbeat(region)
c.Assert(err, IsNil)
// issue #3379
regionReq.KeysWritten = uint64(18446744073709551615) // 2^64 - 1, i.e. -1 as a signed value
regionReq.BytesWritten = uint64(18446744073709550602) // 2^64 - 1014, i.e. -1014 as a signed value
region = core.RegionFromHeartbeat(regionReq)
c.Assert(region.GetKeysWritten(), Equals, uint64(0))
c.Assert(region.GetBytesWritten(), Equals, uint64(0))
err = rc.HandleRegionHeartbeat(region)
c.Assert(err, IsNil)
// Stale heartbeat, update check should fail
regionReq.Term = 5
regionReq.Leader = peers[0]
region = core.RegionFromHeartbeat(regionReq)
err = rc.HandleRegionHeartbeat(region)
c.Assert(err, NotNil)
}
| resetStoreState |
purge_files.py | from urllib.parse import urlencode
from urllib.request import urlopen
from time import time
from json import load
from codecs import getreader
from os import environ
reader = getreader("utf-8")
token = environ['SLACK_TEST_TOKEN'] # Uses legacy test API token - TODO: This will need to be updated
days = 14 # Purge files older than 14 days
timestamp = int(time()) - days * 24 * 60 * 60
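# e.g. with days = 14 this is the Unix time two weeks ago; files created before
# this moment (via files.list's ts_to filter) are the purge candidates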
def list_files(slack_token, ts_to):
"""
Fetches a list of all the public files on the slack server
:param slack_token:
:param ts_to: Files created before this timestamp
:return: List of public files
"""
params = {
'token': slack_token,
'ts_to': ts_to,
'count': 500,
}
response = reader(urlopen('https://slack.com/api/files.list?' + urlencode(params)))
file_list = load(response)['files']
return file_list
def | (file_ids, slack_token, verbose=False):
"""
Deletes all files with IDs matching the given list
:param file_ids:
:param slack_token:
:param verbose:
"""
size = 0
count = 0
num_files = len(file_ids)
for file_id in file_ids:
count += 1
params = {
'token': slack_token,
'file': file_id
}
response = reader(urlopen('https://slack.com/api/files.info?' + urlencode(params)))
size += load(response)['file']['size']
response = reader(urlopen('https://slack.com/api/files.delete?' + urlencode(params)))
ok = load(response)['ok']
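# 1048576 = 1024 * 1024, converting a byte count to (binary) megabytes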
mb = size / 1048576
if verbose:
print("{0} of {1} - {2} {3} ... {4:.2f} MB saved".format(count, num_files, file_id, ok, mb))
def total_file_size(slack_token, verbose=False):
"""
Finds the total size of all files on the slack server
:param slack_token:
:param verbose:
:return:
"""
params = {
'token': slack_token,
'count': 500,
}
response = reader(urlopen('https://slack.com/api/files.list?' + urlencode(params)))
size = 0
file_ids = [f['id'] for f in load(response)['files']]
for file_id in file_ids:
params = {
'token': slack_token,
'file': file_id
}
response = reader(urlopen('https://slack.com/api/files.info?' + urlencode(params)))
size += load(response)['file']['size']
mb = size / 1048576
if verbose:
print('{0:.2f} MB total'.format(mb))
return '{0:.2f} MB'.format(mb)
if __name__ == '__main__':
files = [f['id'] for f in list_files(token, timestamp)]
delete_files(files, token, verbose=True)
print("{} files deleted".format(len(files)))
print(total_file_size(token))
| delete_files |
assemble.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use crate::FileOpts;
use anyhow::{anyhow, Result};
use bumpalo::Bump;
use clap::Parser;
use hhbc::hackc_unit::HackCUnit;
use options::Options;
use oxidized::relative_path::{self, RelativePath};
use rayon::prelude::*;
use regex::bytes::Regex;
use std::{
fmt,
fs::{self, File},
io::{stdout, Write},
path::{Path, PathBuf},
sync::Mutex,
};
#[derive(Parser, Debug)]
pub struct Opts {
/// Output file. Creates it if necessary
#[clap(short = 'o')]
output_file: Option<PathBuf>,
/// The input hhas file(s) to deserialize back to HackCUnit
#[clap(flatten)]
files: FileOpts,
}
type SyncWrite = Mutex<Box<dyn Write + Sync + Send>>;
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct Pos {
pub line: usize,
pub col: usize,
}
#[derive(Debug, PartialEq, Eq)]
pub enum Token<'a> {
//see below in Lexer::from_str for regex definitions
Global(&'a [u8], Pos),
Variable(&'a [u8], Pos),
TripleStrLiteral(&'a [u8], Pos),
Comment(&'a [u8], Pos),
Decl(&'a [u8], Pos),
StrLiteral(&'a [u8], Pos),
Variadic(Pos),
Semicolon(Pos),
Dash(Pos),
OpenCurly(Pos),
OpenBracket(Pos),
OpenParen(Pos),
CloseParen(Pos),
CloseBracket(Pos),
CloseCurly(Pos),
Equal(Pos),
Number(&'a [u8], Pos),
Comma(Pos),
Lt(Pos),
Gt(Pos),
Colon(Pos),
Identifier(&'a [u8], Pos),
Error(&'a [u8], Pos),
}
impl<'a> Token<'a> {
fn as_bytes(&self) -> &'a [u8] {
match self {
Token::Global(u, _)
| Token::Variable(u, _)
| Token::TripleStrLiteral(u, _)
| Token::Comment(u, _)
| Token::Decl(u, _)
| Token::StrLiteral(u, _)
| Token::Number(u, _)
| Token::Identifier(u, _)
| Token::Error(u, _) => u,
Token::Semicolon(_) => ";".as_bytes(),
Token::Dash(_) => "-".as_bytes(),
Token::OpenCurly(_) => "{".as_bytes(),
Token::OpenBracket(_) => "[".as_bytes(),
Token::OpenParen(_) => "(".as_bytes(),
Token::CloseParen(_) => ")".as_bytes(),
Token::CloseBracket(_) => "]".as_bytes(),
Token::CloseCurly(_) => "}".as_bytes(),
Token::Equal(_) => "=".as_bytes(),
Token::Comma(_) => ",".as_bytes(),
Token::Lt(_) => "<".as_bytes(),
Token::Gt(_) => ">".as_bytes(),
Token::Colon(_) => ":".as_bytes(),
Token::Variadic(_) => "...".as_bytes(),
}
}
}
impl<'a> fmt::Display for Token<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let text = std::str::from_utf8(self.as_bytes()).map_err(|_| fmt::Error)?;
match self {
Token::Global(_, pos) => write!(f, "Global(\"{text}\", {pos:?})"),
Token::Variable(_, pos) => write!(f, "Variable(\"{text}\", {pos:?})"),
Token::TripleStrLiteral(_, pos) => write!(f, "TripleStrLiteral(\"{text}\", {pos:?})"),
Token::Comment(_, pos) => write!(f, "Comment(\"{text}\", {pos:?})"),
Token::Decl(_, pos) => write!(f, "Decl(\"{text}\", {pos:?})"),
Token::StrLiteral(_, pos) => write!(f, "StrLiteral(\"{text}\", {pos:?})"),
Token::Number(_, pos) => write!(f, "Number(\"{text}\", {pos:?})"),
Token::Identifier(_, pos) => write!(f, "Identifier(\"{text}\", {pos:?})"),
Token::Error(_, pos) => write!(f, "Error(\"{text}\", {pos:?})"),
Token::Semicolon(pos) => write!(f, "Semicolon(\"{text}\", {pos:?})"),
Token::Dash(pos) => write!(f, "Dash(\"{text}\", {pos:?})"),
Token::OpenCurly(pos) => write!(f, "OpenCurly(\"{text}\", {pos:?})"),
Token::OpenBracket(pos) => write!(f, "OpenBracket(\"{text}\", {pos:?})"),
Token::OpenParen(pos) => write!(f, "OpenParen(\"{text}\", {pos:?})"),
Token::CloseParen(pos) => write!(f, "CloseParen(\"{text}\", {pos:?})"),
Token::CloseBracket(pos) => write!(f, "CloseBracket(\"{text}\", {pos:?})"),
Token::CloseCurly(pos) => write!(f, "CloseCurly(\"{text}\", {pos:?})"),
Token::Equal(pos) => write!(f, "Equal(\"{text}\", {pos:?})"),
Token::Comma(pos) => write!(f, "Comma(\"{text}\", {pos:?})"),
Token::Lt(pos) => write!(f, "Lt(\"{text}\", {pos:?})"),
Token::Gt(pos) => write!(f, "Gt(\"{text}\", {pos:?})"),
Token::Colon(pos) => write!(f, "Colon(\"{text}\", {pos:?})"),
Token::Variadic(pos) => write!(f, "Variadic(\"{text}\", {pos:?})"),
}
}
}
// We initially planned on using Logos, a crate for tokenizing really fast.
// We chose not to use Logos because it doesn't support all regexes -- for instance, it can't
// tokenize based on the regex "\"\"\".*\"\"\"". Here's the git issue:
// https://github.com/maciejhirsz/logos/issues/246
pub struct Lexer<'a> {
tokens: Vec<Token<'a>>,
}
impl<'a> IntoIterator for Lexer<'a> {
type Item = Token<'a>;
type IntoIter = <Vec<Token<'a>> as IntoIterator>::IntoIter;
fn into_iter(self) -> Self::IntoIter {
self.tokens.into_iter()
}
}
fn build_tokens_helper<'a>(
s: &'a [u8],
cur_pos: &mut Pos,
tokens: &mut Vec<Token<'a>>,
big_regex: &Regex,
) -> &'a [u8] {
if let Some(mat) = big_regex.find(s) {
let mut chars = s.iter(); //implicit assumption: matched to the start (^), so we iter from the start
debug_assert!(mat.start() == 0);
match chars.next().unwrap() {
//get first character
b'\n' => {
cur_pos.line += 1;
cur_pos.col = 1;
&s[mat.end()..]
}
//Note these don't match what prints out on a printer, but not sure how to generalize
b'\x0C' => {
//form feed
cur_pos.col += 1;
&s[mat.end()..]
}
b'\r' => {
cur_pos.col = 1;
&s[mat.end()..]
}
b'\t' => {
cur_pos.col += 4;
&s[mat.end()..]
}
b' ' => {
cur_pos.col += 1;
&s[mat.end()..]
} //don't add whitespace as tokens, just increase line and col
o => {
let end = mat.end();
let tok = match o {
b'#' => Token::Comment(&s[..end], *cur_pos), //comment
b'@' => Token::Global(&s[..end], *cur_pos), //global
b'$' => Token::Variable(&s[..end], *cur_pos), //var
b'.' => {
if chars.next().map_or(false, |c| *c == b'.') && chars.next().map_or(false, |c| *c == b'.') {
//variadic
Token::Variadic(*cur_pos)
} else {
Token::Decl(&s[..end], *cur_pos) //decl
}
}
b';' => Token::Semicolon(*cur_pos), //semicolon
b'{' => Token::OpenCurly(*cur_pos), //opencurly
b'[' => Token::OpenBracket(*cur_pos),
b'(' => Token::OpenParen(*cur_pos),
b')' => Token::CloseParen(*cur_pos),
b']' => Token::CloseBracket(*cur_pos),
b'}' => Token::CloseCurly(*cur_pos),
b',' => Token::Comma(*cur_pos),
b'<' => Token::Lt(*cur_pos), //<
b'>' => Token::Gt(*cur_pos), //>
b'=' => Token::Equal(*cur_pos), //=
b'-' => {
if chars.next().map_or(false, |c| c.is_ascii_digit()) {
//negative number
Token::Number(&s[..end], *cur_pos)
} else {
Token::Dash(*cur_pos)
}
}
b':' => Token::Colon(*cur_pos),
b'"' => {
if chars.next().map_or(false, |c| *c == b'"') && chars.next().map_or(false, |c| *c == b'"') {
//triple string literal
Token::TripleStrLiteral(&s[..end], *cur_pos)
} else {
//single string literal
Token::StrLiteral(&s[..end], *cur_pos)
}
}
dig_or_id => {
if dig_or_id.is_ascii_digit()
|| (*dig_or_id == b'+'
&& chars.next().map_or(false, |c| c.is_ascii_digit()))
//positive numbers denoted with +
{
Token::Number(&s[..end], *cur_pos)
} else {
Token::Identifier(&s[..end], *cur_pos)
}
}
};
tokens.push(tok);
cur_pos.col += end - mat.start(); //advance col by length of token
&s[end..]
}
}
} else {
//couldn't tokenize the string, so add the rest of it as an error
tokens.push(Token::Error(
s,
Pos {
line: cur_pos.line,
col: cur_pos.col,
},
));
//no need to advance col and line since we're at the end of the input
&[]
}
}
impl<'a> Lexer<'a> {
pub fn from_str(s: &'a [u8]) -> Self {
//first create the regex that matches any token. Done this way for readability
let v = [
r#"""".*""""#, //triple str literal
"#.*", //comment
r"(?-u)[\.@][_a-zA-Z\x80-\xff][_/a-zA-Z0-9\x80-\xff]*", //decl, var, global. (?-u) turns off utf8 check
r"(?-u)\$[_a-zA-Z0-9\x80-\xff][_/a-zA-Z0-9\x80-\xff]*", //var. See /home/almathaler/fbsource/fbcode/hphp/test/quick/reified-and-variadic.php's assembly for a var w/ a digit at front
r#""((\\.)|[^\\"])*""#, //str literal
r"[-+]?[0-9]+\.?[0-9]*", //number
r"(?-u)[_/a-zA-Z\x80-\xff][_/\\a-zA-Z0-9\x80-\xff]*", //identifier
";",
"-",
"=",
r"\{",
r"\[",
r"\(",
r"\)",
r"\]",
r"\}",
",",
"<",
">",
":",
r"\.\.\.", //variadic
"\n",
r"[ \t\r\f]+",
];
let big_regex = format!("^(({}))", v.join(")|("));
let big_regex = Regex::new(&big_regex).unwrap();
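//The joined pattern has the shape ^((alt1)|(alt2)|...): the ^ anchors every match
//to the start of the remaining input, and the regex crate's leftmost-first
//alternation semantics mean earlier entries in `v` take precedence on ties.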
let mut cur_pos = Pos { line: 1, col: 1 };
let mut tokens = Vec::new();
let mut source = s;
while !source.is_empty() {
source = build_tokens_helper(source, &mut cur_pos, &mut tokens, &big_regex);
}
Lexer { tokens }
}
}
/// Tokenizes input string using a Lexer. Prints all tokens in the Lexer
fn print_tokens<'a>(s: &'a [u8]) {
let lex: Lexer<'a> = Lexer::from_str(s);
for tok in lex {
println!("{}", tok);
}
}
/// Assembles the hhas represented by the vec of bytes input
fn assemble_from_bytes<'arena>(
_alloc: &'arena Bump,
s: &[u8],
_opts: &Opts,
) -> Result<HackCUnit<'arena>> |
/// Assembles the hhas within f to a HackCUnit. Currently just returns default HCU
pub fn assemble<'arena>(alloc: &'arena Bump, f: &Path, opts: &Opts) -> Result<HackCUnit<'arena>> {
let s: Vec<u8> = fs::read(f)?;
let s = s.as_slice();
let _tr: Result<HackCUnit<'_>> = assemble_from_bytes(alloc, s, opts);
todo!()
}
pub fn run(mut opts: Opts) -> Result<()> {
//Create writer to output/stdout
let writer: SyncWrite = match &opts.output_file {
None => Mutex::new(Box::new(stdout())),
Some(output_file) => Mutex::new(Box::new(File::create(output_file)?)),
};
//May have specified multiple files
let files = opts.files.gather_input_files()?;
//Process each file
files
.into_par_iter()
.map(|path| process_one_file(&path, &opts, &writer))
.collect::<Vec<_>>()
.into_iter()
.collect()
}
/// Assemble the hhas in a given file to a HackCUnit. Then use bytecode printer
/// to write the hhas representation of that HCU to output
/// 5/31: Side-effect: prints tokenized input file to terminal
pub fn process_one_file(f: &Path, opts: &Opts, w: &SyncWrite) -> Result<()> {
let alloc = Bump::default();
//if it's not an hhas file don't assemble. Return Err(e):
if Path::extension(f) == Some(std::ffi::OsStr::new("hhas")) {
let hcu = assemble(&alloc, f, opts)?; //assemble will print the tokens to output
let filepath = RelativePath::make(relative_path::Prefix::Dummy, f.to_owned());
let comp_options: Options = Default::default();
//note: why not make a native_env based on the filepath
//and then use its to_options -- why is to_options() private?
let ctxt = bytecode_printer::Context::new(&comp_options, Some(&filepath), false);
let mut output = Vec::new();
match bytecode_printer::print_unit(&ctxt, &mut output, &hcu) {
Err(e) => {
eprintln!("Error bytecode_printing file {}: {}", f.display(), e);
Err(anyhow!("bytecode_printer problem"))
}
Ok(_) => {
w.lock().unwrap().write_all(&output)?;
Ok(())
}
}
} else {
Err(anyhow!(
"can't assemble non-hhas file: {:?}, extension: {:?}",
f,
Path::extension(f)
))
}
}
#[cfg(test)]
mod test {
#[test]
fn difficult_strings() {
use crate::assemble::Lexer;
use crate::assemble::Token;
let s = r#""\"0\""
"12345\\:2\\"
"class_meth() expects a literal class name or ::class constant, followed by a constant string that refers to a static method on that class";
"#;
let s = s.as_bytes();
let l: Lexer<'_> = Lexer::from_str(s);
let mut l = l.into_iter();
//expecting 3 string tokens
//Note: a bare name in a `matches!` pattern just binds, so we need `if` guards
//to actually compare the token's bytes against the expected literals.
let st1 = l.next().unwrap();
let by1 = str::as_bytes(r#""\"0\"""#);
assert!(matches!(st1, Token::StrLiteral(b, _) if b == by1));
let st2 = l.next().unwrap();
let by2 = str::as_bytes(r#""12345\\:2\\""#);
assert!(matches!(st2, Token::StrLiteral(b, _) if b == by2));
let st3 = l.next().unwrap();
let by3 = str::as_bytes(
r#""class_meth() expects a literal class name or ::class constant, followed by a constant string that refers to a static method on that class""#,
);
assert!(matches!(st3, Token::StrLiteral(b, _) if b == by3));
}
#[test]
fn odd_unicode_test() {
use crate::assemble::Lexer;
use crate::assemble::Token;
let s: &[u8] = b".\xA9\xEF\xB8\x8E $0\xC5\xA3\xB1\xC3 \xE2\x98\xBA\xE2\x98\xBA\xE2\x98\xBA @\xE2\x99\xA1\xE2\x99\xA4$";
let l: Lexer<'_> = Lexer::from_str(s);
//we are expecting a decl, a var, an identifier, a global, and an error on the last empty variable
let mut l = l.into_iter();
let decl = l.next().unwrap();
assert!(matches!(decl, Token::Decl(..)));
let var = l.next().unwrap();
assert!(matches!(var, Token::Variable(..)));
let iden = l.next().unwrap();
assert!(matches!(iden, Token::Identifier(..)));
let glob = l.next().unwrap();
assert!(matches!(glob, Token::Global(..)));
let err = l.next().unwrap();
assert!(matches!(err, Token::Error(..)))
}
#[test]
fn every_token_test() {
use crate::assemble::Lexer;
use crate::assemble::Token;
let s = r#"@_global $0Var """tripleStrLiteral:)""" #hashtagComment
.Decl "str!Literal" ...
;-{[( )]} =98 -98 +101. 43.2 , < > : _/identifier/ /filepath ."#;
//expect glob var tsl comment decl strlit variadic semicolon dash open_curly open_brack open_paren close_paren close_bracket
//close_curly equal number number number number , < > : identifier identifier ERROR on the last .
let s = s.as_bytes();
let l: Lexer<'_> = Lexer::from_str(s);
let mut l = l.into_iter();
let glob = l.next().unwrap();
eprintln!("{}", glob);
assert!(matches!(glob, Token::Global(..)));
let var = l.next().unwrap();
eprintln!("{}", var);
assert!(matches!(var, Token::Variable(..)));
let tsl = l.next().unwrap();
eprintln!("{}", tsl);
assert!(matches!(tsl, Token::TripleStrLiteral(..)));
let comment = l.next().unwrap();
eprintln!("{}", comment);
assert!(matches!(comment, Token::Comment(..)));
let decl = l.next().unwrap();
eprintln!("{}", decl);
assert!(matches!(decl, Token::Decl(..)));
let strlit = l.next().unwrap();
eprintln!("{}", strlit);
assert!(matches!(strlit, Token::StrLiteral(..)));
let variadic = l.next().unwrap();
eprintln!("{}", variadic);
assert!(matches!(variadic, Token::Variadic(..)));
let semicolon = l.next().unwrap();
eprintln!("{}", semicolon);
assert!(matches!(semicolon, Token::Semicolon(..)));
let dash = l.next().unwrap();
eprintln!("{}", dash);
assert!(matches!(dash, Token::Dash(..)));
let oc = l.next().unwrap();
eprintln!("{}", oc);
assert!(matches!(oc, Token::OpenCurly(..)));
let ob = l.next().unwrap();
eprintln!("{}", ob);
assert!(matches!(ob, Token::OpenBracket(..)));
let op = l.next().unwrap();
eprintln!("{}", op);
assert!(matches!(op, Token::OpenParen(..)));
let cp = l.next().unwrap();
eprintln!("{}", cp);
assert!(matches!(cp, Token::CloseParen(..)));
let cb = l.next().unwrap();
eprintln!("{}", cb);
assert!(matches!(cb, Token::CloseBracket(..)));
let cc = l.next().unwrap();
eprintln!("{}", cc);
assert!(matches!(cc, Token::CloseCurly(..)));
let eq = l.next().unwrap();
eprintln!("{}", eq);
assert!(matches!(eq, Token::Equal(..)));
let num = l.next().unwrap();
eprintln!("{}", num);
assert!(matches!(num, Token::Number(..)));
let num = l.next().unwrap();
eprintln!("{}", num);
assert!(matches!(num, Token::Number(..)));
let num = l.next().unwrap();
eprintln!("{}", num);
assert!(matches!(num, Token::Number(..)));
let num = l.next().unwrap();
eprintln!("{}", num);
assert!(matches!(num, Token::Number(..)));
let comma = l.next().unwrap();
eprintln!("{}", comma);
assert!(matches!(comma, Token::Comma(..)));
let lt = l.next().unwrap();
eprintln!("{}", lt);
assert!(matches!(lt, Token::Lt(..)));
let gt = l.next().unwrap();
eprintln!("{}", gt);
assert!(matches!(gt, Token::Gt(..)));
let colon = l.next().unwrap();
eprintln!("{}", colon);
assert!(matches!(colon, Token::Colon(..)));
let iden = l.next().unwrap();
eprintln!("{}", iden);
assert!(matches!(iden, Token::Identifier(..)));
let iden = l.next().unwrap();
eprintln!("{}", iden);
assert!(matches!(iden, Token::Identifier(..)));
let err = l.next().unwrap();
eprintln!("{}", err);
assert!(matches!(err, Token::Error(..)));
}
}
| {
//lifetime of lexer is tied to lifetime of bytes
print_tokens(s);
let tr: Result<HackCUnit<'_>> = Ok(Default::default());
tr
} |
replay.rs | //! Parsing for replay and score files, which are very similar.
use crate::prelude::*;
/// The LZMA compression level (a number between 0 and 9) used to write replay data when it is
/// not otherwise specified.
const DEFAULT_COMPRESSION_LEVEL: u32 = 5;
/// An osu! replay.
/// The replay might come from a large `ScoreList` score database, or from an individual standalone
/// `.osr` file.
#[cfg_attr(feature = "ser-de", derive(Serialize, Deserialize))]
#[derive(Debug, Clone, PartialEq)]
pub struct Replay {
/// The gamemode the replay was scored in.
pub mode: Mode,
/// The `.db` version of the replay file.
/// If the replay is inside a `scores.db` file, the version should be redundant with it (?).
pub version: u32,
/// The MD5 hash of the beatmap played.
pub beatmap_hash: Option<String>,
/// The name of the player who scored the replay.
pub player_name: Option<String>,
/// The replay-specific MD5 hash.
pub replay_hash: Option<String>,
/// Amount of 300s (fruits in ctb).
pub count_300: u16,
/// Amount of 100s (drops in ctb, 150s in taiko and 200s in mania).
pub count_100: u16,
/// Amount of 50s (droplets in ctb).
pub count_50: u16,
/// Amount of gekis ("MAX scores" or "rainbow 300s" in mania).
pub count_geki: u16,
/// Amount of katsus (200s in mania, droplet misses in ctb).
pub count_katsu: u16,
/// Amount of misses (fruit + drop misses in ctb).
pub count_miss: u16,
/// The numerical score achieved.
pub score: u32,
pub max_combo: u16,
pub perfect_combo: bool,
/// The mod combination with which the replay was done.
pub mods: ModSet,
/// A string representing a graph of how much life the player had throughout the beatmap.
///
/// It is a comma-separated list of human-readable entries in the form `<offset>|<life>`, where
/// `<offset>` is the amount of milliseconds since the start of the song and `<life>` is a
/// number between 0 and 1 representing the amount of life left.
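///
/// For example (illustrative values only): `"0|1,1500|0.86,3000|0.5"`.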
pub life_graph: Option<String>,
/// When was the replay scored.
pub timestamp: DateTime<Utc>,
/// Decompressed replay data.
///
/// Only available on standalone `.osr` replays, and if the `compression` feature is enabled
/// (enabled by default).
///
/// When writing `.osr` files (and `.osr` files only), if the `compression` feature is enabled
/// and this field is `Some`, these actions will be compressed and written. Otherwise,
/// `raw_replay_data` will be written instead.
pub replay_data: Option<Vec<Action>>,
/// Raw replay data, available on `.osr` files even if the `compression` feature is not enabled.
///
/// When writing, this field is used as a fallback if `replay_data` is `None` or the
/// `compression` feature is disabled.
pub raw_replay_data: Option<Vec<u8>>,
/// Online score id.
/// Only has a useful value on replays embedded in a `ScoreList`.
pub online_score_id: u64,
}
impl Replay {
/// Parse a replay from its raw bytes.
pub fn from_bytes(bytes: &[u8]) -> Result<Replay, Error> {
replay(bytes, true).map(|(_rem, replay)| replay)
}
/// Read a replay from a standalone `.osr` osu! replay file.
pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Replay, Error> {
Self::from_bytes(&fs::read(path)?)
}
/// Write the replay to an arbitrary writer, with the given compression level.
///
/// If the compression level is `None` the arbitrary default
/// `replay::DEFAULT_COMPRESSION_LEVEL` will be used.
/// If the `compression` feature is disabled this argument has no effect.
pub fn to_writer<W: Write>(
&self,
mut out: W,
compression_level: Option<u32>,
) -> io::Result<()> {
self.wr_args(
&mut out,
Some(compression_level.unwrap_or(DEFAULT_COMPRESSION_LEVEL)),
)
}
/// Similar to `to_writer` but writes the replay to an `osr` file.
pub fn save<P: AsRef<Path>>(&self, path: P, compression_level: Option<u32>) -> io::Result<()> {
self.to_writer(BufWriter::new(File::create(path)?), compression_level)
}
}
pub(crate) fn replay(bytes: &[u8], standalone: bool) -> Result<(&[u8], Replay), Error> {
let (rem, mode) = map_opt(byte, Mode::from_raw)(bytes)?;
let (rem, version) = int(rem)?;
let (rem, beatmap_hash) = opt_string(rem)?;
let (rem, player_name) = opt_string(rem)?;
let (rem, replay_hash) = opt_string(rem)?;
let (rem, count_300) = short(rem)?;
let (rem, count_100) = short(rem)?;
let (rem, count_50) = short(rem)?;
let (rem, count_geki) = short(rem)?;
let (rem, count_katsu) = short(rem)?;
let (rem, count_miss) = short(rem)?;
let (rem, score) = int(rem)?;
let (rem, max_combo) = short(rem)?;
let (rem, perfect_combo) = boolean(rem)?;
let (rem, mods) = map(int, ModSet::from_bits)(rem)?;
let (rem, life_graph) = opt_string(rem)?;
let (rem, timestamp) = datetime(rem)?;
let (rem, raw_replay_data) = if standalone {
map(length_data(int), Some)(rem)?
} else {
let (rem, _tag) = tag(&[0xff, 0xff, 0xff, 0xff])(rem)?;
(rem, None)
};
let replay_data = parse_replay_data(raw_replay_data)?;
let (rem, online_score_id) = long(rem)?;
let replay = Replay {
mode,
version,
beatmap_hash,
player_name,
replay_hash,
count_300,
count_100,
count_50,
count_geki,
count_katsu,
count_miss,
score,
max_combo,
perfect_combo,
mods,
life_graph,
timestamp,
replay_data,
raw_replay_data: raw_replay_data.map(ToOwned::to_owned),
online_score_id,
};
Ok((rem, replay))
}
writer!(Replay [this,out,compress_data: Option<u32>] {
this.mode.raw().wr(out)?;
this.version.wr(out)?;
this.beatmap_hash.wr(out)?;
this.player_name.wr(out)?;
this.replay_hash.wr(out)?;
this.count_300.wr(out)?;
this.count_100.wr(out)?;
this.count_50.wr(out)?;
this.count_geki.wr(out)?;
this.count_katsu.wr(out)?;
this.count_miss.wr(out)?;
this.score.wr(out)?;
this.max_combo.wr(out)?;
this.perfect_combo.wr(out)?;
this.mods.bits().wr(out)?;
this.life_graph.wr(out)?;
this.timestamp.wr(out)?;
if let Some(compression_level) = compress_data {
write_replay_data(
this.replay_data.as_deref(),
this.raw_replay_data.as_deref(),
out,
compression_level
)?;
}else{
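//Non-standalone path: write the 0xffffffff sentinel that `replay` expects for
//scores embedded in a `ScoreList`, instead of a length-prefixed data block.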
0xffffffff_u32.wr(out)?;
}
this.online_score_id.wr(out)?;
});
/// Represents a single action within a replay.
/// The meaning of an action depends on the gamemode of the replay, but all actions
/// contain:
///
/// - An integral amount of milliseconds elapsed since the last action, `delta`.
/// - 3 pieces of floating-point payload: `x`, `y` and `z`.
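///
/// For example (illustrative values), a standard-mode action serialized as
/// `16|256|192|1` means: 16 ms after the previous action the cursor sat at
/// (256, 192) with bit 0 of `z` (the primary mouse button) pressed.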
#[cfg_attr(feature = "ser-de", derive(Serialize, Deserialize))]
#[derive(Debug, Clone, PartialEq)]
pub struct Action {
/// The amount of milliseconds since the last action.
pub delta: i64,
/// First piece of payload in the action.
///
/// In standard:
/// Represents the `x` coordinate of the mouse, from `0` to `512`.
///
/// In mania:
/// Represents the bitwise combination of buttons pressed.
pub x: f32,
/// Second piece of payload in the action.
///
/// In standard:
/// Represents the `y` coordinate of the mouse, from `0` to `384`.
pub y: f32,
/// Third piece of payload in the action.
///
/// In standard:
/// Represents the bitwise combination of buttons pressed.
pub z: f32,
}
impl Action {
/// Get the pressed osu!standard buttons.
pub fn std_buttons(&self) -> StandardButtonSet {
StandardButtonSet::from_bits(self.z as u32)
}
/// Get the pressed osu!mania buttons.
pub fn mania_buttons(&self) -> ManiaButtonSet {
ManiaButtonSet::from_bits(self.x as u32)
}
}
#[cfg_attr(feature = "ser-de", derive(Serialize, Deserialize))]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
#[repr(u32)]
pub enum StandardButton {
MousePrimary,
MouseSecondary,
KeyPrimary,
KeySecondary,
}
impl StandardButton {
pub fn raw(&self) -> u32 |
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct StandardButtonSet(pub u32);
impl StandardButtonSet {
pub fn bits(self) -> u32 {
self.0
}
pub fn from_bits(bits: u32) -> StandardButtonSet {
StandardButtonSet(bits)
}
/// Create a new button combination with no buttons pressed.
pub fn none() -> StandardButtonSet {
StandardButtonSet::from_bits(0)
}
/// Check whether the combination lists the button as pressed.
pub fn is_down(&self, button: StandardButton) -> bool {
self.bits().bit(button.raw() as u32)
}
/// Set the pressed status of the given button.
pub fn set_down(&self, button: StandardButton, is_down: bool) -> StandardButtonSet {
let mut bits = self.bits();
bits.set_bit(button.raw() as u32, is_down);
StandardButtonSet::from_bits(bits)
}
/// Set the pressed status of a button to `true`.
pub fn press(&self, button: StandardButton) -> StandardButtonSet {
self.set_down(button, true)
}
/// Set the pressed status of a button to `false`.
pub fn release(&self, button: StandardButton) -> StandardButtonSet {
self.set_down(button, false)
}
}
/// Any combination of mania buttons being pressed.
///
/// Button indices start from `0`, and go left-to-right.
/// Button indices outside the replay key count should never be down.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct ManiaButtonSet(pub u32);
impl ManiaButtonSet {
pub fn bits(&self) -> u32 {
self.0
}
pub fn from_bits(bits: u32) -> ManiaButtonSet {
ManiaButtonSet(bits)
}
/// Create a new key combination with no keys pressed.
pub fn none() -> ManiaButtonSet {
ManiaButtonSet::from_bits(0)
}
/// Check whether a certain key is pressed.
pub fn is_down(&self, button: u32) -> bool {
self.bits().bit(button)
}
/// Set the pressed status of a key.
pub fn set_down(&self, button: u32, is_down: bool) -> ManiaButtonSet {
let mut bits = self.bits();
bits.set_bit(button, is_down);
ManiaButtonSet::from_bits(bits)
}
/// Set the pressed status of a key to `true`.
pub fn press(&self, button: u32) -> ManiaButtonSet {
self.set_down(button, true)
}
/// Set the pressed status of a key to `false`.
pub fn release(&self, button: u32) -> ManiaButtonSet {
self.set_down(button, false)
}
}
fn parse_replay_data(raw: Option<&[u8]>) -> Result<Option<Vec<Action>>, Error> {
#[cfg(feature = "compression")]
{
if let Some(raw) = raw {
use xz2::{stream::Stream, write::XzDecoder};
let mut decoder =
XzDecoder::new_stream(Vec::new(), Stream::new_lzma_decoder(u64::MAX)?);
decoder.write_all(raw)?;
let data = decoder.finish()?;
let actions = actions(&data)?.1;
return Ok(Some(actions));
}
}
Ok(None)
}
fn write_replay_data<W: Write>(
actions: Option<&[Action]>,
raw: Option<&[u8]>,
out: &mut W,
compression_level: u32,
) -> io::Result<()> {
let mut raw = raw.as_deref();
let compress_buf: Vec<u8>;
//Compress if it's enabled and available
#[cfg(feature = "compression")]
{
if let Some(actions) = actions {
use xz2::{
stream::{LzmaOptions, Stream},
write::XzEncoder,
};
let mut encoder = XzEncoder::new_stream(
Vec::new(),
Stream::new_lzma_encoder(&LzmaOptions::new_preset(compression_level)?)?,
);
for action in actions.iter() {
action.wr(&mut encoder)?;
}
compress_buf = encoder.finish()?;
raw = Some(&compress_buf[..]);
}
}
let raw = raw.unwrap_or_default();
//Prefix the data with its length
(raw.len() as u32).wr(out)?;
out.write_all(raw)?;
Ok(())
}
// Parse the plaintext list of actions.
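// e.g. (illustrative) b"16|256|192|1,5|260.5|190|1," parses into two `Action`s.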
fn actions(bytes: &[u8]) -> IResult<&[u8], Vec<Action>> {
many0(action)(bytes)
}
fn action(bytes: &[u8]) -> IResult<&[u8], Action> {
let (rem, delta) = number(bytes)?;
let (rem, _tag) = tag(b"|")(rem)?;
let (rem, x) = number(rem)?;
let (rem, _tag) = tag(b"|")(rem)?;
let (rem, y) = number(rem)?;
let (rem, _tag) = tag(b"|")(rem)?;
let (rem, z) = number(rem)?;
let (rem, _tag) = tag(b",")(rem)?;
let action = Action {
delta: delta as i64,
x: x as f32,
y: y as f32,
z: z as f32,
};
Ok((rem, action))
}
writer!(Action [this,out] {
write!(out, "{}|{}|{}|{},", this.delta,this.x,this.y,this.z)?;
});
// Parse a textually encoded decimal number.
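// The format is [sign] digits ["." digits]; e.g. b"-12.5" -> -12.5. Exponent
// notation is not part of the format and is not handled here.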
fn number(bytes: &[u8]) -> IResult<&[u8], f64> {
let (rem, sign) = opt(tag(b"-"))(bytes)?;
let (rem, whole) = take_while1(|b: u8| b.is_ascii_digit())(rem)?;
let (rem, decimal) = opt(number_bytes)(rem)?;
let mut num = 0.0;
for byte in whole {
num *= 10.0;
num += (*byte - b'0') as f64;
}
if let Some(decimal) = decimal {
let mut value = 1.0;
for byte in decimal {
value /= 10.0;
num += (*byte - b'0') as f64 * value;
}
}
if sign.is_some() {
num *= -1.0
}
Ok((rem, num))
}
fn number_bytes(bytes: &[u8]) -> IResult<&[u8], &[u8]> {
let (rem, _tag) = tag(b".")(bytes)?;
take_while(|b: u8| b.is_ascii_digit())(rem)
}
| {
*self as u32
} |
util.rs | use std::fmt::{Display, Formatter, Result as FmtResult};
use std::str::{from_utf8, from_utf8_unchecked};
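/// Best-effort display for raw bytes: valid UTF-8 runs print as-is and each
/// invalid sequence is rendered as U+FFFD (e.g., illustratively,
/// `Utf8Lossy(b"ab\xFFcd")` displays as `ab\u{FFFD}cd`).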
#[derive(Clone, Copy)]
pub struct Utf8Lossy<'a>(pub &'a [u8]);
impl<'a> From<&'a [u8]> for Utf8Lossy<'a> {
fn | (bytes: &'a [u8]) -> Self {
Utf8Lossy(bytes)
}
}
impl Display for Utf8Lossy<'_> {
// Copied from https://doc.rust-lang.org/std/str/struct.Utf8Error.html#examples
fn fmt(&self, f: &mut Formatter) -> FmtResult {
let mut input = self.0;
loop {
match from_utf8(input) {
Ok(valid) => return valid.fmt(f),
Err(e) => {
let (valid, after_valid) = input.split_at(e.valid_up_to());
unsafe {
write!(f, "{}", from_utf8_unchecked(valid))?;
}
                    write!(f, "\u{FFFD}")?;
if let Some(invalid_sequence_length) = e.error_len() {
input = &after_valid[invalid_sequence_length..]
} else {
return Ok(());
}
}
}
}
}
}
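// Behaviour sketch (illustrative, not from the original file): a stray
// continuation byte is rendered as U+FFFD, matching `String::from_utf8_lossy`.
#[cfg(test)]
mod utf8_lossy_tests {
    use super::Utf8Lossy;

    #[test]
    fn replaces_invalid_byte() {
        let shown = format!("{}", Utf8Lossy(&b"a\x80b"[..]));
        assert_eq!(shown, "a\u{FFFD}b");
    }
}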
| from |
service_router_test.go | package toolbox_test
import (
"fmt"
"github.com/stretchr/testify/assert"
"github.com/viant/toolbox"
"log"
"net/http"
"testing"
"time"
)
type ReverseService struct{}
func (this ReverseService) Reverse(values []int) []int {
var result = make([]int, 0)
for i := len(values) - 1; i >= 0; i-- {
result = append(result, values[i])
}
return result
}
func (this ReverseService) Reverse2(values []int) []int {
var result = make([]int, 0)
for i := len(values) - 1; i >= 0; i-- {
result = append(result, values[i])
}
return result
}
var ReverseInvoker = func(serviceRouting *toolbox.ServiceRouting, request *http.Request, response http.ResponseWriter, uriParameters map[string]interface{}) error {
var function = serviceRouting.Handler.(func(values []int) []int)
idsParam := uriParameters["ids"]
ids := idsParam.([]string)
values := make([]int, 0)
for _, item := range ids {
values = append(values, toolbox.AsInt(item))
}
var result = function(values)
err := toolbox.WriteServiceRoutingResponse(response, request, serviceRouting, result)
if err != nil {
return err
}
return nil
}
func | (port string, t *testing.T) {
service := ReverseService{}
router := toolbox.NewServiceRouter(
toolbox.ServiceRouting{
HTTPMethod: "GET",
URI: "/v1/reverse/{ids}",
Handler: service.Reverse,
Parameters: []string{"ids"},
},
toolbox.ServiceRouting{
HTTPMethod: "POST",
URI: "/v1/reverse/",
Handler: service.Reverse,
Parameters: []string{"ids"},
},
toolbox.ServiceRouting{
HTTPMethod: "DELETE",
URI: "/v1/delete/{ids}",
Handler: service.Reverse,
Parameters: []string{"ids"},
},
toolbox.ServiceRouting{
HTTPMethod: "GET",
URI: "/v1/reverse2/{ids}",
Handler: service.Reverse,
Parameters: []string{"ids"},
HandlerInvoker: ReverseInvoker,
},
)
http.HandleFunc("/v1/", func(writer http.ResponseWriter, reader *http.Request) {
err := router.Route(writer, reader)
assert.Nil(t, err)
})
fmt.Printf("Started test server on port %v\n", port)
log.Fatal(http.ListenAndServe(":"+port, nil))
}
func TestServiceRouter(t *testing.T) {
go func() {
StartServer("8082", t)
}()
time.Sleep(2 * time.Second)
var result = make([]int, 0)
{
err := toolbox.RouteToService("get", "http://127.0.0.1:8082/v1/reverse/1,7,3", nil, &result)
if err != nil {
t.Errorf("failed to send get request %v", err)
}
assert.EqualValues(t, []int{3, 7, 1}, result)
}
{
err := toolbox.RouteToService("post", "http://127.0.0.1:8082/v1/reverse/", []int{1, 7, 3}, &result)
if err != nil {
			t.Errorf("failed to send post request %v", err)
}
assert.EqualValues(t, []int{3, 7, 1}, result)
}
{
err := toolbox.RouteToService("delete", "http://127.0.0.1:8082/v1/delete/", []int{1, 7, 3}, &result)
if err != nil {
t.Errorf("failed to send delete request %v", err)
}
assert.EqualValues(t, []int{3, 7, 1}, result)
}
{
err := toolbox.RouteToService("delete", "http://127.0.0.1:8082/v1/delete/1,7,3", nil, &result)
if err != nil {
t.Errorf("failed to send delete request %v", err)
}
assert.EqualValues(t, []int{3, 7, 1}, result)
}
	{ // Test custom handler invocation without reflection
err := toolbox.RouteToService("get", "http://127.0.0.1:8082/v1/reverse2/1,7,3", nil, &result)
if err != nil {
			t.Errorf("failed to send get request %v", err)
}
assert.EqualValues(t, []int{3, 7, 1}, result)
}
}
| StartServer |
fixtures.go | package testing
import (
"fmt"
"net/http"
"testing"
"github.com/gophercloud/gophercloud/openstack/orchestration/v1/resourcetypes"
th "github.com/gophercloud/gophercloud/testhelper"
fake "github.com/gophercloud/gophercloud/testhelper/client"
)
const BasicListOutput = `
{
"resource_types": [
"OS::Nova::Server",
"OS::Heat::Stack"
]
}
`
var BasicListExpected = []resourcetypes.ResourceTypeSummary{
{
ResourceType: "OS::Nova::Server",
},
{
ResourceType: "OS::Heat::Stack",
},
}
const FullListOutput = `
{
"resource_types": [
{
"description": "A Nova Server",
"resource_type": "OS::Nova::Server"
},
{
"description": "A Heat Stack",
"resource_type": "OS::Heat::Stack"
}
]
}
`
var FullListExpected = []resourcetypes.ResourceTypeSummary{
{
ResourceType: "OS::Nova::Server",
Description: "A Nova Server",
},
{
ResourceType: "OS::Heat::Stack",
Description: "A Heat Stack",
},
}
const listFilterRegex = "OS::Heat::.*"
const FilteredListOutput = `
{
"resource_types": [
{
"description": "A Heat Stack",
"resource_type": "OS::Heat::Stack"
}
]
}
`
var FilteredListExpected = []resourcetypes.ResourceTypeSummary{
{
ResourceType: "OS::Heat::Stack",
Description: "A Heat Stack",
},
}
// HandleListSuccessfully creates an HTTP handler at `/resource_types`
// on the test handler mux that responds with a `List` response.
func HandleListSuccessfully(t *testing.T) {
th.Mux.HandleFunc("/resource_types",
func(w http.ResponseWriter, r *http.Request) {
th.TestMethod(t, r, "GET")
th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
th.TestHeader(t, r, "Accept", "application/json")
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
r.ParseForm()
var output string
if r.Form.Get("with_description") == "true" {
if r.Form.Get("name") == listFilterRegex {
output = FilteredListOutput
} else {
output = FullListOutput
}
} else {
output = BasicListOutput
}
fmt.Fprint(w, output)
})
}
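// Illustrative use in a unit test (a sketch; the Setup/Teardown helpers are
// assumed from gophercloud's testhelper package, not defined in this file):
//
//	th.SetupHTTP()
//	defer th.TeardownHTTP()
//	HandleListSuccessfully(t)
//	// ...exercise resourcetypes.List against the fake endpoint and compare
//	// the result with BasicListExpected or FullListExpected.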
var glanceImageConstraint = "glance.image"
var GetSchemaExpected = resourcetypes.ResourceSchema{
ResourceType: "OS::Test::TestServer",
SupportStatus: resourcetypes.SupportStatusDetails{
Status: resourcetypes.SupportStatusDeprecated,
Message: "Bye bye.",
Version: "10.0.0",
PreviousStatus: &resourcetypes.SupportStatusDetails{
Status: resourcetypes.SupportStatusSupported,
},
},
Attributes: map[string]resourcetypes.AttributeSchema{
"show": {
Description: "Detailed information about resource.",
Type: resourcetypes.MapProperty,
},
"tags": {
Description: "Tags from the server.",
Type: resourcetypes.ListProperty,
},
"name": {
Description: "Name of the server.",
Type: resourcetypes.StringProperty,
},
},
Properties: map[string]resourcetypes.PropertySchema{
"name": {
Type: resourcetypes.StringProperty,
Description: "Server name.",
UpdateAllowed: true,
},
"image": {
Type: resourcetypes.StringProperty,
Description: "The ID or name of the image to boot with.",
Required: true,
Constraints: []resourcetypes.ConstraintSchema{
{
CustomConstraint: &glanceImageConstraint,
},
},
},
"block_device_mapping": {
Type: resourcetypes.ListProperty,
Description: "Block device mappings for this server.",
Schema: map[string]resourcetypes.PropertySchema{
"*": {
Type: resourcetypes.MapProperty,
Schema: map[string]resourcetypes.PropertySchema{
"ephemeral_format": {
Type: resourcetypes.StringProperty,
Description: "The format of the local ephemeral block device.",
Constraints: []resourcetypes.ConstraintSchema{
{
AllowedValues: &[]interface{}{
"ext3", "ext4", "xfs",
},
},
},
},
"ephemeral_size": {
Type: resourcetypes.IntegerProperty,
Description: "The size of the local ephemeral block device, in GB.",
Constraints: []resourcetypes.ConstraintSchema{
{
Range: &resourcetypes.MinMaxConstraint{
Min: 1,
},
},
},
},
"delete_on_termination": {
Type: resourcetypes.BooleanProperty,
Description: "Delete volume on server termination.",
Default: true,
Immutable: true,
},
},
},
},
},
"image_update_policy": {
Type: resourcetypes.StringProperty,
Description: "Policy on how to apply an image-id update.",
Default: "REBUILD",
Constraints: []resourcetypes.ConstraintSchema{
{
AllowedValues: &[]interface{}{
"REBUILD", "REPLACE",
},
},
},
UpdateAllowed: true,
},
},
}
const GetSchemaOutput = `
{
"resource_type": "OS::Test::TestServer",
"support_status": {
"status": "DEPRECATED",
"message": "Bye bye.",
"version": "10.0.0",
"previous_status": {
"status": "SUPPORTED",
"message": null,
"version": null,
"previous_status": null
}
},
"attributes": {
"show": {
"type": "map",
"description": "Detailed information about resource."
},
"tags": {
"type": "list",
"description": "Tags from the server."
},
"name": {
"type": "string",
"description": "Name of the server."
}
},
"properties": {
"name": {
"update_allowed": true,
"required": false,
"type": "string",
"description": "Server name.",
"immutable": false
},
"image": {
"description": "The ID or name of the image to boot with.",
"required": true,
"update_allowed": false,
"type": "string",
"immutable": false,
"constraints": [
{
"custom_constraint": "glance.image"
}
]
},
"block_device_mapping": {
"description": "Block device mappings for this server.",
"required": false,
"update_allowed": false,
"type": "list",
"immutable": false,
"schema": {
"*": {
"update_allowed": false,
"required": false,
"type": "map",
"immutable": false,
"schema": {
"ephemeral_format": {
"description": "The format of the local ephemeral block device.",
"required": false,
"update_allowed": false,
"type": "string",
"immutable": false,
"constraints": [
{
"allowed_values": [
"ext3",
"ext4",
"xfs"
]
}
]
},
"ephemeral_size": {
"description": "The size of the local ephemeral block device, in GB.",
"required": false,
"update_allowed": false,
"type": "integer",
"immutable": false,
"constraints": [
{
"range": {
"min": 1
}
}
]
},
"delete_on_termination": {
"update_allowed": false,
"default": true,
"required": false,
"type": "boolean",
"description": "Delete volume on server termination.",
"immutable": true
}
}
}
}
},
"image_update_policy": {
"description": "Policy on how to apply an image-id update.",
"default": "REBUILD",
"required": false,
"update_allowed": true,
"type": "string",
"immutable": false,
"constraints": [
{
"allowed_values": [
"REBUILD",
"REPLACE"
]
}
]
}
}
}
`
// HandleGetSchemaSuccessfully creates an HTTP handler at
// `/resource_types/OS::Test::TestServer` on the test handler mux that
// responds with a `GetSchema` response.
func HandleGetSchemaSuccessfully(t *testing.T) {
th.Mux.HandleFunc("/resource_types/OS::Test::TestServer",
func(w http.ResponseWriter, r *http.Request) {
th.TestMethod(t, r, "GET")
th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
th.TestHeader(t, r, "Accept", "application/json")
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, GetSchemaOutput)
})
}
const GenerateTemplateOutput = `
{
"outputs": {
"OS::stack_id": {
"value": {
"get_resource": "NoneResource"
}
},
"show": {
"description": "Detailed information about resource.",
"value": {
"get_attr": [
"NoneResource",
"show"
]
}
}
},
"heat_template_version": "2016-10-14",
"description": "Initial template of NoneResource",
"parameters": {},
"resources": {
"NoneResource": {
"type": "OS::Heat::None",
"properties": {}
}
}
}
`
// HandleGenerateTemplateSuccessfully creates an HTTP handler at
// `/resource_types/OS::Heat::None/template` on the test handler mux that
// responds with a template.
func | (t *testing.T) {
th.Mux.HandleFunc("/resource_types/OS::Heat::None/template",
func(w http.ResponseWriter, r *http.Request) {
th.TestMethod(t, r, "GET")
th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
th.TestHeader(t, r, "Accept", "application/json")
w.Header().Set("Content-Type", "application/json")
r.ParseForm()
if r.Form.Get("template_type") == "hot" {
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, GenerateTemplateOutput)
} else {
w.WriteHeader(http.StatusBadRequest)
}
})
}
| HandleGenerateTemplateSuccessfully |
sentencepiece_tokenizer_test.py | # coding=utf-8
# Copyright 2020 TF.Text Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for SentencePieceProcessor Tensorflow op."""
import sys
import tempfile
from absl.testing import parameterized
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.module import module
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import save
from tensorflow_text.python.ops.sentencepiece_tokenizer import SentencepieceTokenizer
def _utf8(tokens):
if sys.version_info[0] == 2:
return tokens
if isinstance(tokens, list):
return [_utf8(t) for t in tokens]
else:
return tokens.encode('utf-8')
class TestSavedModelModule(module.Module):
def __init__(self, tokenizer):
self.tokenizer = tokenizer
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[None], dtype=dtypes.string)
])
def tokenize(self, inputs):
return self.tokenizer.tokenize(inputs)
@test_util.run_all_in_graph_and_eager_modes
class SentencepieceTokenizerOpTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def getTokenizerAndSetOptions(self, reverse, add_bos, add_eos, out_type):
self.reverse = reverse
self.add_bos = add_bos
self.add_eos = add_eos
self.out_type = out_type
return SentencepieceTokenizer(
self.model,
reverse=reverse,
add_bos=add_bos,
add_eos=add_eos,
out_type=out_type)
def transformExpected(self, expected, is_offsets=False):
bos = _utf8('<s>')
eos = _utf8('</s>')
if is_offsets:
bos = 0
eos = 0
elif self.out_type == dtypes.int32:
bos = 1
eos = 2
if not isinstance(expected[0], list):
if self.add_bos:
expected = [bos] + expected
if self.add_eos:
expected = expected + [eos]
if self.reverse:
expected = [x for x in reversed(expected)]
else:
return [self.transformExpected(x) for x in expected]
return expected
def setUp(self):
super(SentencepieceTokenizerOpTest, self).setUp()
sentencepiece_model_file = (
'tensorflow_text/python/ops/test_data/'
'test_oss_model.model')
self.model = gfile.GFile(sentencepiece_model_file, 'rb').read()
def testGetVocabSize(self):
sp = SentencepieceTokenizer(self.model)
self.assertAllEqual(1000, sp.vocab_size())
def testIdToStringScalar(self):
sp = SentencepieceTokenizer(self.model)
result = sp.id_to_string(125)
self.assertAllEqual('ve', result)
def testIdToStringVector(self):
sp = SentencepieceTokenizer(self.model)
pieces = _utf8([['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],
['▁I', '▁l', 'o', 've', '▁desk', '.'],
['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']])
ids = [[9, 169, 21, 125, 78, 48, 132, 15], [9, 169, 21, 125, 727, 6],
[9, 169, 21, 125, 169, 579, 6]]
result = sp.id_to_string(ragged_factory_ops.constant(ids))
self.assertAllEqual(pieces, result)
def testIdToStringRagged(self):
sp = SentencepieceTokenizer(self.model)
pieces = _utf8(
[[['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],
['▁I', '▁l', 'o', 've', '▁desk', '.'],
['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']],
[['▁', 'N', 'ever', '▁tell', '▁me', '▁the', '▁', 'o', 'd', 'd', 's']]])
ids = [[[9, 169, 21, 125, 78, 48, 132, 15], [9, 169, 21, 125, 727, 6],
[9, 169, 21, 125, 169, 579, 6]],
[[4, 199, 363, 310, 33, 7, 4, 21, 17, 17, 8]]]
result = sp.id_to_string(ragged_factory_ops.constant(ids, dtypes.int32))
self.assertAllEqual(pieces, result)
@parameterized.parameters([
(False, False, False, dtypes.int32),
(False, False, True, dtypes.int32),
(False, True, False, dtypes.int32),
(False, True, True, dtypes.int32),
(True, False, False, dtypes.int32),
(True, False, True, dtypes.int32),
(True, True, False, dtypes.int32),
(True, True, True, dtypes.int32),
(False, False, False, dtypes.string),
(False, False, True, dtypes.string),
(False, True, False, dtypes.string),
(False, True, True, dtypes.string),
(True, False, False, dtypes.string),
(True, False, True, dtypes.string),
(True, True, False, dtypes.string),
(True, True, True, dtypes.string),
])
def testTokenizeAndDetokenizeScalar(self, reverse, add_bos, add_eos,
out_type):
sp = self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)
sentence = 'I love lamp.'
expected = []
if out_type == dtypes.int32:
expected = [9, 169, 21, 125, 169, 579, 6]
else:
expected = _utf8(['▁I', '▁l', 'o', 've', '▁l', 'amp', '.'])
expected = self.transformExpected(expected)
result = sp.tokenize(sentence)
self.assertAllEqual(expected, result)
detokenized = sp.detokenize(result)
self.assertAllEqual(_utf8(sentence), detokenized)
@parameterized.parameters([
(False, False, False, dtypes.int32),
(False, False, True, dtypes.int32),
(False, True, False, dtypes.int32),
(False, True, True, dtypes.int32),
(True, False, False, dtypes.int32),
(True, False, True, dtypes.int32),
(True, True, False, dtypes.int32),
(True, True, True, dtypes.int32),
(False, False, False, dtypes.string),
(False, False, True, dtypes.string),
(False, True, False, dtypes.string),
(False, True, True, dtypes.string),
(True, False, False, dtypes.string),
(True, False, True, dtypes.string),
(True, True, False, dtypes.string),
(True, True, True, dtypes.string),
])
def testTokenizeAndDetokenizeVec(self, reverse, add_bos, add_eos, out_type):
sp = self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)
sentences = ['I love carpet', 'I love desk.', 'I love lamp.']
expected = []
if out_type == dtypes.int32:
expected = [[9, 169, 21, 125, 78, 48, 132, 15], [9, 169, 21, 125, 727, 6],
[9, 169, 21, 125, 169, 579, 6]]
else:
expected = _utf8([['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],
['▁I', '▁l', 'o', 've', '▁desk', '.'],
['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']])
expected = self.transformExpected(expected)
result = sp.tokenize(sentences)
self.assertAllEqual(expected, result)
detokenized = sp.detokenize(result)
self.assertAllEqual(_utf8(sentences), detokenized)
@parameterized.parameters([
(False, False, False, dtypes.int32),
(False, False, True, dtypes.int32),
(False, True, False, dtypes.int32),
(False, True, True, dtypes.int32),
(True, False, False, dtypes.int32),
(True, False, True, dtypes.int32),
(True, True, False, dtypes.int32),
(True, True, True, dtypes.int32),
(False, False, False, dtypes.string),
(False, False, True, dtypes.string),
(False, True, False, dtypes.string),
(False, True, True, dtypes.string),
(True, False, False, dtypes.string),
(True, False, True, dtypes.string),
(True, True, False, dtypes.string),
(True, True, True, dtypes.string),
])
def testTokenizeAndDetokenizeUniformTensorMatrix(self, reverse, add_bos,
add_eos, out_type):
sp = self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)
sentences = [['I love carpet', 'I love desk.'],
['I love lamp.', 'Never tell me the odds']]
expected = []
if out_type == dtypes.int32:
expected = [[[9, 169, 21, 125, 78, 48, 132, 15],
[9, 169, 21, 125, 727, 6]],
[[9, 169, 21, 125, 169, 579, 6],
[4, 199, 363, 310, 33, 7, 4, 21, 17, 17, 8]]]
else:
expected = _utf8(
[[['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],
['▁I', '▁l', 'o', 've', '▁desk', '.']],
[['▁I', '▁l', 'o', 've', '▁l', 'amp', '.'],
['▁', 'N', 'ever', '▁tell', '▁me', '▁the', '▁', 'o', 'd', 'd',
's']]])
expected = self.transformExpected(expected)
result = sp.tokenize(constant_op.constant(sentences))
self.assertAllEqual(expected, result)
detokenized = sp.detokenize(result)
self.assertAllEqual(_utf8(sentences), detokenized)
@parameterized.parameters([
(False, False, False, dtypes.int32),
(False, False, True, dtypes.int32),
(False, True, False, dtypes.int32),
(False, True, True, dtypes.int32),
(True, False, False, dtypes.int32),
(True, False, True, dtypes.int32),
(True, True, False, dtypes.int32),
(True, True, True, dtypes.int32),
(False, False, False, dtypes.string),
(False, False, True, dtypes.string),
(False, True, False, dtypes.string),
(False, True, True, dtypes.string),
(True, False, False, dtypes.string),
(True, False, True, dtypes.string),
(True, True, False, dtypes.string),
(True, True, True, dtypes.string),
])
def testTokenizeAndDetokenizeRaggedMatrix(self, reverse, add_bos, add_eos,
| self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)
sentences = [['I love carpet', 'I love desk.', 'I love lamp.'],
['Never tell me the odds']]
expected = []
if out_type == dtypes.int32:
expected = [[[9, 169, 21, 125, 78, 48, 132, 15],
[9, 169, 21, 125, 727, 6], [9, 169, 21, 125, 169, 579, 6]],
[[4, 199, 363, 310, 33, 7, 4, 21, 17, 17, 8]]]
else:
expected = _utf8(
[[['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],
['▁I', '▁l', 'o', 've', '▁desk', '.'],
['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']],
[['▁', 'N', 'ever', '▁tell', '▁me', '▁the', '▁', 'o', 'd', 'd',
's']]])
expected = self.transformExpected(expected)
result = sp.tokenize(ragged_factory_ops.constant(sentences))
self.assertAllEqual(expected, result)
detokenized = sp.detokenize(result)
self.assertAllEqual(_utf8(sentences), detokenized)
@parameterized.parameters([
(False, False, False, dtypes.int32),
(False, False, True, dtypes.int32),
(False, True, False, dtypes.int32),
(False, True, True, dtypes.int32),
(True, False, False, dtypes.int32),
(True, False, True, dtypes.int32),
(True, True, False, dtypes.int32),
(True, True, True, dtypes.int32),
(False, False, False, dtypes.string),
(False, False, True, dtypes.string),
(False, True, False, dtypes.string),
(False, True, True, dtypes.string),
(True, False, False, dtypes.string),
(True, False, True, dtypes.string),
(True, True, False, dtypes.string),
(True, True, True, dtypes.string),
])
def testTokenizeAndDetokenizeWithOffsetsScalar(self, reverse, add_bos,
add_eos, out_type):
sp = self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)
sentence = 'I love lamp.'
expected_tok = []
expected_starts = [0, 1, 3, 4, 6, 8, 11]
expected_limits = [1, 3, 4, 6, 8, 11, 12]
if out_type == dtypes.int32:
expected_tok = [9, 169, 21, 125, 169, 579, 6]
else:
expected_tok = _utf8(['▁I', '▁l', 'o', 've', '▁l', 'amp', '.'])
expected_tok = self.transformExpected(expected_tok)
expected_starts = self.transformExpected(expected_starts, True)
expected_limits = self.transformExpected(expected_limits, True)
(tokens, starts,
limits) = sp.tokenize_with_offsets(ragged_factory_ops.constant(sentence))
self.assertAllEqual(expected_tok, tokens)
self.assertAllEqual(expected_starts, starts)
self.assertAllEqual(expected_limits, limits)
detokenized = sp.detokenize(tokens)
self.assertAllEqual(_utf8(sentence), detokenized)
def testTokenizeAndDetokenizeWithOffsetsSingleElementVector(self):
sp = SentencepieceTokenizer(self.model, out_type=dtypes.string)
sentences = ['I love lamp.']
expected_tokens = [['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']]
expected_tokens = _utf8(expected_tokens)
expected_starts = [[0, 1, 3, 4, 6, 8, 11]]
expected_limits = [[1, 3, 4, 6, 8, 11, 12]]
(tokens, starts,
limits) = sp.tokenize_with_offsets(ragged_factory_ops.constant(sentences))
self.assertAllEqual(expected_tokens, tokens)
self.assertAllEqual(expected_starts, starts)
self.assertAllEqual(expected_limits, limits)
detokenized = sp.detokenize(tokens)
self.assertAllEqual(_utf8(sentences), detokenized)
def testTokenizeAndDetokenizeWithOffsetsVector(self):
sp = SentencepieceTokenizer(self.model, out_type=dtypes.string)
sentences = ['I love carpet.', 'I love desk.', 'I love lamp.']
expected_tokens = [['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't', '.'],
['▁I', '▁l', 'o', 've', '▁desk', '.'],
['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']]
expected_tokens = _utf8(expected_tokens)
expected_starts = [[0, 1, 3, 4, 6, 8, 10, 12, 13], [0, 1, 3, 4, 6, 11],
[0, 1, 3, 4, 6, 8, 11]]
expected_limits = [[1, 3, 4, 6, 8, 10, 12, 13, 14], [1, 3, 4, 6, 11, 12],
[1, 3, 4, 6, 8, 11, 12]]
(tokens, starts,
limits) = sp.tokenize_with_offsets(ragged_factory_ops.constant(sentences))
self.assertAllEqual(expected_tokens, tokens)
self.assertAllEqual(expected_starts, starts)
self.assertAllEqual(expected_limits, limits)
detokenized = sp.detokenize(tokens)
self.assertAllEqual(_utf8(sentences), detokenized)
def testTokenizeAndDetokenizeWithOffsetsMatrix(self):
sp = SentencepieceTokenizer(self.model, out_type=dtypes.string)
sentences = [['I love carpet.', 'I love desk.', 'I love lamp.'],
['Never tell me the odds']]
expected_tokens = [[['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't', '.'],
['▁I', '▁l', 'o', 've', '▁desk', '.'],
['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']],
[[
'▁', 'N', 'ever', '▁tell', '▁me', '▁the', '▁', 'o',
'd', 'd', 's'
]]]
expected_tokens = _utf8(expected_tokens)
expected_starts = [[[0, 1, 3, 4, 6, 8, 10, 12, 13], [0, 1, 3, 4, 6, 11],
[0, 1, 3, 4, 6, 8, 11]],
[[0, 0, 1, 5, 10, 13, 17, 18, 19, 20, 21]]]
expected_limits = [[[1, 3, 4, 6, 8, 10, 12, 13, 14], [1, 3, 4, 6, 11, 12],
[1, 3, 4, 6, 8, 11, 12]],
[[0, 1, 5, 10, 13, 17, 18, 19, 20, 21, 22]]]
(tokens, starts,
limits) = sp.tokenize_with_offsets(ragged_factory_ops.constant(sentences))
self.assertAllEqual(expected_tokens, tokens)
self.assertAllEqual(expected_starts, starts)
self.assertAllEqual(expected_limits, limits)
detokenized = sp.detokenize(tokens)
self.assertAllEqual(_utf8(sentences), detokenized)
@parameterized.parameters([
(-1, 0.1, dtypes.int32),
(64, 0.1, dtypes.int32),
(0, 0.0, dtypes.int32),
(-1, 0.1, dtypes.string),
(64, 0.1, dtypes.string),
(0, 0.0, dtypes.string),
])
def testSampleTokenizeAndDetokenize(self, nbest_size, alpha, out_type):
sp = SentencepieceTokenizer(
self.model, nbest_size=nbest_size, alpha=alpha, out_type=out_type)
sentences = [['I love carpet', 'I love desk.', 'I love lamp.'],
['Never tell me the odds']]
result = sp.tokenize(ragged_factory_ops.constant(sentences))
detokenized = sp.detokenize(result)
self.assertAllEqual(_utf8(sentences), detokenized)
def testSavedModel(self):
sp = SentencepieceTokenizer(self.model)
test_module = TestSavedModelModule(sp)
inputs = constant_op.constant(['hello world'])
expected_result = test_module.tokenize(inputs)
temp_dir = tempfile.mkdtemp(dir=test.get_temp_dir())
save.save(test_module, temp_dir)
restored_model = load.load(temp_dir)
self.assertAllEqual(restored_model.tokenize(inputs), expected_result)
file_io.delete_recursively(temp_dir)
def testBasicPipeline(self):
if not context.executing_eagerly():
self.skipTest('testBasicPipeline only supported in eager mode.')
sp = SentencepieceTokenizer(self.model)
strings = ['hello', 'world']
dataset = dataset_ops.Dataset.from_tensor_slices(strings)
# Ensure we can map the tokenizer across the dataset.
dataset1 = dataset.map(sp.tokenize)
# Ensure there's no error with a second map call.
dataset2 = dataset.map(sp.tokenize)
expected = sp.tokenize(strings)
for i, result in enumerate(dataset1):
self.assertAllEqual(result, expected[i])
for i, result in enumerate(dataset2):
self.assertAllEqual(result, expected[i])
def testEmptyModel(self):
with self.cached_session():
with self.assertRaises(errors.InvalidArgumentError):
sp = SentencepieceTokenizer()
result = sp.tokenize('whatever')
result.eval()
def testInvalidModel(self):
with self.cached_session():
with self.assertRaises(errors.InternalError):
sp = SentencepieceTokenizer('invalid model')
result = sp.tokenize('whatever')
result.eval()
if __name__ == '__main__':
test.main()
| out_type):
sp = |
pdfUtils.py | # -*- coding: utf-8 -*-
"""Various helper methods for PDF extraction.
"""
# This file contains mostly unused leftovers from pdf.py.
class Stream (object):
"""Wrapper around PdfMiner's stream class"""
def | (self, stream):
self.stream = stream
def get(self, attribute):
"""Returns a cleaned up PDF stream attribute value
"""
try:
value = self.stream[attribute]
return str(value).strip("/_").lower()
except Exception:
return None
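# Illustrative usage (a sketch; `pdfminer_stream` stands in for any PDFMiner
# stream object with dict-style attribute access):
#
#   stream = Stream(pdfminer_stream)
#   color_space = stream.get("ColorSpace")  # e.g. "devicergb"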
"""
from pdfminer.pdftypes import resolve1, PDFObjRef
from binascii import b2a_hex
import zlib
from pdfminer.ccitt import ccittfaxdecode
hexadecimal = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6,
'7': 7, '8': 8, '9': 9, 'a': 10, 'b': 11, 'c': 12,
'd': 13, 'e': 14, 'f': 15}
base85m4 = long(pow(85, 4))
base85m3 = long(pow(85, 3))
base85m2 = long(pow(85, 2))
def get_colormode(color_space, bits=None):
color_mode = None
if isinstance(color_space, list):
color_space_family = _clean_up_stream_attribute(color_space[0])
else:
color_space_family = _clean_up_stream_attribute(color_space)
if color_space_family == "indexed":
color_schema = color_space[1]
if isinstance(color_schema, PDFObjRef):
color_schema = color_schema.resolve()
if isinstance(color_schema, list):
color_schema = color_schema[0]
color_schema = _clean_up_stream_attribute(color_schema)
bits = color_space[2] or bits
if isinstance(bits, PDFObjRef):
bits = bits.resolve()
if color_schema == "devicegray" and bits == 1:
color_mode = "1"
elif color_schema == "devicegray" and bits == 8:
color_mode = "L"
elif color_schema == "iccbased":
# FIXME This just happens to work often enough. We should
# let PDFMiner take care of all this work, though, rather
            # than implementing all the logic (this is complex!) ourselves
color_mode = "L"
elif color_space_family == "pattern":
pass
elif color_space_family == "separation":
pass
elif color_space_family == "devicen":
pass
elif color_space_family == "calgray":
pass
elif color_space_family == "calrgb":
pass
elif color_space_family == "lab":
pass
elif color_space_family == "iccbased":
color_mode = "L"
elif color_space_family == "devicegray":
if bits == 8:
color_mode = "L"
else:
color_mode = "1"
elif color_space_family == "devicergb":
color_mode = "RGB"
elif color_space_family == "devicecmyk":
pass
return color_mode
def _clean_up_stream_attribute(self, attribute):
try:
return str(attribute).strip("/_").lower()
except Exception:
return None
def _decompress(self):
Decompress the image raw data in this image
if self._filter == 'asciihexdecode':
self._raw_data = self._asciihexdecode(self._raw_data)
elif self._filter == 'ascii85decode':
self._raw_data = self._ascii85decode(self._raw_data)
elif self._filter == 'flatedecode':
self._raw_data = zlib.decompress(self._raw_data)
elif self._filter == "ccittfaxdecode":
self._raw_data = ccittfaxdecode(self._raw_data, self._filter_params)
return None
def _determine_image_type(self, stream_first_4_bytes):
Find out the image file type based on the magic number
file_type = None
bytes_as_hex = b2a_hex(stream_first_4_bytes)
if bytes_as_hex.startswith('ffd8'):
file_type = 'jpeg'
elif bytes_as_hex == '89504e47':
file_type = 'png'
elif bytes_as_hex == '47494638':
file_type = 'gif'
elif bytes_as_hex.startswith('424d'):
file_type = 'bmp'
return file_type
def _clean_hexadecimal(self, a):
Read the string, converting the pairs of digits to
characters
b = ''
shift = 4
value = 0
try:
for i in a:
value = value | (hexadecimal[i] << shift)
shift = 4 - shift
if shift == 4:
b = b + chr(value)
value = 0
except ValueError:
raise PDFError("Problem with hexadecimal string %s" % a)
return b
def _asciihexdecode(self, text):
at = text.find('>')
return self._clean_hexadecimal(text[:at].lower())
def _ascii85decode(self, text):
end = text.find('~>')
new = []
i = 0
ch = 0
value = 0
while i < end:
if text[i] == 'z':
if ch != 0:
raise PDFError('Badly encoded ASCII85 format.')
new.append('\000\000\000\000')
ch = 0
value = 0
else:
v = ord(text[i])
if v >= 33 and v <= 117:
if ch == 0:
value = ((v - 33) * base85m4)
elif ch == 1:
value = value + ((v - 33) * base85m3)
elif ch == 2:
value = value + ((v - 33) * base85m2)
elif ch == 3:
value = value + ((v - 33) * 85)
elif ch == 4:
value = value + (v - 33)
c1 = int(value >> 24)
c2 = int((value >> 16) & 255)
c3 = int((value >> 8) & 255)
c4 = int(value & 255)
new.append(chr(c1) + chr(c2) + chr(c3) + chr(c4))
ch = (ch + 1) % 5
i = i + 1
if ch != 0:
c = chr(value >> 24) + chr((value >> 16) & 255) + \
chr((value >> 8) & 255) + chr(value & 255)
new.append(c[:ch - 1])
return "".join(new)
def _get_image(self):
Return an image from this image data.
temp_image = None
image_data = self._stream.get_data()
print "len(image_data)",
print len(image_data)
try:
            # Assume raw image data
# temp_image = Image.frombuffer(self.color_mode,
# (self.width, self.height),
# self._raw_data, "raw",
# self.color_mode, 0, 1)
temp_image = Image.frombuffer(self.color_mode,
(self.width, self.height),
image_data, "raw",
self.color_mode, 0, 1)
except Exception:
# Not raw image data.
# Can we make sense of this stream some other way?
try:
import StringIO
# temp_image = Image.open(StringIO.StringIO(self._raw_data))
temp_image = Image.open(StringIO.StringIO(image_data))
except Exception:
# PIL failed us. Try to print data to a file, and open it
# file_ext = self._determine_image_type(self._raw_data[0:4])
file_ext = self._determine_image_type(image_data[0:4])
if file_ext:
# TODO use tempfile
file_name = os_sep.join(["header", file_ext])
with open("temp/" + file_name, "w") as image_file:
# image_file.write(self._raw_data)
image_file.write(image_data)
temp_image = Image.open(image_file)
return temp_image or None
"""
"""
if "F" in image_obj.stream:
self._filter = self._clean_up_stream_attribute(image_obj.stream["F"])
else:
self._filter = self._clean_up_stream_attribute(image_obj.stream["Filter"])
if "DP" in image_obj.stream:
self._filter_params = image_obj.stream["DP"]
elif "DecodeParms" in image_obj.stream:
self._filter_params = image_obj.stream["DecodeParms"]
elif "FDecodeParms" in image_obj.stream:
self._filter_params = image_obj.stream["FDecodeParms"]
self._bits = image_obj.stream["BitsPerComponent"]
self._raw_data = image_obj.stream.get_rawdata()
if self._filter is not None:
self._decompress()
if "CS" in image_obj.stream:
self.colorspace = image_obj.stream["CS"]
elif "ColorSpace" in image_obj.stream:
self.colorspace = image_obj.stream["ColorSpace"]
else:
self.colorspace = "DeviceGray"
if isinstance(self.colorspace, PDFObjRef):
self.colorspace = self.colorspace.resolve()
self.color_mode = self.get_colormode(self.colorspace,
bits=self._bits)
if self.color_mode is None:
print self.colorspace
raise Exception("No method for handling colorspace")
"""
| __init__ |
command.py | # Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import optparse
import platform
import re
import sys
from event_log import EventLog
from error import NoSuchProjectError
from error import InvalidProjectGroupsError
# Number of projects to submit to a single worker process at a time.
# This number represents a tradeoff between the overhead of IPC and finer
# grained opportunity for parallelism. This particular value was chosen by
# iterating through powers of two until the overall performance no longer
# improved. The performance of this batch size is not a function of the
# number of cores on the system.
WORKER_BATCH_SIZE = 32
# How many jobs to run in parallel by default? This assumes the jobs are
# largely I/O bound and do not hit the network.
DEFAULT_LOCAL_JOBS = min(os.cpu_count(), 8)
class Command(object):
"""Base class for any command line action in repo.
"""
common = False
event_log = EventLog()
manifest = None
_optparse = None
# Whether this command supports running in parallel. If greater than 0,
# it is the number of parallel jobs to default to.
PARALLEL_JOBS = None
def WantPager(self, _opt):
return False
def ReadEnvironmentOptions(self, opts):
""" Set options from environment variables. """
env_options = self._RegisteredEnvironmentOptions()
for env_key, opt_key in env_options.items():
# Get the user-set option value if any
opt_value = getattr(opts, opt_key)
# If the value is set, it means the user has passed it as a command
# line option, and we should use that. Otherwise we can try to set it
# with the value from the corresponding environment variable.
if opt_value is not None:
continue
env_value = os.environ.get(env_key)
if env_value is not None:
setattr(opts, opt_key, env_value)
return opts
@property
def OptionParser(self):
if self._optparse is None:
try:
me = 'repo %s' % self.NAME
usage = self.helpUsage.strip().replace('%prog', me)
except AttributeError:
usage = 'repo %s' % self.NAME
epilog = 'Run `repo help %s` to view the detailed manual.' % self.NAME
self._optparse = optparse.OptionParser(usage=usage, epilog=epilog)
self._Options(self._optparse)
return self._optparse
def _Options(self, p):
"""Initialize the option parser.
"""
if self.PARALLEL_JOBS is not None:
p.add_option(
'-j', '--jobs',
type=int, default=self.PARALLEL_JOBS,
help='number of jobs to run in parallel (default: %s)' % self.PARALLEL_JOBS)
def _RegisteredEnvironmentOptions(self):
"""Get options that can be set from environment variables.
Return a dictionary mapping environment variable name
to option key name that it can override.
Example: {'REPO_MY_OPTION': 'my_option'}
Will allow the option with key value 'my_option' to be set
from the value in the environment variable named 'REPO_MY_OPTION'.
Note: This does not work properly for options that are explicitly
set to None by the user, or options that are defined with a
default value other than None.
"""
return {}
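  # Illustrative sketch (not part of the original file): a subclass opting
  # in to an environment override for a hypothetical --my-option flag.
  #
  #   class MyCommand(Command):
  #     def _RegisteredEnvironmentOptions(self):
  #       return {'REPO_MY_OPTION': 'my_option'}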
def Usage(self):
"""Display usage and terminate.
"""
self.OptionParser.print_usage()
sys.exit(1)
def ValidateOptions(self, opt, args):
"""Validate the user options & arguments before executing.
This is meant to help break the code up into logical steps. Some tips:
* Use self.OptionParser.error to display CLI related errors.
* Adjust opt member defaults as makes sense.
* Adjust the args list, but do so inplace so the caller sees updates.
* Try to avoid updating self state. Leave that to Execute.
"""
def Execute(self, opt, args):
"""Perform the action, after option parsing is complete.
"""
raise NotImplementedError
def _ResetPathToProjectMap(self, projects):
self._by_path = dict((p.worktree, p) for p in projects)
def _UpdatePathToProjectMap(self, project):
self._by_path[project.worktree] = project
def _GetProjectByPath(self, manifest, path):
project = None
if os.path.exists(path):
oldpath = None
while (path and
path != oldpath and
path != manifest.topdir):
try:
project = self._by_path[path]
break
except KeyError:
oldpath = path
path = os.path.dirname(path)
if not project and path == manifest.topdir:
try:
project = self._by_path[path]
except KeyError:
pass
else:
try:
project = self._by_path[path]
except KeyError:
pass
return project
def GetProjects(self, args, manifest=None, groups='', missing_ok=False,
submodules_ok=False):
"""A list of projects that match the arguments.
"""
if not manifest:
|
all_projects_list = manifest.projects
result = []
mp = manifest.manifestProject
if not groups:
groups = manifest.GetGroupsStr()
groups = [x for x in re.split(r'[,\s]+', groups) if x]
if not args:
derived_projects = {}
for project in all_projects_list:
if submodules_ok or project.sync_s:
derived_projects.update((p.name, p)
for p in project.GetDerivedSubprojects())
all_projects_list.extend(derived_projects.values())
for project in all_projects_list:
if (missing_ok or project.Exists) and project.MatchesGroups(groups):
result.append(project)
else:
self._ResetPathToProjectMap(all_projects_list)
for arg in args:
# We have to filter by manifest groups in case the requested project is
# checked out multiple times or differently based on them.
projects = [project for project in manifest.GetProjectsWithName(arg)
if project.MatchesGroups(groups)]
if not projects:
path = os.path.abspath(arg).replace('\\', '/')
project = self._GetProjectByPath(manifest, path)
# If it's not a derived project, update path->project mapping and
# search again, as arg might actually point to a derived subproject.
if (project and not project.Derived and (submodules_ok or
project.sync_s)):
search_again = False
for subproject in project.GetDerivedSubprojects():
self._UpdatePathToProjectMap(subproject)
search_again = True
if search_again:
project = self._GetProjectByPath(manifest, path) or project
if project:
projects = [project]
if not projects:
raise NoSuchProjectError(arg)
for project in projects:
if not missing_ok and not project.Exists:
raise NoSuchProjectError('%s (%s)' % (arg, project.relpath))
if not project.MatchesGroups(groups):
raise InvalidProjectGroupsError(arg)
result.extend(projects)
def _getpath(x):
return x.relpath
result.sort(key=_getpath)
return result
def FindProjects(self, args, inverse=False):
result = []
patterns = [re.compile(r'%s' % a, re.IGNORECASE) for a in args]
for project in self.GetProjects(''):
for pattern in patterns:
match = pattern.search(project.name) or pattern.search(project.relpath)
if not inverse and match:
result.append(project)
break
if inverse and match:
break
else:
if inverse:
result.append(project)
result.sort(key=lambda project: project.relpath)
return result
class InteractiveCommand(Command):
"""Command which requires user interaction on the tty and
must not run within a pager, even if the user asks to.
"""
def WantPager(self, _opt):
return False
class PagedCommand(Command):
"""Command which defaults to output in a pager, as its
display tends to be larger than one screen full.
"""
def WantPager(self, _opt):
return True
class MirrorSafeCommand(object):
"""Command permits itself to run within a mirror,
and does not require a working directory.
"""
class GitcAvailableCommand(object):
"""Command that requires GITC to be available, but does
not require the local client to be a GITC client.
"""
class GitcClientCommand(object):
"""Command that requires the local client to be a GITC
client.
"""
| manifest = self.manifest |
lib.rs | use proc_macro::TokenStream;
use proc_macro2::TokenStream as TokenStream2;
use quote::quote;
use std::collections::HashSet;
use syn::{parse_macro_input, parse_quote, spanned::Spanned, DeriveInput, Result};
#[proc_macro_derive(CustomDebug, attributes(debug))]
pub fn derive(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as DeriveInput);
match custom_debug(input) {
Ok(token) => token,
Err(err) => err.to_compile_error(),
}.into()
}
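// Usage sketch (illustrative; the struct and field names are hypothetical):
// deriving the custom Debug impl, with `#[debug = "..."]` overriding a
// field's format string.
//
//     #[derive(CustomDebug)]
//     struct Packet {
//         header: String,
//         #[debug = "0b{:08b}"]
//         flags: u8,
//     }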
mod bound;
mod generics;
fn custom_debug(mut input: DeriveInput) -> Result<TokenStream2> {
use syn::{Data, DataStruct, Fields, FieldsNamed};
if let Data::Struct(DataStruct { fields: Fields::Named(FieldsNamed { named, .. }), .. }) = &input.data {
let (ident, generics) = (&input.ident, &mut input.generics);
let mut opt = bound::struct_attr(&input.attrs);
        // Build the tokens used inside the fmt method
let ident_str = ident.to_string();
let field_idents = named.iter().map(|f| f.ident.as_ref().unwrap());
let field_idents_str = field_idents.clone().map(|i| i.to_string());
let field_rhs = field_idents.zip(named.iter().map(|f| f.attrs.as_slice()))
.map(|(i, a)| attr_debug(a, i, &mut opt))
.collect::<Result<Vec<_>>>()?;
        // Relax the T: Debug bound when certain generic associated types are involved
let mut associated = HashSet::with_capacity(8);
let (mut bound_where_clause, bound_generics) = opt.unwrap_or_default();
let closure = |g: &mut syn::TypeParam| {
generics::add_debug(g, named.iter().map(|f| &f.ty), &mut associated, &bound_generics)
};
generics.type_params_mut().for_each(closure);
let (impl_generics, ty_generics, where_clause) = generics.split_for_impl();
let mut where_clause = where_clause.cloned().unwrap_or_else(|| parse_quote! { where });
let convert = |ty: &syn::Type| -> syn::WherePredicate { parse_quote!(#ty: ::std::fmt::Debug) };
bound_where_clause.extend(associated.into_iter().map(convert));
where_clause.predicates.extend(bound_where_clause);
Ok(quote! {
impl #impl_generics ::std::fmt::Debug for #ident #ty_generics #where_clause {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::result::Result<(), ::std::fmt::Error> {
f.debug_struct(&#ident_str)
#(
.field(&#field_idents_str, #field_rhs)
)*
.finish()
}
}
})
} else {
Err(syn::Error::new(input.span(), "Named Str | n::Ident, opt_preds_ident: &mut bound::OptPredsIdent)
-> Result<TokenStream2> {
use syn::{Lit, LitStr, Meta, MetaNameValue};
fn debug(attr: &syn::Attribute, opt_preds_ident: &mut bound::OptPredsIdent) -> Option<Result<LitStr>> {
match attr.parse_meta() {
Ok(Meta::NameValue(MetaNameValue { path, lit: Lit::Str(s), .. })) if path.is_ident("debug") => {
Some(Ok(s))
}
Ok(meta) => bound::field_attr(meta, opt_preds_ident),
_ => Some(Err(syn::Error::new(attr.span(), "failed to parse attr meta"))),
}
}
match attrs.iter().find_map(|attr| debug(attr, opt_preds_ident)) {
None => Ok(quote! { &self.#ident }),
Some(Ok(fmt)) => Ok(quote! { &::std::format_args!(#fmt, self.#ident) }),
Some(Err(err)) => Err(err),
}
}
| uct Only :)"))
}
}
fn attr_debug(attrs: &[syn::Attribute], ident: &sy |
coordinator.rs | use crate::integrity::*;
use hdk::prelude::*;
use EntryZomes::*;
#[hdk_dependent_entry_types]
enum EntryZomes {
IntegrityCrd(EntryTypes),
}
#[hdk_extern]
fn create(_: ()) -> ExternResult<HeaderHash> {
create_entry(&IntegrityCrd(EntryTypes::Thing(Thing)))
}
/// `read` seems to be a reserved word that causes a SIGSEGV (invalid memory reference) when used as `#[hdk_extern]`
#[hdk_extern]
fn reed(header_hash: HeaderHash) -> ExternResult<Option<Element>> {
get(header_hash, GetOptions::latest())
}
#[hdk_extern]
fn delete_via_hash(header_hash: HeaderHash) -> ExternResult<HeaderHash> {
delete_entry(header_hash)
}
#[hdk_extern]
fn delete_via_input(delete_input: DeleteInput) -> ExternResult<HeaderHash> {
delete_entry(delete_input)
}
#[cfg(all(test, feature = "mock"))]
pub mod test {
use ::fixt::prelude::*;
use hdk::prelude::*;
#[test]
fn create_smoke() |
#[test]
fn get_smoke() {
let mut mock_hdk = hdk::prelude::MockHdkT::new();
let input_header_hash = fixt!(HeaderHash);
mock_hdk
.expect_get()
.with(hdk::prelude::mockall::predicate::eq(vec![GetInput::new(
input_header_hash.clone().into(),
GetOptions::latest(),
)]))
.times(1)
.return_once(move |_| Ok(vec![None]));
hdk::prelude::set_hdk(mock_hdk);
let result = super::reed(input_header_hash);
assert_eq!(result, Ok(None))
}
#[test]
fn delete_hash_smoke() {
let mut mock_hdk = hdk::prelude::MockHdkT::new();
let input_header_hash = fixt!(HeaderHash);
let output_header_hash = fixt!(HeaderHash);
let output_header_hash_closure = output_header_hash.clone();
mock_hdk
.expect_delete()
.with(hdk::prelude::mockall::predicate::eq(DeleteInput::new(
input_header_hash.clone(),
ChainTopOrdering::default(),
)))
.times(1)
.return_once(move |_| Ok(output_header_hash_closure));
hdk::prelude::set_hdk(mock_hdk);
let result = super::delete_via_hash(input_header_hash);
assert_eq!(result, Ok(output_header_hash))
}
#[test]
fn delete_input_smoke() {
let mut mock_hdk = hdk::prelude::MockHdkT::new();
let input_header_hash = fixt!(HeaderHash);
let output_header_hash = fixt!(HeaderHash);
let output_header_hash_closure = output_header_hash.clone();
mock_hdk
.expect_delete()
.with(hdk::prelude::mockall::predicate::eq(DeleteInput::new(
input_header_hash.clone(),
ChainTopOrdering::Relaxed,
)))
.times(1)
.return_once(move |_| Ok(output_header_hash_closure));
hdk::prelude::set_hdk(mock_hdk);
let input = DeleteInput {
deletes_header_hash: input_header_hash,
chain_top_ordering: ChainTopOrdering::Relaxed,
};
let result = super::delete_via_input(input);
assert_eq!(result, Ok(output_header_hash))
}
}
| {
let mut mock_hdk = hdk::prelude::MockHdkT::new();
let thing = EntryTypes::Thing(Thing);
let header_hash = fixt!(HeaderHash);
let closure_header_hash = header_hash.clone();
mock_hdk
.expect_create()
.with(hdk::prelude::mockall::predicate::eq(CreateInput {
entry_def_id: thing.entry_def_id(),
entry: thing.try_into().unwrap(),
chain_top_ordering: Default::default(),
}))
.times(1)
.return_once(move |_| Ok(closure_header_hash));
hdk::prelude::set_hdk(mock_hdk);
let result = super::create(());
assert_eq!(result, Ok(header_hash))
} |
invalid-punct-ident-3.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:invalid-punct-ident.rs
#[macro_use]
extern crate invalid_punct_ident; |
invalid_raw_ident!(); //~ ERROR proc macro panicked |
|
issuer.go | /*
Copyright 2020 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filtered
import (
context "context"
apiscertmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
cache "k8s.io/client-go/tools/cache"
versioned "knative.dev/net-certmanager/pkg/client/certmanager/clientset/versioned"
v1 "knative.dev/net-certmanager/pkg/client/certmanager/informers/externalversions/certmanager/v1"
client "knative.dev/net-certmanager/pkg/client/certmanager/injection/client"
filtered "knative.dev/net-certmanager/pkg/client/certmanager/injection/informers/factory/filtered"
certmanagerv1 "knative.dev/net-certmanager/pkg/client/certmanager/listers/certmanager/v1"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
injection.Default.RegisterFilteredInformers(withInformer)
injection.Dynamic.RegisterDynamicInformer(withDynamicInformer)
}
// Key is used for associating the Informer inside the context.Context.
type Key struct {
Selector string
}
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
untyped := ctx.Value(filtered.LabelKey{})
if untyped == nil {
logging.FromContext(ctx).Panic(
"Unable to fetch labelkey from context.")
}
labelSelectors := untyped.([]string)
infs := []controller.Informer{}
for _, selector := range labelSelectors {
f := filtered.Get(ctx, selector)
inf := f.Certmanager().V1().Issuers()
ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
infs = append(infs, inf.Informer())
}
return ctx, infs
}
func withDynamicInformer(ctx context.Context) context.Context {
untyped := ctx.Value(filtered.LabelKey{})
if untyped == nil {
logging.FromContext(ctx).Panic(
"Unable to fetch labelkey from context.")
}
labelSelectors := untyped.([]string)
for _, selector := range labelSelectors {
inf := &wrapper{client: client.Get(ctx), selector: selector}
ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
}
return ctx
}
// Get extracts the typed informer from the context.
func Get(ctx context.Context, selector string) v1.IssuerInformer {
untyped := ctx.Value(Key{Selector: selector})
if untyped == nil |
return untyped.(v1.IssuerInformer)
}
type wrapper struct {
client versioned.Interface
namespace string
selector string
}
var _ v1.IssuerInformer = (*wrapper)(nil)
var _ certmanagerv1.IssuerLister = (*wrapper)(nil)
func (w *wrapper) Informer() cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(nil, &apiscertmanagerv1.Issuer{}, 0, nil)
}
func (w *wrapper) Lister() certmanagerv1.IssuerLister {
return w
}
func (w *wrapper) Issuers(namespace string) certmanagerv1.IssuerNamespaceLister {
return &wrapper{client: w.client, namespace: namespace, selector: w.selector}
}
func (w *wrapper) List(selector labels.Selector) (ret []*apiscertmanagerv1.Issuer, err error) {
reqs, err := labels.ParseToRequirements(w.selector)
if err != nil {
return nil, err
}
selector = selector.Add(reqs...)
lo, err := w.client.CertmanagerV1().Issuers(w.namespace).List(context.TODO(), metav1.ListOptions{
LabelSelector: selector.String(),
// TODO(mattmoor): Incorporate resourceVersion bounds based on staleness criteria.
})
if err != nil {
return nil, err
}
for idx := range lo.Items {
ret = append(ret, &lo.Items[idx])
}
return ret, nil
}
func (w *wrapper) Get(name string) (*apiscertmanagerv1.Issuer, error) {
// TODO(mattmoor): Check that the fetched object matches the selector.
return w.client.CertmanagerV1().Issuers(w.namespace).Get(context.TODO(), name, metav1.GetOptions{
// TODO(mattmoor): Incorporate resourceVersion bounds based on staleness criteria.
})
}
| {
logging.FromContext(ctx).Panicf(
"Unable to fetch knative.dev/net-certmanager/pkg/client/certmanager/informers/externalversions/certmanager/v1.IssuerInformer with selector %s from context.", selector)
} |
test.rs | use crate::known_deep_size;
use crate::DeepSizeOf;
use alloc::{boxed::Box, string::String, vec};
use core::mem::size_of;
#[test]
fn primitive_types() {
assert_eq!(0u8.deep_size_of(), 1);
assert_eq!(0u16.deep_size_of(), 2);
assert_eq!(0u32.deep_size_of(), 4);
assert_eq!(0u64.deep_size_of(), 8);
assert_eq!(0usize.deep_size_of(), size_of::<usize>());
assert_eq!(0i8.deep_size_of(), 1);
assert_eq!(0i16.deep_size_of(), 2);
assert_eq!(0i32.deep_size_of(), 4);
assert_eq!(0i64.deep_size_of(), 8);
assert_eq!(0isize.deep_size_of(), size_of::<isize>());
assert_eq!(0f32.deep_size_of(), 4);
assert_eq!(0f64.deep_size_of(), 8);
assert_eq!('f'.deep_size_of(), 4);
assert_eq!("Hello World!".deep_size_of(), 12);
assert_eq!((&"Hello World!").deep_size_of(), 16);
assert_eq!(true.deep_size_of(), 1);
}
#[test]
fn boxes() {
let boxed = Box::new(0u32);
assert_eq!(boxed.deep_size_of(), 4 + size_of::<usize>());
}
#[test]
fn arcs() {
use std::sync::Arc;
let test: Arc<[u32]> = vec![1, 2, 3].into();
let multiple = (Arc::clone(&test), Arc::clone(&test), test);
assert_eq!(
multiple.deep_size_of(),
3 * size_of::<Arc<[u32]>>() + 3 * size_of::<u32>()
);
}
#[test]
fn slices() {
let array: Box<[u32]> = vec![0; 64].into_boxed_slice();
assert_eq!(array[5..10].deep_size_of(), 4 * 5);
assert_eq!(array[..32].deep_size_of(), 4 * 32);
assert_eq!(
DeepSizeOf::deep_size_of(&array),
size_of::<usize>() * 2 + size_of::<[u32; 64]>()
);
let array: Box<[u32]> = vec![0; 1000].into_boxed_slice();
assert_eq!(
DeepSizeOf::deep_size_of(&array),
size_of::<usize>() * 2 + size_of::<[u32; 1000]>()
);
}
// TODO: find edge cases
#[test]
fn alignment() {
#[repr(align(256))]
struct | (u8);
known_deep_size!(0; Test);
struct Test2(Test, u8);
known_deep_size!(0; Test2);
let array: [Test; 3] = [Test(5), Test(16), Test(2)];
assert_eq!(size_of::<[Test; 3]>(), array.deep_size_of());
let vec = vec![Test(5), Test(16), Test(2)];
assert_eq!(vec.deep_size_of(), 256 * 3 + 24);
let vec = vec![Test2(Test(5), 0), Test2(Test(16), 0), Test2(Test(2), 0)];
assert_eq!(vec.deep_size_of(), 512 * 3 + 24);
}
#[test]
fn strings() {
let string_a = String::from("01234567");
let string_b = String::from("0123456789012345");
assert_eq!(string_a.deep_size_of(), size_of::<String>() + 8);
assert_eq!(string_b.deep_size_of(), size_of::<String>() + 16);
}
#[test]
fn tuples() {
// Alignment - ######## #.##....
let non_allocating = (45u64, (), (8u8, 16u16));
let text = "Hello World";
let allocating = (Box::new(42u32), String::from(text));
assert_eq!(
non_allocating.deep_size_of(),
size_of::<(u64, (), (u8, u16))>()
);
assert_eq!(
allocating.deep_size_of(),
size_of::<(Box<()>, String)>() + text.len() + size_of::<u32>()
);
}
mod context_tests {
use crate::Context;
#[test]
fn context_arc_test() {
let mut context = Context::new();
let arc = alloc::sync::Arc::new(15);
assert_eq!(context.contains_arc(&arc), false);
context.add_arc(&arc);
assert_eq!(context.contains_arc(&arc), true);
}
#[test]
fn context_rc_test() {
let mut context = Context::new();
let rc = alloc::rc::Rc::new(15);
assert_eq!(context.contains_rc(&rc), false);
context.add_rc(&rc);
assert_eq!(context.contains_rc(&rc), true);
}
}
#[cfg(feature = "derive")]
mod test_derive {
use super::*;
#[test]
fn test_1() {
#[derive(DeepSizeOf)]
struct Example<'a>(&'a u32, &'a u32);
let number = &42;
let example = Example(number, number);
let size = example.deep_size_of();
// Data past references is not counted
assert_eq!(size, 2 * size_of::<usize>());
}
#[test]
fn test_enum() {
#[derive(DeepSizeOf)]
enum ExampleEnum {
One,
Two(),
Three(u32, Box<u8>),
Four { name: Box<u32> },
Five {},
}
let variant_one = ExampleEnum::One;
let variant_two = ExampleEnum::Two();
let variant_three = ExampleEnum::Three(0, Box::new(255));
let variant_four = ExampleEnum::Four {
name: Box::new(65536),
};
let variant_five = ExampleEnum::Five {};
assert_eq!(variant_one.deep_size_of(), size_of::<ExampleEnum>());
assert_eq!(variant_two.deep_size_of(), size_of::<ExampleEnum>());
assert_eq!(
variant_three.deep_size_of(),
size_of::<ExampleEnum>() + size_of::<u8>()
);
assert_eq!(
variant_four.deep_size_of(),
size_of::<ExampleEnum>() + size_of::<u32>()
);
assert_eq!(variant_five.deep_size_of(), size_of::<ExampleEnum>());
}
}
build.rs | use std::path::Path;
extern crate cc;
fn main() {
let src_dir = Path::new("src");
let mut c_config = cc::Build::new();
c_config.include(&src_dir);
c_config
.flag_if_supported("-Wno-unused-parameter")
.flag_if_supported("-Wno-unused-but-set-variable")
.flag_if_supported("-Wno-trigraphs");
let parser_path = src_dir.join("parser.c");
c_config.file(&parser_path);
let scanner_path = src_dir.join("scanner.c");
c_config.file(&scanner_path);
println!("cargo:rerun-if-changed={}", parser_path.to_str().unwrap());
c_config.compile("parser-scanner");
}
pcc_lpspi1.rs | #[doc = "Register `PCC_LPSPI1` reader"]
pub struct R(crate::R<PCC_LPSPI1_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<PCC_LPSPI1_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<PCC_LPSPI1_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<PCC_LPSPI1_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `PCC_LPSPI1` writer"]
pub struct W(crate::W<PCC_LPSPI1_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<PCC_LPSPI1_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<PCC_LPSPI1_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<PCC_LPSPI1_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Peripheral Clock Source Select\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum PCS_A {
#[doc = "0: Clock is off."]
CLOCK_OFF = 0,
#[doc = "1: Clock option 1"]
CLOCK_OPT1 = 1,
#[doc = "2: Clock option 2"]
CLOCK_OPT2 = 2,
#[doc = "3: Clock option 3"]
CLOCK_OPT3 = 3,
#[doc = "4: Clock option 4"]
CLOCK_OPT4 = 4,
#[doc = "5: Clock option 5"]
CLOCK_OPT5 = 5,
#[doc = "6: Clock option 6"]
CLOCK_OPT6 = 6,
#[doc = "7: Clock option 7"]
CLOCK_OPT7 = 7,
}
impl From<PCS_A> for u8 {
#[inline(always)]
fn from(variant: PCS_A) -> Self {
variant as _
}
}
#[doc = "Field `PCS` reader - Peripheral Clock Source Select"]
pub struct PCS_R(crate::FieldReader<u8, PCS_A>);
impl PCS_R {
#[inline(always)]
pub(crate) fn new(bits: u8) -> Self {
PCS_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PCS_A {
match self.bits {
0 => PCS_A::CLOCK_OFF,
1 => PCS_A::CLOCK_OPT1,
2 => PCS_A::CLOCK_OPT2,
3 => PCS_A::CLOCK_OPT3,
4 => PCS_A::CLOCK_OPT4,
5 => PCS_A::CLOCK_OPT5,
6 => PCS_A::CLOCK_OPT6,
7 => PCS_A::CLOCK_OPT7,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `CLOCK_OFF`"]
#[inline(always)]
pub fn is_clock_off(&self) -> bool {
**self == PCS_A::CLOCK_OFF
}
#[doc = "Checks if the value of the field is `CLOCK_OPT1`"]
#[inline(always)]
pub fn is_clock_opt1(&self) -> bool {
**self == PCS_A::CLOCK_OPT1
}
#[doc = "Checks if the value of the field is `CLOCK_OPT2`"]
#[inline(always)]
pub fn is_clock_opt2(&self) -> bool {
**self == PCS_A::CLOCK_OPT2
}
#[doc = "Checks if the value of the field is `CLOCK_OPT3`"]
#[inline(always)]
pub fn is_clock_opt3(&self) -> bool {
**self == PCS_A::CLOCK_OPT3
}
#[doc = "Checks if the value of the field is `CLOCK_OPT4`"]
#[inline(always)]
pub fn is_clock_opt4(&self) -> bool {
**self == PCS_A::CLOCK_OPT4
}
#[doc = "Checks if the value of the field is `CLOCK_OPT5`"]
#[inline(always)]
pub fn is_clock_opt5(&self) -> bool {
**self == PCS_A::CLOCK_OPT5
}
#[doc = "Checks if the value of the field is `CLOCK_OPT6`"]
#[inline(always)]
pub fn is_clock_opt6(&self) -> bool {
**self == PCS_A::CLOCK_OPT6
}
#[doc = "Checks if the value of the field is `CLOCK_OPT7`"]
#[inline(always)]
pub fn is_clock_opt7(&self) -> bool {
**self == PCS_A::CLOCK_OPT7
}
}
impl core::ops::Deref for PCS_R {
type Target = crate::FieldReader<u8, PCS_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `PCS` writer - Peripheral Clock Source Select"]
pub struct PCS_W<'a> {
w: &'a mut W,
}
impl<'a> PCS_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PCS_A) -> &'a mut W {
self.bits(variant.into())
}
#[doc = "Clock is off."]
#[inline(always)]
pub fn clock_off(self) -> &'a mut W {
self.variant(PCS_A::CLOCK_OFF)
}
#[doc = "Clock option 1"]
#[inline(always)]
pub fn clock_opt1(self) -> &'a mut W {
self.variant(PCS_A::CLOCK_OPT1)
}
#[doc = "Clock option 2"]
#[inline(always)]
pub fn clock_opt2(self) -> &'a mut W {
self.variant(PCS_A::CLOCK_OPT2)
}
#[doc = "Clock option 3"]
#[inline(always)]
pub fn clock_opt3(self) -> &'a mut W {
self.variant(PCS_A::CLOCK_OPT3)
}
#[doc = "Clock option 4"]
#[inline(always)]
pub fn clock_opt4(self) -> &'a mut W {
self.variant(PCS_A::CLOCK_OPT4)
}
#[doc = "Clock option 5"]
#[inline(always)]
pub fn clock_opt5(self) -> &'a mut W {
self.variant(PCS_A::CLOCK_OPT5)
}
#[doc = "Clock option 6"]
#[inline(always)]
pub fn clock_opt6(self) -> &'a mut W {
self.variant(PCS_A::CLOCK_OPT6)
}
#[doc = "Clock option 7"]
#[inline(always)]
pub fn clock_opt7(self) -> &'a mut W {
self.variant(PCS_A::CLOCK_OPT7)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x07 << 24)) | ((value as u32 & 0x07) << 24);
self.w
}
}
#[doc = "Clock Gate Control\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CGC_A {
#[doc = "0: Clock disabled. The current clock selection and divider options are not locked and can be modified."]
DISABLED = 0,
#[doc = "1: Clock enabled. The current clock selection and divider options are locked and cannot be modified."]
ENABLED = 1,
}
impl From<CGC_A> for bool {
#[inline(always)]
fn from(variant: CGC_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `CGC` reader - Clock Gate Control"]
pub struct CGC_R(crate::FieldReader<bool, CGC_A>);
impl CGC_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
CGC_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> CGC_A {
match self.bits {
false => CGC_A::DISABLED,
true => CGC_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `DISABLED`"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
**self == CGC_A::DISABLED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
**self == CGC_A::ENABLED
}
}
impl core::ops::Deref for CGC_R {
type Target = crate::FieldReader<bool, CGC_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `CGC` writer - Clock Gate Control"]
pub struct CGC_W<'a> {
w: &'a mut W,
}
impl<'a> CGC_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: CGC_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Clock disabled. The current clock selection and divider options are not locked and can be modified."]
#[inline(always)]
pub fn disabled(self) -> &'a mut W {
self.variant(CGC_A::DISABLED)
}
#[doc = "Clock enabled. The current clock selection and divider options are locked and cannot be modified."]
#[inline(always)]
pub fn enabled(self) -> &'a mut W {
self.variant(CGC_A::ENABLED)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 30)) | ((value as u32 & 0x01) << 30);
self.w
}
}
#[doc = "Present\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PR_A {
#[doc = "0: Peripheral is not present."]
NOT_PRESENT = 0,
#[doc = "1: Peripheral is present."]
PRESENT = 1,
}
impl From<PR_A> for bool {
#[inline(always)]
fn from(variant: PR_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `PR` reader - Present"]
pub struct PR_R(crate::FieldReader<bool, PR_A>);
impl PR_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
PR_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PR_A {
match self.bits {
false => PR_A::NOT_PRESENT,
true => PR_A::PRESENT,
}
}
#[doc = "Checks if the value of the field is `NOT_PRESENT`"]
#[inline(always)]
pub fn is_not_present(&self) -> bool {
**self == PR_A::NOT_PRESENT
}
#[doc = "Checks if the value of the field is `PRESENT`"]
#[inline(always)]
pub fn is_present(&self) -> bool {
**self == PR_A::PRESENT
}
}
impl core::ops::Deref for PR_R {
type Target = crate::FieldReader<bool, PR_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl R {
#[doc = "Bits 24:26 - Peripheral Clock Source Select"]
#[inline(always)]
pub fn pcs(&self) -> PCS_R {
PCS_R::new(((self.bits >> 24) & 0x07) as u8)
}
#[doc = "Bit 30 - Clock Gate Control"]
#[inline(always)]
pub fn cgc(&self) -> CGC_R {
CGC_R::new(((self.bits >> 30) & 0x01) != 0)
}
#[doc = "Bit 31 - Present"]
#[inline(always)]
pub fn pr(&self) -> PR_R {
PR_R::new(((self.bits >> 31) & 0x01) != 0)
}
}
impl W {
#[doc = "Bits 24:26 - Peripheral Clock Source Select"]
#[inline(always)]
pub fn pcs(&mut self) -> PCS_W {
PCS_W { w: self }
}
#[doc = "Bit 30 - Clock Gate Control"]
#[inline(always)]
pub fn cgc(&mut self) -> CGC_W {
CGC_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "PCC LPSPI1 Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [pcc_lpspi1](index.html) module"]
pub struct PCC_LPSPI1_SPEC;
impl crate::RegisterSpec for PCC_LPSPI1_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [pcc_lpspi1::R](R) reader structure"]
impl crate::Readable for PCC_LPSPI1_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [pcc_lpspi1::W](W) writer structure"]
impl crate::Writable for PCC_LPSPI1_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets PCC_LPSPI1 to value 0x8000_0000"]
impl crate::Resettable for PCC_LPSPI1_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0x8000_0000
}
}
index.tsx | import * as React from 'react'
import * as ReactDOM from 'react-dom'
import { App } from './components/App/index'
import './styles.css'
if ('serviceWorker' in navigator) {
navigator.serviceWorker.register('./sw.js')
.then(() => navigator.serviceWorker.ready.then((worker) => {
worker.sync.register('syncdata');
}))
.catch((err) => console.log(err));
}
const root = document.getElementById('app')
ReactDOM.render(
(<App />),
root
)
train_model.py | # Thanks: https://machinelearningmastery.com/how-to-develop-a-cnn-from-scratch-for-fashion-mnist-clothing-classification/
# model with double the filters for the fashion mnist dataset
import cv2
import glob
import argparse
import numpy as np
from numpy import mean
from numpy import std
from numpy import argmax
from matplotlib import pyplot
from sklearn.model_selection import KFold
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
from keras.models import Sequential
from keras.models import load_model
from keras.utils import to_categorical
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.layers import Conv2D, Dropout, MaxPooling2D, Dense, Flatten
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--h', type=int, default=48)
parser.add_argument('--w', type=int, default=48)
args = parser.parse_args()
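# Example invocation (assumes ./training_data/<class>/ and ./testing_data/<class>/
# folders of .jpg images, matching the globs used below):
#   python train_model.py --epochs 100 --batch_size 32 --h 48 --w 48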
# define dnn model (simple)
def define_model(number_classes):
model = Sequential()
# model.add(Conv2D(64, (3, 3), padding='same', activation='relu', kernel_initializer='he_uniform', input_shape=(args.h, args.w, 1)))
# model.add(MaxPooling2D((2, 2)))
model.add(Flatten(input_shape=(args.h, args.w, 1)))
model.add(Dense(500, activation='relu', kernel_initializer='he_uniform'))
# model.add(Dropout(0.2))
model.add(Dense(500, activation='relu', kernel_initializer='he_uniform'))
# model.add(Dropout(0.2))
model.add(Dense(number_classes, activation='softmax'))
opt = Adam(lr=0.0001)
# compile model
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
return model
# get the classes
def get_classes(dataset):
# Get the class names from the folder names
classes = glob.glob(dataset)
classes.sort()
for i in range(len(classes)):
classes[i] = classes[i][:-1]
pos = classes[i].rfind('/')
classes[i] = classes[i][pos+1:]
return classes
# load and prepare the image
def load_image(filename):
# load the image
img = load_img(filename, grayscale=True, target_size=(args.h, args.w))
# convert to array
img = img_to_array(img)
# reshape into a single sample with 1 channel
img = img.reshape(1, args.h, args.w, 1)
# prepare pixel data
img = img.astype('float32')
img = img / 255.0
return img
# convert a folder to an array
def folder_to_array(file_names, classes):
x = []
y = []
for f in file_names:
# Create data
image = load_image(f)
x.append(image)
# Create label
label = []
# Get the subfolder
folder_name = f
pos = folder_name.rfind('/')
folder_name = folder_name[:pos]
pos = folder_name.rfind('/')
folder_name = folder_name[pos+1:]
# Check if the name is in the subfolder
for c in classes:
if c in folder_name:
label.append(1)
else:
label.append(0)
y.append(label)
x = np.array(x, dtype='float64')
y = np.array(y, dtype='int64')
return x, y
# load the dataset from the folders
def load_dataset():
# Get the classes
classes = get_classes("./training_data/*/")
print("Classes: " + str(classes))
# Create the training data
training_files = glob.glob("./training_data/*/*.jp*") # your image path
trainX, trainY = folder_to_array(training_files, classes)
# Create the testing data
testing_files = glob.glob("./testing_data/*/*.jp*") # your image path
testX, testY = folder_to_array(testing_files, classes)
# Shuffle the data
idx = np.random.permutation(len(trainX))
trainX, trainY = trainX[idx], trainY[idx]
trainX = trainX.reshape((trainX.shape[0], args.h, args.w, 1))
testX = testX.reshape((testX.shape[0], args.h, args.w, 1))
print("Training data shape: " + str(trainX.shape))
print("Training label shape: " + str(trainY.shape))
print("Test data shape: " + str(testX.shape))
print("Test label shape: " + str(testY.shape))
return trainX, trainY, testX, testY
# plot diagnostic learning curves
def summarize_diagnostics(history):
# plot loss
pyplot.subplot(111)
pyplot.title('Classification Accuracy')
pyplot.plot(history.history['acc'], color='blue', label='training accuracy')
pyplot.plot(history.history['val_acc'], color='orange', label='validation accuracy')
pyplot.legend()
pyplot.show()
# summarize model performance
def summarize_performance(scores):
# print summary
print('Accuracy: mean=%.3f std=%.3f, n=%d' % (mean(scores)*100, std(scores)*100, len(scores)))
# box and whisker plots of results
pyplot.boxplot(scores)
pyplot.show()
# run the training and save a model
def run_training():
# load dataset
trainX, trainY, testX, testY = load_dataset()
# define model
model = define_model(number_classes=len(testY[0]))
# Define early stopping
callback = EarlyStopping(monitor="val_acc", patience=250)
# fit model
history = model.fit(trainX, trainY, epochs=args.epochs, batch_size=args.batch_size, verbose=1, validation_split=0.1, shuffle=True, callbacks=[callback])
# save model
print(model.summary())
model.save('marine_model.h5')
# Display the training data
summarize_diagnostics(history)
# run for evaluating a model
def run_testing():
# load dataset
trainX, trainY, testX, testY = load_dataset()
# load model
model = load_model('marine_model.h5')
# evaluate model on test dataset
_, acc = model.evaluate(testX, testY, verbose=1)
print('Test Accuracy: ' + str(acc * 100.0))
# load an image and predict the class
def run_single_image():
classes = get_classes("./training_data/*/")
# load model
model = load_model('marine_model.h5')
# For all images in single_prediction
sample_images = glob.glob("./testing_data/*.jp*")
for img_name in sample_images:
# Load the image
image = load_image(img_name)
# predict the class
prediction = model.predict(image)
result = argmax(prediction, axis=-1)
print('Single image class (' + img_name + '): ' + str(classes[result[0]]))
# Running the code
run_training()
run_testing()
run_single_image()
api_mock.go | package diskmaker
import (
localv1 "github.com/openshift/local-storage-operator/pkg/apis/local/v1"
"github.com/openshift/local-storage-operator/pkg/apis/local/v1alpha1"
"k8s.io/apimachinery/pkg/runtime"
)
// MockAPIUpdater mocks all the ApiUpdater Commands
type MockAPIUpdater struct {
events []*DiskEvent
MockGetDiscoveryResult func(name, namespace string) (*v1alpha1.LocalVolumeDiscoveryResult, error)
MockCreateDiscoveryResult func(lvdr *v1alpha1.LocalVolumeDiscoveryResult) error
MockUpdateDiscoveryResultStatus func(lvdr *v1alpha1.LocalVolumeDiscoveryResult) error
MockUpdateDiscoveryResult func(lvdr *v1alpha1.LocalVolumeDiscoveryResult) error
MockGetLocalVolumeDiscovery func(name, namespace string) (*v1alpha1.LocalVolumeDiscovery, error)
}
var _ ApiUpdater = &MockAPIUpdater{}
func (f *MockAPIUpdater) recordEvent(obj runtime.Object, e *DiskEvent) {
f.events = append(f.events, e)
}
func (f *MockAPIUpdater) getLocalVolume(lv *localv1.LocalVolume) (*localv1.LocalVolume, error) {
return lv, nil
}
// GetDiscoveryResult mocks GetDiscoveryResult
func (f *MockAPIUpdater) GetDiscoveryResult(name, namespace string) (*v1alpha1.LocalVolumeDiscoveryResult, error) {
if f.MockGetDiscoveryResult != nil {
return f.MockGetDiscoveryResult(name, namespace)
}
return &v1alpha1.LocalVolumeDiscoveryResult{}, nil
}
// CreateDiscoveryResult mocks CreateDiscoveryResult
func (f *MockAPIUpdater) CreateDiscoveryResult(lvdr *v1alpha1.LocalVolumeDiscoveryResult) error {
if f.MockCreateDiscoveryResult != nil {
return f.MockCreateDiscoveryResult(lvdr)
}
return nil
}
// UpdateDiscoveryResultStatus mocks UpdateDiscoveryResultStatus
func (f *MockAPIUpdater) UpdateDiscoveryResultStatus(lvdr *v1alpha1.LocalVolumeDiscoveryResult) error {
if f.MockUpdateDiscoveryResultStatus != nil {
return f.MockUpdateDiscoveryResultStatus(lvdr)
}
return nil
}
// UpdateDiscoveryResult mocks UpdateDiscoveryResult
func (f *MockAPIUpdater) UpdateDiscoveryResult(lvdr *v1alpha1.LocalVolumeDiscoveryResult) error {
if f.MockUpdateDiscoveryResult != nil {
return f.MockUpdateDiscoveryResult(lvdr)
}
return nil
}
// GetLocalVolumeDiscovery mocks GetLocalVolumeDiscovery
func (f *MockAPIUpdater) GetLocalVolumeDiscovery(name, namespace string) (*v1alpha1.LocalVolumeDiscovery, error) {
if f.MockGetLocalVolumeDiscovery != nil {
return f.MockGetLocalVolumeDiscovery(name, namespace)
}
return &v1alpha1.LocalVolumeDiscovery{}, nil
}
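// Hedged usage sketch: a test can stub just the call it cares about and
// leave the other methods on their defaults (errors import assumed):
//
//	updater := &MockAPIUpdater{
//		MockGetDiscoveryResult: func(name, namespace string) (*v1alpha1.LocalVolumeDiscoveryResult, error) {
//			return nil, errors.New("not found")
//		},
//	}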
CheckDomainSunriseClaimRequest.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CheckDomainSunriseClaimRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Domain', '2018-01-29', 'CheckDomainSunriseClaim')
def get_DomainName(self):
return self.get_query_params().get('DomainName')
def set_DomainName(self,DomainName):
self.add_query_param('DomainName',DomainName)
def get_UserClientIp(self):
return self.get_query_params().get('UserClientIp')
def set_UserClientIp(self,UserClientIp):
self.add_query_param('UserClientIp',UserClientIp)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang)
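# Hedged usage sketch (standard aliyunsdkcore client flow; credentials and
# region are placeholders):
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = CheckDomainSunriseClaimRequest()
#   request.set_DomainName('example.com')
#   response = client.do_action_with_exception(request)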
handler.go | package api
import (
"net/http"
"path/filepath"
"time"
"code.cloudfoundry.org/clock"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/api/artifactserver"
"github.com/concourse/concourse/atc/api/buildserver"
"github.com/concourse/concourse/atc/api/ccserver"
"github.com/concourse/concourse/atc/api/cliserver"
"github.com/concourse/concourse/atc/api/configserver"
"github.com/concourse/concourse/atc/api/containerserver"
"github.com/concourse/concourse/atc/api/infoserver"
"github.com/concourse/concourse/atc/api/jobserver"
"github.com/concourse/concourse/atc/api/loglevelserver"
"github.com/concourse/concourse/atc/api/pipelineserver"
"github.com/concourse/concourse/atc/api/resourceserver"
"github.com/concourse/concourse/atc/api/resourceserver/versionserver"
"github.com/concourse/concourse/atc/api/teamserver"
"github.com/concourse/concourse/atc/api/usersserver"
"github.com/concourse/concourse/atc/api/volumeserver"
"github.com/concourse/concourse/atc/api/wallserver"
"github.com/concourse/concourse/atc/api/workerserver"
"github.com/concourse/concourse/atc/creds"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/gc"
"github.com/concourse/concourse/atc/mainredirect"
"github.com/concourse/concourse/atc/worker"
"github.com/concourse/concourse/atc/wrappa"
"github.com/tedsuo/rata"
)
func NewHandler(
logger lager.Logger,
externalURL string,
clusterName string,
wrapper wrappa.Wrappa,
dbTeamFactory db.TeamFactory,
dbPipelineFactory db.PipelineFactory,
dbJobFactory db.JobFactory,
dbResourceFactory db.ResourceFactory,
dbWorkerFactory db.WorkerFactory,
workerTeamFactory db.TeamFactory,
volumeRepository db.VolumeRepository,
containerRepository db.ContainerRepository,
destroyer gc.Destroyer,
dbBuildFactory db.BuildFactory,
dbCheckFactory db.CheckFactory,
dbResourceConfigFactory db.ResourceConfigFactory,
dbUserFactory db.UserFactory,
eventHandlerFactory buildserver.EventHandlerFactory,
workerClient worker.Client,
sink *lager.ReconfigurableSink,
isTLSEnabled bool,
cliDownloadsDir string,
version string,
workerVersion string,
secretManager creds.Secrets,
varSourcePool creds.VarSourcePool,
credsManagers creds.Managers,
interceptTimeoutFactory containerserver.InterceptTimeoutFactory,
interceptUpdateInterval time.Duration,
dbWall db.Wall,
clock clock.Clock,
) (http.Handler, error) {
absCLIDownloadsDir, err := filepath.Abs(cliDownloadsDir)
if err != nil {
return nil, err
}
pipelineHandlerFactory := pipelineserver.NewScopedHandlerFactory(dbTeamFactory)
buildHandlerFactory := buildserver.NewScopedHandlerFactory(logger)
teamHandlerFactory := NewTeamScopedHandlerFactory(logger, dbTeamFactory)
buildServer := buildserver.NewServer(logger, externalURL, dbTeamFactory, dbBuildFactory, eventHandlerFactory)
jobServer := jobserver.NewServer(logger, externalURL, secretManager, dbJobFactory, dbCheckFactory)
resourceServer := resourceserver.NewServer(logger, secretManager, varSourcePool, dbCheckFactory, dbResourceFactory, dbResourceConfigFactory)
versionServer := versionserver.NewServer(logger, externalURL)
pipelineServer := pipelineserver.NewServer(logger, dbTeamFactory, dbPipelineFactory, externalURL)
configServer := configserver.NewServer(logger, dbTeamFactory, secretManager)
ccServer := ccserver.NewServer(logger, dbTeamFactory, externalURL)
workerServer := workerserver.NewServer(logger, workerTeamFactory, dbWorkerFactory)
logLevelServer := loglevelserver.NewServer(logger, sink)
cliServer := cliserver.NewServer(logger, absCLIDownloadsDir)
containerServer := containerserver.NewServer(logger, workerClient, secretManager, varSourcePool, interceptTimeoutFactory, interceptUpdateInterval, containerRepository, destroyer, clock)
volumesServer := volumeserver.NewServer(logger, volumeRepository, destroyer)
teamServer := teamserver.NewServer(logger, dbTeamFactory, externalURL)
infoServer := infoserver.NewServer(logger, version, workerVersion, externalURL, clusterName, credsManagers)
artifactServer := artifactserver.NewServer(logger, workerClient)
usersServer := usersserver.NewServer(logger, dbUserFactory)
wallServer := wallserver.NewServer(dbWall, logger)
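// Every atc route name maps onto exactly one handler below; wrapper.Wrap
// then decorates each entry (e.g. with auth) before routing.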
handlers := map[string]http.Handler{
atc.GetConfig: http.HandlerFunc(configServer.GetConfig),
atc.SaveConfig: http.HandlerFunc(configServer.SaveConfig),
atc.GetCC: http.HandlerFunc(ccServer.GetCC),
atc.ListBuilds: http.HandlerFunc(buildServer.ListBuilds),
atc.CreateBuild: teamHandlerFactory.HandlerFor(buildServer.CreateBuild),
atc.GetBuild: buildHandlerFactory.HandlerFor(buildServer.GetBuild),
atc.BuildResources: buildHandlerFactory.HandlerFor(buildServer.BuildResources),
atc.AbortBuild: buildHandlerFactory.HandlerFor(buildServer.AbortBuild),
atc.GetBuildPlan: buildHandlerFactory.HandlerFor(buildServer.GetBuildPlan),
atc.GetBuildPreparation: buildHandlerFactory.HandlerFor(buildServer.GetBuildPreparation),
atc.BuildEvents: buildHandlerFactory.HandlerFor(buildServer.BuildEvents),
atc.ListBuildArtifacts: buildHandlerFactory.HandlerFor(buildServer.GetBuildArtifacts),
atc.ListAllJobs: http.HandlerFunc(jobServer.ListAllJobs),
atc.ListJobs: pipelineHandlerFactory.HandlerFor(jobServer.ListJobs),
atc.GetJob: pipelineHandlerFactory.HandlerFor(jobServer.GetJob),
atc.ListJobBuilds: pipelineHandlerFactory.HandlerFor(jobServer.ListJobBuilds),
atc.ListJobInputs: pipelineHandlerFactory.HandlerFor(jobServer.ListJobInputs),
atc.GetJobBuild: pipelineHandlerFactory.HandlerFor(jobServer.GetJobBuild),
atc.CreateJobBuild: pipelineHandlerFactory.HandlerFor(jobServer.CreateJobBuild),
atc.RerunJobBuild: pipelineHandlerFactory.HandlerFor(jobServer.RerunJobBuild),
atc.PauseJob: pipelineHandlerFactory.HandlerFor(jobServer.PauseJob),
atc.UnpauseJob: pipelineHandlerFactory.HandlerFor(jobServer.UnpauseJob),
atc.ScheduleJob: pipelineHandlerFactory.HandlerFor(jobServer.ScheduleJob),
atc.JobBadge: pipelineHandlerFactory.HandlerFor(jobServer.JobBadge),
atc.MainJobBadge: mainredirect.Handler{
Routes: atc.Routes,
Route: atc.JobBadge,
},
atc.ClearTaskCache: pipelineHandlerFactory.HandlerFor(jobServer.ClearTaskCache),
atc.ListAllPipelines: http.HandlerFunc(pipelineServer.ListAllPipelines),
atc.ListPipelines: http.HandlerFunc(pipelineServer.ListPipelines),
atc.GetPipeline: pipelineHandlerFactory.HandlerFor(pipelineServer.GetPipeline),
atc.DeletePipeline: pipelineHandlerFactory.HandlerFor(pipelineServer.DeletePipeline),
atc.OrderPipelines: http.HandlerFunc(pipelineServer.OrderPipelines),
atc.PausePipeline: pipelineHandlerFactory.HandlerFor(pipelineServer.PausePipeline),
atc.ArchivePipeline: pipelineHandlerFactory.HandlerFor(pipelineServer.ArchivePipeline),
atc.UnpausePipeline: pipelineHandlerFactory.HandlerFor(pipelineServer.UnpausePipeline),
atc.ExposePipeline: pipelineHandlerFactory.HandlerFor(pipelineServer.ExposePipeline),
atc.HidePipeline: pipelineHandlerFactory.HandlerFor(pipelineServer.HidePipeline),
atc.GetVersionsDB: pipelineHandlerFactory.HandlerFor(pipelineServer.GetVersionsDB),
atc.RenamePipeline: pipelineHandlerFactory.HandlerFor(pipelineServer.RenamePipeline),
atc.ListPipelineBuilds: pipelineHandlerFactory.HandlerFor(pipelineServer.ListPipelineBuilds),
atc.CreatePipelineBuild: pipelineHandlerFactory.HandlerFor(pipelineServer.CreateBuild),
atc.PipelineBadge: pipelineHandlerFactory.HandlerFor(pipelineServer.PipelineBadge),
atc.ListAllResources: http.HandlerFunc(resourceServer.ListAllResources),
atc.ListResources: pipelineHandlerFactory.HandlerFor(resourceServer.ListResources),
atc.ListResourceTypes: pipelineHandlerFactory.HandlerFor(resourceServer.ListVersionedResourceTypes),
atc.GetResource: pipelineHandlerFactory.HandlerFor(resourceServer.GetResource),
atc.UnpinResource: pipelineHandlerFactory.HandlerFor(resourceServer.UnpinResource),
atc.SetPinCommentOnResource: pipelineHandlerFactory.HandlerFor(resourceServer.SetPinCommentOnResource),
atc.CheckResource: pipelineHandlerFactory.HandlerFor(resourceServer.CheckResource),
atc.CheckResourceWebHook: pipelineHandlerFactory.HandlerFor(resourceServer.CheckResourceWebHook),
atc.CheckResourceType: pipelineHandlerFactory.HandlerFor(resourceServer.CheckResourceType),
atc.ListResourceVersions: pipelineHandlerFactory.HandlerFor(versionServer.ListResourceVersions),
atc.GetResourceVersion: pipelineHandlerFactory.HandlerFor(versionServer.GetResourceVersion),
atc.EnableResourceVersion: pipelineHandlerFactory.HandlerFor(versionServer.EnableResourceVersion),
atc.DisableResourceVersion: pipelineHandlerFactory.HandlerFor(versionServer.DisableResourceVersion),
atc.PinResourceVersion: pipelineHandlerFactory.HandlerFor(versionServer.PinResourceVersion),
atc.ListBuildsWithVersionAsInput: pipelineHandlerFactory.HandlerFor(versionServer.ListBuildsWithVersionAsInput),
atc.ListBuildsWithVersionAsOutput: pipelineHandlerFactory.HandlerFor(versionServer.ListBuildsWithVersionAsOutput),
atc.GetResourceCausality: pipelineHandlerFactory.HandlerFor(versionServer.GetCausality),
atc.ListWorkers: http.HandlerFunc(workerServer.ListWorkers),
atc.RegisterWorker: http.HandlerFunc(workerServer.RegisterWorker),
atc.LandWorker: http.HandlerFunc(workerServer.LandWorker),
atc.RetireWorker: http.HandlerFunc(workerServer.RetireWorker),
atc.PruneWorker: http.HandlerFunc(workerServer.PruneWorker),
atc.HeartbeatWorker: http.HandlerFunc(workerServer.HeartbeatWorker),
atc.DeleteWorker: http.HandlerFunc(workerServer.DeleteWorker),
atc.SetLogLevel: http.HandlerFunc(logLevelServer.SetMinLevel),
atc.GetLogLevel: http.HandlerFunc(logLevelServer.GetMinLevel),
atc.DownloadCLI: http.HandlerFunc(cliServer.Download),
atc.GetInfo: http.HandlerFunc(infoServer.Info),
atc.GetInfoCreds: http.HandlerFunc(infoServer.Creds),
atc.GetUser: http.HandlerFunc(usersServer.GetUser),
atc.ListActiveUsersSince: http.HandlerFunc(usersServer.GetUsersSince),
atc.ListContainers: teamHandlerFactory.HandlerFor(containerServer.ListContainers),
atc.GetContainer: teamHandlerFactory.HandlerFor(containerServer.GetContainer),
atc.HijackContainer: teamHandlerFactory.HandlerFor(containerServer.HijackContainer),
atc.ListDestroyingContainers: http.HandlerFunc(containerServer.ListDestroyingContainers),
atc.ReportWorkerContainers: http.HandlerFunc(containerServer.ReportWorkerContainers),
atc.ListVolumes: teamHandlerFactory.HandlerFor(volumesServer.ListVolumes),
atc.ListDestroyingVolumes: http.HandlerFunc(volumesServer.ListDestroyingVolumes),
atc.ReportWorkerVolumes: http.HandlerFunc(volumesServer.ReportWorkerVolumes),
atc.ListTeams: http.HandlerFunc(teamServer.ListTeams),
atc.GetTeam: http.HandlerFunc(teamServer.GetTeam),
atc.SetTeam: http.HandlerFunc(teamServer.SetTeam),
atc.RenameTeam: http.HandlerFunc(teamServer.RenameTeam),
atc.DestroyTeam: http.HandlerFunc(teamServer.DestroyTeam),
atc.ListTeamBuilds: http.HandlerFunc(teamServer.ListTeamBuilds),
atc.CreateArtifact: teamHandlerFactory.HandlerFor(artifactServer.CreateArtifact),
atc.GetArtifact: teamHandlerFactory.HandlerFor(artifactServer.GetArtifact),
atc.GetWall: http.HandlerFunc(wallServer.GetWall),
atc.SetWall: http.HandlerFunc(wallServer.SetWall),
atc.ClearWall: http.HandlerFunc(wallServer.ClearWall),
}
return rata.NewRouter(atc.Routes, wrapper.Wrap(handlers))
}
xdg_desktop_portal.rs | use std::path::PathBuf;
use crate::backend::DialogFutureType;
use crate::file_dialog::Filter;
use crate::{FileDialog, FileHandle};
use ashpd::desktop::file_chooser::{
FileChooserProxy, FileFilter, OpenFileOptions, SaveFileOptions,
};
// TODO: convert raw_window_handle::RawWindowHandle to ashpd::WindowIdentifier
// https://github.com/bilelmoussaoui/ashpd/issues/40
use ashpd::{zbus, WindowIdentifier};
use log::warn;
use pollster::block_on;
//
// Utility functions
//
fn add_filters_to_open_file_options(
filters: Vec<Filter>,
mut options: OpenFileOptions,
) -> OpenFileOptions {
for filter in &filters {
let mut ashpd_filter = FileFilter::new(&filter.name);
for file_extension in &filter.extensions {
ashpd_filter = ashpd_filter.glob(&format!("*.{}", file_extension));
}
options = options.add_filter(ashpd_filter);
}
options
}
fn add_filters_to_save_file_options(
filters: Vec<Filter>,
mut options: SaveFileOptions,
) -> SaveFileOptions {
for filter in &filters {
let mut ashpd_filter = FileFilter::new(&filter.name);
for file_extension in &filter.extensions {
ashpd_filter = ashpd_filter.glob(&format!("*.{}", file_extension));
}
options = options.add_filter(ashpd_filter);
}
options
}
// refer to https://github.com/flatpak/xdg-desktop-portal/issues/213
fn uri_to_pathbuf(uri: &str) -> Option<PathBuf> {
uri.strip_prefix("file://").map(PathBuf::from)
}
fn ok_or_warn<T, E: std::fmt::Debug>(result: Result<T, E>) -> Option<T> {
match result {
Err(e) => {
warn!("{:?}", e);
None
}
Ok(t) => Some(t),
}
}
async fn file_chooser_proxy<'a>() -> Option<FileChooserProxy<'a>> {
let connection = ok_or_warn(zbus::Connection::session().await)?;
ok_or_warn(FileChooserProxy::new(&connection).await)
}
//
// File Picker
//
use crate::backend::FilePickerDialogImpl;
impl FilePickerDialogImpl for FileDialog {
fn pick_file(self) -> Option<PathBuf> {
block_on(self.pick_file_async()).map(PathBuf::from)
}
fn pick_files(self) -> Option<Vec<PathBuf>> {
block_on(self.pick_files_async())
.map(|vec_file_handle| vec_file_handle.iter().map(PathBuf::from).collect())
}
}
use crate::backend::AsyncFilePickerDialogImpl;
impl AsyncFilePickerDialogImpl for FileDialog {
fn pick_file_async(self) -> DialogFutureType<Option<FileHandle>> {
Box::pin(async {
let proxy = file_chooser_proxy().await?;
let mut options = OpenFileOptions::default()
.accept_label("Pick file")
.multiple(false);
options = add_filters_to_open_file_options(self.filters, options);
let selected_files = proxy
.open_file(
&WindowIdentifier::default(),
&self.title.unwrap_or_else(|| "Pick a file".to_string()),
options,
)
.await;
if selected_files.is_err() {
return None;
}
uri_to_pathbuf(&selected_files.unwrap().uris()[0]).map(FileHandle::from)
})
}
fn pick_files_async(self) -> DialogFutureType<Option<Vec<FileHandle>>> {
Box::pin(async {
let proxy = file_chooser_proxy().await?;
let mut options = OpenFileOptions::default()
.accept_label("Pick file(s)")
.multiple(true);
options = add_filters_to_open_file_options(self.filters, options);
let selected_files = proxy
.open_file(
&WindowIdentifier::default(),
&self
.title
.unwrap_or_else(|| "Pick one or more files".to_string()),
options,
)
.await;
if selected_files.is_err() {
return None;
}
let selected_files = selected_files
.unwrap()
.uris()
.iter()
.filter_map(|string| uri_to_pathbuf(string))
.map(FileHandle::from)
.collect::<Vec<FileHandle>>();
if selected_files.is_empty() {
return None;
}
Some(selected_files)
})
}
}
//
// Folder Picker
//
use crate::backend::FolderPickerDialogImpl;
impl FolderPickerDialogImpl for FileDialog {
fn pick_folder(self) -> Option<PathBuf> {
block_on(self.pick_folder_async()).map(PathBuf::from)
}
}
use crate::backend::AsyncFolderPickerDialogImpl;
impl AsyncFolderPickerDialogImpl for FileDialog {
fn pick_folder_async(self) -> DialogFutureType<Option<FileHandle>> {
Box::pin(async {
let proxy = file_chooser_proxy().await?;
let mut options = OpenFileOptions::default()
.accept_label("Pick folder")
.multiple(false)
.directory(true);
options = add_filters_to_open_file_options(self.filters, options);
let selected_files = proxy
.open_file(
&WindowIdentifier::default(),
&self.title.unwrap_or_else(|| "Pick a folder".to_string()),
options,
)
.await;
if selected_files.is_err() {
return None;
}
uri_to_pathbuf(&selected_files.unwrap().uris()[0]).map(FileHandle::from)
})
}
}
//
// File Save
//
use crate::backend::FileSaveDialogImpl;
impl FileSaveDialogImpl for FileDialog {
fn save_file(self) -> Option<PathBuf> {
block_on(self.save_file_async()).map(PathBuf::from)
}
}
use crate::backend::AsyncFileSaveDialogImpl;
impl AsyncFileSaveDialogImpl for FileDialog {
fn save_file_async(self) -> DialogFutureType<Option<FileHandle>> {
Box::pin(async {
let proxy = file_chooser_proxy().await?;
let mut options = SaveFileOptions::default().accept_label("Save");
options = add_filters_to_save_file_options(self.filters, options);
if let Some(file_name) = self.file_name {
options = options.current_name(&file_name);
}
// TODO: impl zvariant::Type for PathBuf?
// if let Some(dir) = self.starting_directory {
// options.current_folder(dir);
// }
let selected_files = proxy
.save_file(
&WindowIdentifier::default(),
&self.title.unwrap_or_else(|| "Save file".to_string()),
options,
)
.await;
if selected_files.is_err() {
return None;
}
uri_to_pathbuf(&selected_files.unwrap().uris()[0]).map(FileHandle::from)
})
}
}
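// Hedged usage sketch, assuming this backend sits behind the crate's public
// FileDialog builder (as in rfd):
//
//     let file = FileDialog::new()
//         .add_filter("text", &["txt", "rs"])
//         .set_title("Pick a file")
//         .pick_file();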
push_up_exercise_name.rs | use crate::fields::FieldContent;
use serde::Serialize;
#[derive(Clone, Copy, Debug, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum PushUpExerciseName {
AlternatingHandsMedicineBallPushUp,
AlternatingStaggeredPushUp,
BosuBallPushUp,
ChestPressWithBand,
ClappingPushUp,
CloseGripMedicineBallPushUp,
CloseHandsPushUp,
DeclinePushUp,
DiamondPushUp,
ExplosiveCrossoverPushUp,
ExplosivePushUp,
FeetElevatedSideToSidePushUp,
HandReleasePushUp,
HandstandPushUp,
InclinePushUp,
IsometricExplosivePushUp,
JudoPushUp,
KneelingPushUp,
MedicineBallChestPass,
MedicineBallPushUp,
OneArmPushUp,
ParalletteHandstandPushUp,
PilatesPushup,
PushUp,
PushUpAndRow,
PushUpPlus,
PushUpWithFeetOnSwissBall,
PushUpWithOneHandOnMedicineBall,
RingHandstandPushUp,
RingPushUp,
ShoulderPushUp,
SingleArmMedicineBallPushUp,
SpidermanPushUp,
StackedFeetPushUp,
StaggeredHandsPushUp,
SuspendedPushUp,
SwissBallPushUp,
SwissBallPushUpPlus,
TPushUp,
TripleStopPushUp,
WeightedAlternatingHandsMedicineBallPushUp,
WeightedAlternatingStaggeredPushUp,
WeightedBosuBallPushUp,
WeightedClappingPushUp,
WeightedCloseGripMedicineBallPushUp,
WeightedCloseHandsPushUp,
WeightedDeclinePushUp,
WeightedDiamondPushUp,
WeightedExplosiveCrossoverPushUp,
WeightedExplosivePushUp,
WeightedFeetElevatedSideToSidePushUp,
WeightedHandReleasePushUp,
WeightedHandstandPushUp,
WeightedInclinePushUp,
WeightedIsometricExplosivePushUp,
WeightedJudoPushUp,
WeightedKneelingPushUp,
WeightedMedicineBallPushUp,
WeightedOneArmPushUp,
WeightedParalletteHandstandPushUp,
WeightedPushUp,
WeightedPushUpAndRow,
WeightedPushUpPlus,
WeightedPushUpWithFeetOnSwissBall,
WeightedPushUpWithOneHandOnMedicineBall,
WeightedRingHandstandPushUp,
WeightedRingPushUp,
WeightedShoulderPushUp,
WeightedSingleArmMedicineBallPushUp,
WeightedSpidermanPushUp,
WeightedStackedFeetPushUp,
WeightedStaggeredHandsPushUp,
WeightedSuspendedPushUp,
WeightedSwissBallPushUp,
WeightedSwissBallPushUpPlus,
WeightedTPushUp,
WeightedTripleStopPushUp,
WeightedWideHandsPushUp,
WideHandsPushUp,
UnknownValue(u64),
}
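// The conversion below is total over u16 payloads (unmapped numbers fall
// through to UnknownValue) but panics on any other FieldContent variant.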
impl From<FieldContent> for PushUpExerciseName {
fn from(field: FieldContent) -> Self {
if let FieldContent::UnsignedInt16(enum_value) = field {
match enum_value {
0 => PushUpExerciseName::ChestPressWithBand,
1 => PushUpExerciseName::AlternatingStaggeredPushUp,
2 => PushUpExerciseName::WeightedAlternatingStaggeredPushUp,
3 => PushUpExerciseName::AlternatingHandsMedicineBallPushUp,
4 => PushUpExerciseName::WeightedAlternatingHandsMedicineBallPushUp,
5 => PushUpExerciseName::BosuBallPushUp,
6 => PushUpExerciseName::WeightedBosuBallPushUp,
7 => PushUpExerciseName::ClappingPushUp,
8 => PushUpExerciseName::WeightedClappingPushUp,
9 => PushUpExerciseName::CloseGripMedicineBallPushUp,
10 => PushUpExerciseName::WeightedCloseGripMedicineBallPushUp,
11 => PushUpExerciseName::CloseHandsPushUp,
12 => PushUpExerciseName::WeightedCloseHandsPushUp,
13 => PushUpExerciseName::DeclinePushUp,
14 => PushUpExerciseName::WeightedDeclinePushUp,
15 => PushUpExerciseName::DiamondPushUp,
16 => PushUpExerciseName::WeightedDiamondPushUp,
17 => PushUpExerciseName::ExplosiveCrossoverPushUp,
18 => PushUpExerciseName::WeightedExplosiveCrossoverPushUp,
19 => PushUpExerciseName::ExplosivePushUp,
20 => PushUpExerciseName::WeightedExplosivePushUp,
21 => PushUpExerciseName::FeetElevatedSideToSidePushUp,
22 => PushUpExerciseName::WeightedFeetElevatedSideToSidePushUp,
23 => PushUpExerciseName::HandReleasePushUp,
24 => PushUpExerciseName::WeightedHandReleasePushUp,
25 => PushUpExerciseName::HandstandPushUp,
26 => PushUpExerciseName::WeightedHandstandPushUp,
27 => PushUpExerciseName::InclinePushUp,
28 => PushUpExerciseName::WeightedInclinePushUp,
29 => PushUpExerciseName::IsometricExplosivePushUp,
30 => PushUpExerciseName::WeightedIsometricExplosivePushUp,
31 => PushUpExerciseName::JudoPushUp,
32 => PushUpExerciseName::WeightedJudoPushUp,
33 => PushUpExerciseName::KneelingPushUp,
34 => PushUpExerciseName::WeightedKneelingPushUp,
35 => PushUpExerciseName::MedicineBallChestPass,
36 => PushUpExerciseName::MedicineBallPushUp,
37 => PushUpExerciseName::WeightedMedicineBallPushUp,
38 => PushUpExerciseName::OneArmPushUp,
39 => PushUpExerciseName::WeightedOneArmPushUp,
40 => PushUpExerciseName::WeightedPushUp,
41 => PushUpExerciseName::PushUpAndRow,
42 => PushUpExerciseName::WeightedPushUpAndRow,
43 => PushUpExerciseName::PushUpPlus,
44 => PushUpExerciseName::WeightedPushUpPlus,
45 => PushUpExerciseName::PushUpWithFeetOnSwissBall,
46 => PushUpExerciseName::WeightedPushUpWithFeetOnSwissBall,
47 => PushUpExerciseName::PushUpWithOneHandOnMedicineBall,
48 => PushUpExerciseName::WeightedPushUpWithOneHandOnMedicineBall,
49 => PushUpExerciseName::ShoulderPushUp,
50 => PushUpExerciseName::WeightedShoulderPushUp,
51 => PushUpExerciseName::SingleArmMedicineBallPushUp,
52 => PushUpExerciseName::WeightedSingleArmMedicineBallPushUp,
53 => PushUpExerciseName::SpidermanPushUp,
54 => PushUpExerciseName::WeightedSpidermanPushUp,
55 => PushUpExerciseName::StackedFeetPushUp,
56 => PushUpExerciseName::WeightedStackedFeetPushUp,
57 => PushUpExerciseName::StaggeredHandsPushUp,
58 => PushUpExerciseName::WeightedStaggeredHandsPushUp,
59 => PushUpExerciseName::SuspendedPushUp,
60 => PushUpExerciseName::WeightedSuspendedPushUp,
61 => PushUpExerciseName::SwissBallPushUp,
62 => PushUpExerciseName::WeightedSwissBallPushUp,
63 => PushUpExerciseName::SwissBallPushUpPlus,
64 => PushUpExerciseName::WeightedSwissBallPushUpPlus,
65 => PushUpExerciseName::TPushUp,
66 => PushUpExerciseName::WeightedTPushUp,
67 => PushUpExerciseName::TripleStopPushUp,
68 => PushUpExerciseName::WeightedTripleStopPushUp,
69 => PushUpExerciseName::WideHandsPushUp,
70 => PushUpExerciseName::WeightedWideHandsPushUp,
71 => PushUpExerciseName::ParalletteHandstandPushUp,
72 => PushUpExerciseName::WeightedParalletteHandstandPushUp,
73 => PushUpExerciseName::RingHandstandPushUp,
74 => PushUpExerciseName::WeightedRingHandstandPushUp,
75 => PushUpExerciseName::RingPushUp,
76 => PushUpExerciseName::WeightedRingPushUp,
77 => PushUpExerciseName::PushUp,
78 => PushUpExerciseName::PilatesPushup,
n => PushUpExerciseName::UnknownValue(n as u64),
}
} else {
panic!("can't convert PushUpExerciseName to {:?}", field);
}
}
}
day14.rs | use itertools::{Itertools, MinMaxResult};
use std::{cell::RefCell, collections::HashMap, fs, rc::Rc};
type Rules = HashMap<(char, char), char>;
type CharCount = HashMap<char, u64>;
type CacheKey0 = (char, char, char);
type CacheInner = HashMap<(CacheKey0, u8), CharCount>;
type Cache = Rc<RefCell<CacheInner>>;
pub fn solution() {
let content = fs::read_to_string("inputs/2021/14.txt").unwrap();
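// Input format (AoC 2021 day 14): first line is the polymer template,
// then a blank line, then insertion rules such as "CH -> B".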
let mut lines = content.lines();
let template = lines.next().unwrap();
let rules = lines.skip(1).fold(Rules::new(), |mut acc, line| {
let mut parts = line.split(" -> ");
let from = {
let mut s = parts.next().unwrap().chars();
let a = s.next().unwrap();
let b = s.next().unwrap();
(a, b)
};
let to = parts.next().unwrap().chars().next().unwrap();
acc.insert(from, to);
acc
});
println!("diff 10: {}", run(template, 10, &rules));
println!("diff 40: {}", run(template, 40, &rules));
}
fn run(template: &str, end: u8, rules: &Rules) -> u64 {
let cache = Rc::new(RefCell::new(HashMap::new()));
let mut result = template
.chars()
.tuple_windows()
.fold(CharCount::new(), |mut acc, (a, b, c)| {
ins((a, b, c), 0, end, &rules, &cache).into_iter().for_each(|(k, v)| {
*acc.entry(k).or_insert(0) += v;
});
acc
});
// need to remove the overlapped ones
template
.chars()
.skip(1)
.tuple_windows()
.for_each(|(a, b, _)| {
let mid_template = (a, rules.get(&(a, b)).unwrap().clone(), b);
ins(mid_template, 1, end, &rules, &cache)
.into_iter()
.for_each(|(k, v)| {
*result.get_mut(&k).unwrap() -= v;
});
});
if let MinMaxResult::MinMax(a, z) = result.into_iter().minmax_by_key(|a| a.1) {
z.1 - a.1
} else {
unreachable!()
}
}
fn ins(template: (char, char, char), step: u8, end: u8, rules: &Rules, cache: &Cache) -> CharCount {
if let Some(ret) = cache.borrow().get(&(template, step)) {
return ret.clone();
}
let mut c = CharCount::new();
if step == end {
*c.entry(template.0).or_insert(0) += 1;
*c.entry(template.1).or_insert(0) += 1;
*c.entry(template.2).or_insert(0) += 1;
} else {
let left_template = (
template.0,
rules.get(&(template.0, template.1)).unwrap().clone(),
template.1,
);
let lc = ins(left_template, step + 1, end, rules, &cache.clone());
lc.into_iter().for_each(|(k, v)| *c.entry(k).or_insert(0) += v);
let right_template = (
template.1,
rules.get(&(template.1, template.2)).unwrap().clone(),
template.2,
);
let rc = ins(right_template, step + 1, end, rules, &cache.clone());
rc.into_iter().for_each(|(k, v)| *c.entry(k).or_insert(0) += v);
*c.get_mut(&template.1).unwrap() -= 1;
}
cache.borrow_mut().insert((template, step), c.clone());
c
}
args.py | import os, sys
sys.path.append("C:\\BERTVision\\code\\torch")
import torch
import models.args
def get_args():
# retrieve the general models.args and attach them here
parser = models.args.get_args()
# set search specific args
parser.add_argument('--model',
type=str,
default='MSR',
required=True)
parser.add_argument('--checkpoint',
type=str,
default='bert-base-uncased',
required=True,
help='A HuggingFace checkpoint e.g., bert-base-uncased')
parser.add_argument('--num-labels',
default=2,
type=int)
parser.add_argument('--max-seq-length',
default=86,
type=int,
help='Tokenization max length')
parser.add_argument('--save-path',
type=str,
default=os.path.join('model_checkpoints'))
parser.add_argument('--log-path',
type=str,
default=os.path.join('model_logs'))
parser.add_argument('--warmup-proportion',
default=0.1,
type=float,
help='Proportion of training to perform linear learning rate warmup for')
parser.add_argument('--batch-size',
type=int,
default=16,
help='input batch size for training (default: 16)')
parser.add_argument('--lr',
type=float,
default=1e-5,
help='learning rate (default: 1e-5)')
parser.add_argument('--num-workers',
type=int,
default=0,
help='Number of CPU cores (default: 0)')
parser.add_argument('--shard',
type=float,
default=0.10,
help='Percentage of training set to sample from')
args = parser.parse_args()
return args
#
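# Hedged invocation sketch (entry-point script name is a placeholder):
#   python run_msr.py --model MSR --checkpoint bert-base-uncased --batch-size 16 --lr 1e-5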
create_folder.py | import os
def create_folder(initial_dir, nb, service):
directory = os.path.join(initial_dir, nb)
directory = os.path.join(directory, service)
if not os.path.exists(directory):
os.makedirs(directory)
print('Created patient directory -- ' + directory)
else:
print('Directory -- {} -- already exists'.format(directory))
return directory
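# Hedged usage sketch (paths are placeholders):
#   path = create_folder('/data/patients', 'patient_0421', 'radiology')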
slackrequest.py | import json
import platform
import requests
import six
import sys
from .version import __version__
class SlackRequest(object):
def __init__(
self,
proxies=None
):
# HTTP configs
self.custom_user_agent = None
self.proxies = proxies
# Construct the user-agent header with the package info, Python version and OS version.
self.default_user_agent = {
# __name__ returns all classes, we only want the client
"client": "{0}/{1}".format(__name__.split('.')[0], __version__),
"python": "Python/{v.major}.{v.minor}.{v.micro}".format(v=sys.version_info),
"system": "{0}/{1}".format(platform.system(), platform.release())
}
def get_user_agent(self):
# Check for custom user-agent and append if found
if self.custom_user_agent:
custom_ua_list = ["/".join(client_info) for client_info in self.custom_user_agent]
custom_ua_string = " ".join(custom_ua_list)
self.default_user_agent['custom'] = custom_ua_string
# Concatenate and format the user-agent string to be passed into request headers
ua_string = []
for key, val in self.default_user_agent.items():
ua_string.append(val)
user_agent_string = " ".join(ua_string)
return user_agent_string
def append_user_agent(self, name, version):
if self.custom_user_agent:
self.custom_user_agent.append([name.replace("/", ":"), version.replace("/", ":")])
else:
self.custom_user_agent = [[name, version]]
def do(self, token=None, request="?", post_data=None,
as_user=None, domain="slack.com", timeout=None):
"""
Perform a POST request to the Slack Web API
Args:
token (str): your authentication token
request (str): the method to call from the Slack API. For example: 'channels.list'
post_data (dict): key/value arguments to pass for the request. For example:
{'channel': 'CABC12345'}
as_user (str): if using a workspace app, the user_id of the user to act on behalf of
domain (str): if for some reason you want to send your request to something other
than slack.com
timeout (float): stop waiting for a response after a given number of seconds
"""
# Pull `file` out so it isn't JSON encoded like normal fields.
# Only do this for requests that are UPLOADING files; downloading files
# use the 'file' argument to point to a File ID.
post_data = post_data or {}
# Move singular file objects into `files`
upload_requests = ['files.upload']
# Move file content into requests' `files` param
files = None
if request in upload_requests:
files = {'file': post_data.pop('file')} if 'file' in post_data else None
# Check for plural fields and convert them to comma-separated strings if needed
for field in {'channels', 'users', 'types'} & set(post_data.keys()):
if isinstance(post_data[field], list):
post_data[field] = ",".join(post_data[field])
# Convert any params which are list-like to JSON strings
# Example: `attachments` is a dict, and needs to be passed as JSON
for k, v in six.iteritems(post_data):
if isinstance(v, (list, dict)):
post_data[k] = json.dumps(v)
return self.post_http_request(token, request, post_data, as_user, files, timeout, domain)
def post_http_request(self, token, api_method, post_data,
as_user=None, files=None, timeout=None, domain="slack.com"):
"""
This method build and submits the Web API HTTP request
:param token: You app's Slack access token
:param api_method: The API method endpoint to submit the request to
:param post_data: The request payload
:param as_user: The user_id if using a workspace app on behalf of a user
:param files: Any files to be submitted during upload calls
:param timeout: Stop waiting for a response after a given number of seconds
:param domain: The URL to submit the API request to
:return:
"""
# Override token header if `token` is passed in post_data
if post_data is not None and "token" in post_data:
token = post_data['token']
# Set user-agent and auth headers
headers = {
'user-agent': self.get_user_agent(),
'Authorization': 'Bearer {}'.format(token)
}
if as_user:
headers["X-Slack-User"] = as_user
# Submit the request
res = requests.post(
'https://{0}/api/{1}'.format(domain, api_method),
headers=headers,
data=post_data,
files=files,
timeout=timeout,
proxies=self.proxies
)
return res
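# Hedged usage sketch (token and channel are placeholders):
#   req = SlackRequest()
#   res = req.do(token='xoxb-***', request='chat.postMessage',
#                post_data={'channel': '#general', 'text': 'hello'})
#   print(res.status_code)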
node-class.js | /**
* Created by xbh 2019-07-31: class example
*/
// NodeClass class
class NodeClass {
// constructor
constructor(x, y) {
// instance variables
this.x = x;
this.y = y;
}
// instance method
tostring() {
return 'x:' + this.x + ',y:' + this.y;
}
// static method
static say(name) {
this.para = name;
return 'Hello ' + name;
}
}
// static variable
NodeClass.para = 'developer';
module.exports = NodeClass;
let demo = new NodeClass(1, 6);
console.log(demo.tostring());
console.log(NodeClass.say('param'));
console.log(NodeClass.para);
/**
x:1,y:6
Hello param
param
*/
icon_monitor.rs |
pub struct IconMonitor {
props: crate::Props,
}
impl yew::Component for IconMonitor {
type Properties = crate::Props;
type Message = ();
fn create(props: Self::Properties, _: yew::prelude::ComponentLink<Self>) -> Self
{
Self { props }
}
fn update(&mut self, _: Self::Message) -> yew::prelude::ShouldRender
{
true
}
fn change(&mut self, _: Self::Properties) -> yew::prelude::ShouldRender
{
false
}
fn view(&self) -> yew::prelude::Html
{
yew::prelude::html! {
<svg
class=self.props.class.unwrap_or("")
width=self.props.size.unwrap_or(24).to_string()
height=self.props.size.unwrap_or(24).to_string()
viewBox="0 0 24 24"
fill=self.props.fill.unwrap_or("none")
stroke=self.props.color.unwrap_or("currentColor")
stroke-width=self.props.stroke_width.unwrap_or(2).to_string()
stroke-linecap=self.props.stroke_linecap.unwrap_or("round")
stroke-linejoin=self.props.stroke_linejoin.unwrap_or("round")
>
<svg xmlns="http://www.w3.org/2000/svg" enable-background="new 0 0 24 24" height="24" viewBox="0 0 24 24" width="24"><g><rect fill="none" height="24" width="24" y="0"/></g><g><g><path d="M22,3H2v15h5l-1,1v2h12v-2l-1-1h5V3z M20,16H4V5h16V16z"/></g></g></svg>
</svg>
}
}
}
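// Hedged usage sketch inside another yew component's view, assuming the
// Option-typed fields on crate::Props used above:
//
//     html! { <IconMonitor size=Some(32) color=Some("red") /> }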
task.go | package process
import (
"errors"
"ferry/pkg/pagination"
"ferry/tools"
"ferry/tools/app"
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/gin-gonic/gin"
uuid "github.com/satori/go.uuid"
"github.com/spf13/viper"
)
/*
@Author : lanyulei
*/
// TaskList lists task scripts with pagination
func TaskList(c *gin.Context) {
var (
err error
pageValue pagination.ListRequest
taskName string
taskData []map[string]interface{}
totalCount int
)
taskName = c.DefaultQuery("name", "")
err = c.ShouldBind(&pageValue)
if err != nil {
app.Error(c, -1, err, "")
return
}
if pageValue.Page == 0 {
pageValue.Page = 1
}
if pageValue.PerPage == 0 {
pageValue.PerPage = 10
}
getFileDetails := func(fn string) map[string]interface{} {
file := make(map[string]interface{})
fileClassify := strings.Split(fn, ".")
fileDetails := strings.Split(fileClassify[0], "-")
switch fileClassify[1] {
case "py":
file["classify"] = "Python"
case "sh":
file["classify"] = "Shell"
default:
file["classify"] = "Other"
}
if len(fileDetails) == 3 {
file["name"] = fileDetails[0]
file["uuid"] = fileDetails[1]
file["creator"] = fileDetails[2]
}
file["full_name"] = fn
return file
}
files, _ := ioutil.ReadDir(viper.GetString("script.path"))
var endIndex int
if taskName != "" {
for _, f := range files {
if strings.Contains(strings.Split(f.Name(), "-")[0], taskName) {
taskData = append(taskData, getFileDetails(f.Name()))
}
}
totalCount = len(taskData)
if pageValue.Page*pageValue.PerPage > len(taskData) {
endIndex = len(taskData)
} else {
endIndex = pageValue.Page * pageValue.PerPage
}
taskData = taskData[(pageValue.Page-1)*pageValue.PerPage : endIndex]
} else {
if pageValue.Page*pageValue.PerPage > len(files) {
endIndex = len(files)
} else {
endIndex = pageValue.Page * pageValue.PerPage
}
for _, f := range files[(pageValue.Page-1)*pageValue.PerPage : endIndex] {
taskData = append(taskData, getFileDetails(f.Name()))
}
totalCount = len(files)
}
app.OK(c, map[string]interface{}{
"data": taskData,
"page": pageValue.Page,
"per_page": pageValue.PerPage,
"total_count": totalCount,
}, "")
}
// CreateTask creates a new task script
func CreateTask(c *gin.Context) {
var (
err error
taskValue struct {
Name string `json:"name"`
Classify string `json:"classify"`
Content string `json:"content"`
}
)
err = c.ShouldBind(&taskValue)
if err != nil {
app.Error(c, -1, err, "")
return
}
uuidValue := uuid.Must(uuid.NewV4(), err)
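// Script files are stored as "<name>-<uuid fragment>-<creator>.<ext>";
// getFileDetails in TaskList parses this layout back apart.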
fileName := fmt.Sprintf("%v/%v-%v-%v",
viper.GetString("script.path"),
taskValue.Name,
strings.Split(uuidValue.String(), "-")[4],
tools.GetUserName(c),
)
if taskValue.Classify == "python" {
fileName = fileName + ".py"
} else if taskValue.Classify == "shell" {
fileName = fileName + ".sh"
}
err = ioutil.WriteFile(fileName, []byte(taskValue.Content), 0666)
if err != nil {
app.Error(c, -1, err, fmt.Sprintf("创建任务脚本失败: %v", err.Error()))
return
}
app.OK(c, "", "任务创建成功")
}
// UpdateTask updates a task script
func UpdateTask(c *gin.Context) {
var (
err error
file struct {
Name string `json:"name"`
FullName string `json:"full_name"`
Classify string `json:"classify"`
Content string `json:"content"`
}
)
err = c.ShouldBind(&file)
if err != nil {
app.Error(c, -1, err, "")
return
}
fullNameList := strings.Split(file.FullName, "-")
if fullNameList[0] != file.Name {
fullNameList[0] = file.Name
}
var suffixName string
if strings.ToLower(file.Classify) == "python" {
suffixName = ".py"
} else if strings.ToLower(file.Classify) == "shell" {
suffixName = ".sh"
}
// Make sure the file extension matches the (possibly changed) classify value.
lastPart := fullNameList[len(fullNameList)-1]
if suffixName != "" && !strings.HasSuffix(lastPart, suffixName) {
tList := strings.Split(lastPart, ".")
tList[len(tList)-1] = suffixName[1:]
fullNameList[len(fullNameList)-1] = strings.Join(tList, ".")
}
fileFullName := strings.Join(fullNameList, "-")
// Rewrite the script contents.
err = ioutil.WriteFile(fmt.Sprintf("%v/%v", viper.GetString("script.path"), fileFullName), []byte(file.Content), 0666)
if err != nil {
app.Error(c, -1, err, fmt.Sprintf("更新脚本文件失败,%v", err.Error()))
return
}
// Rename the script file.
err = os.Rename(
fmt.Sprintf("%v/%v", viper.GetString("script.path"), file.FullName),
fmt.Sprintf("%v/%v", viper.GetString("script.path"), fileFullName),
)
if err != nil {
app.Error(c, -1, err, fmt.Sprintf("更改脚本文件名称失败,%v", err.Error()))
return
}
app.OK(c, "", "更新成功")
}
// DeleteTask removes a task script.
func DeleteTask(c *gin.Context) {
fullName := c.DefaultQuery("full_name", "")
if fullName == "" {
app.Error(c, -1, errors.New("参数不正确,请确定参数full_name是否传递"), "")
return
}
err := os.Remove(fmt.Sprintf("%v/%v", viper.GetString("script.path"), fullName))
if err != nil {
app.Error(c, -1, err, fmt.Sprintf("删除文件失败,%v", err.Error()))
return
}
app.OK(c, nil, "任务删除成功")
}
// TaskDetails returns the contents of a task script.
func TaskDetails(c *gin.Context) {
var (
err error
fileName string
content []byte
)
fileName = c.DefaultQuery("file_name", "")
if fileName == "" {
app.Error(c, -1, errors.New("参数不正确,请确认file_name参数是否存在"), "")
return
}
content, err = ioutil.ReadFile(fmt.Sprintf("%v/%v", viper.GetString("script.path"), fileName))
if err != nil { |
app.OK(c, string(content), "")
} | return
} |
assets.go | // Code generated by go-bindata. DO NOT EDIT.
// sources:
// bindata/assets/10-eksctl.al2.conf (1.025kB)
// bindata/assets/bootstrap.al2.sh (755B)
// bindata/assets/bootstrap.helper.sh (1.403kB)
// bindata/assets/bootstrap.legacy.al2.sh (1.286kB)
// bindata/assets/bootstrap.legacy.ubuntu.sh (2.275kB)
// bindata/assets/bootstrap.ubuntu.sh (597B)
// bindata/assets/efa.al2.sh (351B)
// bindata/assets/efa.managed.boothook (484B)
// bindata/assets/install-ssm.al2.sh (159B)
// bindata/assets/kubelet.yaml (480B)
package bindata
import (
"bytes"
"compress/gzip"
"crypto/sha256"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
)
func bindataRead(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf("read %q: %w", name, err)
}
var buf bytes.Buffer
_, err = io.Copy(&buf, gz)
clErr := gz.Close()
if err != nil {
return nil, fmt.Errorf("read %q: %w", name, err)
}
if clErr != nil {
return nil, clErr
}
return buf.Bytes(), nil
}
type asset struct {
bytes []byte
info os.FileInfo
digest [sha256.Size]byte
}
type bindataFileInfo struct {
name string
size int64
mode os.FileMode
modTime time.Time
}
func (fi bindataFileInfo) Name() string {
return fi.name
}
func (fi bindataFileInfo) Size() int64 {
return fi.size
}
func (fi bindataFileInfo) Mode() os.FileMode {
return fi.mode
}
func (fi bindataFileInfo) ModTime() time.Time {
return fi.modTime
}
func (fi bindataFileInfo) IsDir() bool {
return false
}
func (fi bindataFileInfo) Sys() interface{} {
return nil
}
var _bindataAssets10EksctlAl2Conf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x52\xdf\x6b\xdb\x30\x10\x7e\xf7\x5f\x71\x90\x3e\x6c\x10\xd9\xac\x7d\x2b\xf8\xc1\x8b\xdd\x12\x96\x3a\x25\x76\xb7\xc1\x36\x8c\x2c\x5d\xd2\x23\xb2\x64\x24\x39\x69\x57\xf2\xbf\x0f\xc7\xf1\x96\xb2\x32\xf6\x26\xdd\x77\xf7\x7d\xdf\xfd\x98\x00\x6e\x9d\xf0\x8a\xb9\x16\x05\xad\x49\x80\x7b\x76\x1e\x1b\x09\xd2\x9a\x96\x91\x86\x4e\x93\x87\xb5\xb1\xb0\xed\x6a\x54\xe8\xa7\xc7\x4f\xd2\xf0\x9f\x46\xc3\x82\x74\xf7\x04\x97\xf0\x2e\x59\x5c\xbe\x0f\x26\x50\x2e\xd3\x25\xa4\xd8\x5a\x14\xdc\xa3\x9c\xc2\x9e\x94\x82\x1a\xc1\x62\x63\x76\x28\xc1\x19\xa3\x83\xe0\x5b\x81\x76\x47\x02\x7f\x04\x13\x58\x18\xc1\x15\x34\xe8\xb9\xe4\x9e\x43\xcb\x2d\x6f\xd0\xa3\x75\xd7\xb0\xca\x6e\xe7\xcb\x7c\x0a\xc9\x97\xa2\x4a\xb3\x9b\xe4\x61\x51\x56\x43\x2c\xc8\xf4\x8e\xac\xd1\x0d\x6a\x7f\x43\x0a\xe3\x08\xbd\x88\x86\x56\xa2\x91\x2b\x44\xbd\x0b\x26\x70\xab\x4c\xcd\x15\x70\x2d\xc1\x79\xee\x49\xbc\xd2\x98\x2d\x1e\x8a\x32\x5b\x55\x69\x5e\x4c\x21\x5f\xa6\x59\xb5\x48\x3e\x66\x8b\xf1\x53\x26\xf3\xbc\x2c\xfe\x29\x77\x9a\xcb\x49\x6d\x68\x47\x1b\xcd\xde\x10\x3b\x52\xce\xef\xa7\x30\xcf\x8b\x32\xc9\x67\x59\x35\x4f\xff\x8b\x5b\xf5\xac\x47\x85\x20\x7b\x42\x51\x78\x6e\x7d\x7c\xf6\x8c\x3a\x67\xa3\x9a\xf4\x58\x00\xdf\x03\x00\xc6\xb4\x91\xc8\xa8\x8d\x2f\x5e\x4e\xca\x87\x73\x40\xf1\x1a\x95\x1b\xc1\xa1\xed\xc3\x94\xab\xf6\x91\x87\x83\x7e\x48\x26\x22\xed\x3c\xd7\x02\x19\xc9\xf8\xe2\xe5\xcc\xf8\xc8\xd5\xf0\x27\xd6\x1a\xd9\x13\xdd\x25\x5f\xab\xfb\x65\x5a\x8c\x90\xc5\x0d\x39\x8f\xf6\xa8\x17\x7b\xdb\xe1\x79\x70\x4f\xfe\x91\x79\x4e\xda\xff\x36\x31\x8c\x7b\x2c\x17\xca\x74\x92\xb5\xd6\xec\x48\xa2\x8d\xf9\xde\x8d\x80\xd1\x7d\x1d\x5a\x66\x3b\xed\xa9\xc1\x58\x1a\xb1\x45\x3b\x76\x87\x7e\x6f\xec\x96\xb5\xaa\xdb\x90\x8e\x85\xa6\xb1\x4e\x13\xab\x49\x33\x49\x36\x8e\x4c\xeb\x23\xa1\xa9\x1f\xdb\x19\x2c\x8c\x5e\x0f\x78\xbf\x86\x1e\xd7\xe8\x43\x79\xca\x68\x8d\x64\xa4\xd7\x96\x9f\x59\xa0\x86\x6f\x30\xbe\x78\xe9\xaf\x34\xfb\x54\x54\xd9\x6c\x55\x25\xb3\xd9\xf2\x21\x2f\x0f\xa1\xdc\xda\x10\x85\x0d\x07\xf8\xf5\x11\x1f\x4e\xd1\x22\x5b\x7d\x9e\xcf\xb2\xa2\x4a\x97\x77\xc9\x3c\x3f\xf4\xcb\x8f\x5a\xde\x39\xbc\xbe\x0a\xaf\x18\x6e\x5d\xdd\x91\x92\xe1\x87\x93\x89\x7e\xc7\xbd\x4d\xda\xfc\x75\x2b\x43\x38\x7c\xe6\x8d\xfa\x33\xaa\xb7\x12\xfb\xa3\xea\xb3\x82\x5f\x01\x00\x00\xff\xff\x04\xfc\xe9\x45\x01\x04\x00\x00")
func bindataAssets10EksctlAl2ConfBytes() ([]byte, error) {
return bindataRead(
_bindataAssets10EksctlAl2Conf,
"bindata/assets/10-eksctl.al2.conf",
)
}
func bindataAssets10EksctlAl2Conf() (*asset, error) {
bytes, err := bindataAssets10EksctlAl2ConfBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "bindata/assets/10-eksctl.al2.conf", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb0, 0x2b, 0x5f, 0x5a, 0xc4, 0x94, 0x74, 0x90, 0x34, 0x26, 0x3b, 0xc9, 0x7, 0x3f, 0x89, 0x8f, 0xe9, 0x6f, 0xe5, 0x33, 0x58, 0x2b, 0x82, 0x48, 0xd, 0xca, 0x2b, 0x91, 0x5f, 0x6f, 0x4a, 0xab}}
return a, nil
}
var _bindataAssetsBootstrapAl2Sh = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x6c\x92\xd1\x6a\xdb\x4c\x10\x85\xef\xf7\x29\xe6\xd7\x1f\x30\x14\x56\x6a\x21\xe4\xa2\x17\x05\xc5\x55\x83\xa9\xa3\x04\x59\x2e\x81\x34\x88\xd5\x6a\x6c\x6f\x2c\xed\xaa\x3b\x23\x93\x12\xfc\xee\x65\xdb\x5a\x71\x9d\x5c\xee\xcc\x77\xce\xcc\x1c\xf6\xff\xff\x92\xda\xd8\xa4\x56\xb4\x11\x82\x90\x41\x3a\x40\xef\xf1\xc9\xf0\xe1\xd9\x9b\x1e\x57\xca\xb4\x87\xb7\x75\x83\x25\x64\x21\xc8\x0d\x5e\x23\x24\x3b\xe5\x93\xd6\xd4\x89\x6e\xdd\xd0\x24\xa4\xbd\xe9\x99\x12\xdc\x92\xe6\x36\xa9\x9d\x63\x62\xaf\xfa\x78\x83\x6d\x8f\x3e\x0e\x83\x50\x6f\x1c\x44\x7f\x88\x8f\xe0\x07\x6b\x8d\x5d\x43\x82\xac\x83\xec\x45\x13\x89\xd7\xb5\x98\x36\x10\x9d\x3d\x4f\xe7\xcb\x45\x99\x15\x55\x9e\x5e\x67\xfb\x08\xbe\x0b\x00\x29\x55\x6f\x08\xfd\x0e\xbd\x44\xdb\xf4\xce\x58\x0e\x68\x7a\x3b\xab\x16\x59\xf1\x2d\x2b\xaa\x65\x31\x1f\xe1\xfa\xe2\x5c\xea\x76\x20\x46\x2f\xb5\x0a\xe0\xe5\xc5\x79\x75\xf0\x9d\xa6\x23\xd8\x58\x1a\x41\xd3\x1f\x0f\xff\x9c\x2f\x46\x6a\x3b\xd4\xd8\x22\x4b\x7c\x62\xaf\xa4\xf2\x6b\x0a\xe4\xd7\xe5\x65\x36\xcf\xca\x2a\xbb\x2b\x8b\xb4\x4a\x8b\xab\x17\x81\x76\x96\x95\xb1\xe8\xa5\x1f\x2c\x9b\x0e\x7f\x3b\xdf\xe4\x65\x3a\xcb\xb3\xa2\x2a\x96\x79\x39\x0b\xb7\x9d\xc6\xd5\xa1\x5f\x87\xb8\x06\x42\x0f\xae\x67\xe3\x2c\x81\xb1\xec\xe0\xb0\x82\x76\x76\x65\xd6\xf1\x23\x39\x1b\x89\x90\x19\x4c\x7c\x07\x72\x05\x67\xcf\xe5\xf5\x6d\x15\x76\xaa\xa6\x37\xf9\x97\xfd\x04\xb2\xbb\x59\x29\x1e\x7f\x80\x24\x98\xc4\xf7\xef\x1f\xe0\x1d\xc4\xf7\x1f\x1e\x26\xc7\xbb\x07\x74\x76\xb5\x8f\x5e\xdf\x33\x76\x3e\x85\xde\xbf\xde\x91\xe8\x76\x6f\x54\xdf\x32\x16\x82\x7e\x12\x63\xa7\xb9\x85\x46\x61\xe7\xac\xf4\xd8\x3a\xd5\x9c\x7e\x14\x24\x56\x9e\xc3\xf1\x63\xda\x5b\x8a\x8e\xd4\x7f\x89\x43\xfb\x44\xdf\x38\x8b\x91\xf8\x15\x00\x00\xff\xff\xb9\x79\x57\x7c\xf3\x02\x00\x00")
func bindataAssetsBootstrapAl2ShBytes() ([]byte, error) {
return bindataRead(
_bindataAssetsBootstrapAl2Sh,
"bindata/assets/bootstrap.al2.sh",
)
}
func bindataAssetsBootstrapAl2Sh() (*asset, error) {
bytes, err := bindataAssetsBootstrapAl2ShBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "bindata/assets/bootstrap.al2.sh", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x3d, 0x1a, 0x18, 0x7b, 0x3b, 0xc, 0x5a, 0xfe, 0xcd, 0xaf, 0x4a, 0x56, 0xd7, 0xbb, 0x38, 0x7e, 0x27, 0xdf, 0x36, 0xa8, 0x4d, 0x27, 0x12, 0xce, 0x6d, 0x48, 0x6f, 0x84, 0xab, 0x31, 0x57, 0xc9}}
return a, nil
}
var _bindataAssetsBootstrapHelperSh = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x54\x7f\x6f\xda\x48\x10\xfd\x7f\x3f\xc5\xdc\x82\x9a\xa0\xcb\xe2\x36\xea\x45\x3a\x24\xa4\x23\xe0\xf6\xac\x12\x83\xc0\x9c\x72\x8a\x22\x6b\xb1\x87\xb0\xc5\xec\x5a\xbb\xe3\x92\x2a\xf2\x77\x3f\xd9\xc4\x89\xc9\xa9\xf9\x6b\x3d\x3f\xde\x9b\x37\xe3\xd1\x74\x7e\xf3\xd6\x4a\x7b\x6b\xe9\xb6\x8c\x39\x24\x10\x06\xd0\x5a\x7c\x54\xd4\x98\xb9\xca\x71\x23\x55\xd6\xd8\xda\x14\xda\x21\x31\xe6\x4c\x61\x13\x04\x0f\x29\xf1\x70\xe7\x12\xca\xbc\x5d\xb1\xc6\x0c\xa9\x8f\xfa\x07\x74\x60\xa3\x32\x84\x83\x55\x44\xa8\x61\xfd\x13\xd6\xc6\x90\x23\x2b\xf3\x1c\x2d\x63\x1d\x58\x39\x84\xe0\x66\xb2\xfc\x71\x09\x64\xe0\x01\x09\xf6\x48\x32\x95\x24\x59\x34\xfb\xe6\x87\x43\xde\x3d\x4f\x0a\x9b\x81\x10\x4e\x65\xa8\x09\xc4\x2d\xcc\x57\x11\x88\xbf\x81\xdf\x0a\x79\x70\x02\x93\x4b\xd1\x80\x04\x99\x1d\x6a\x41\x94\x09\x87\x89\xd1\xa9\x1b\xc0\xd5\xc7\x8f\x1c\xb6\x44\xf9\xc0\xf3\x3e\x5d\xfd\xd9\xbf\xfc\xe3\x73\xff\xf9\xf5\x32\x49\xe8\xc8\x93\xb9\xf2\x6a\x64\x8f\xb3\x4d\xa1\x13\x52\x46\x57\x62\xe2\x86\xf7\xbc\x07\x4f\x0c\xe0\x8d\x92\x77\x24\x0c\xa0\x5b\xeb\xe7\xc0\xdf\x2f\x5d\xc1\x44\x85\xf3\xba\x9f\x38\x2b\x19\x1b\xcd\x83\x78\xe9\x2f\xfe\xf1\x17\xf1\x6a\x31\x1d\xf2\xee\xd3\xa9\xa7\xe4\xec\xfa\xea\x73\x3c\x9e\xae\x96\x91\xbf\x88\xc7\xa3\x2a\xe5\xd4\x53\x72\x16\x84\xcb\x68\x14\x8e\xfd\x38\x98\x54\x23\x6c\xf7\x02\x4a\x3b\x92\x3a\x41\xa1\xd2\x5e\x2b\x73\x1a\x7c\xf1\xc7\xff\x8e\xa7\xfe\xaf\x01\x99\xda\xa0\x48\x7e\x26\x19\xf6\x38\x6b\xea\x4d\xc2\x65\x25\xa1\x65\x0e\x44\xc9\x59\x38\x9b\xf8\x71\x34\x0a\xc2\xa8\x0e\xb7\xcc\x3a\x7c\x33\xba\x8d\xe7\xb3\x49\x1d\x6b\xbe\x5f\x71\xd3\xd1\xb5\x3f\x7d\xc5\x1d\xcd\xf2\x42\x9b\xf4\x28\xa2\xd6\x30\xec\x3e\xfd\x5f\x7c\x79\x21\xb3\x7c\x2b\xfb\xc7\x6d\xec\x2b\xe3\xb5\xda\x6d\x23\x82\x49\xc9\x19\xfb\xb6\xba\xf6\xa7\x7e\x14\x8f\x16\x5f\x97\xc3\x73\x2e\xc4\xb1\x84\x5c\x63\xe6\x86\xa7\xd5\x79\x8f\xdd\xdd\x81\xd0\x70\xda\x4d\xc9\xe1\xfe\x1e\x3e\x7c\x80\x36\xd5\xef\x35\x97\xc5\x07\xe5\x08\xad\x38\x28\xda\x0a\x92\x4a\xd3\x0b\x69\x03\xee\xb1\x0e\x08\xb1\x97\x8f\x22\x37\xa9\x03\xe9\x40\xc2\x78\x1a\x80\xb4\x0f\xc5\xbe\xda\x32\xe5\x20\xc5\xdc\x62\x22\x09\xd3\x0b\xa0\xad\x72\x95\x4f\xc2\xc1\xd8\x9d\xb4\xa6\xd0\x29\x14\x9a\x54\x06\x07\x7c\xcd\x04\x57\xe4\xb9\xb1\x04\x1b\x63\x61\x2f\x1f\xe7\x26\x75\x73\xb4\xa1\x49\xf1\xb5\x8b\x66\xee\xef\xb4\xd0\x08\x1b\xb6\xb3\x7b\x2f\x63\xf3\x6f\xa3\xc5\xe8\x38\x3c\xde\x7d\x6a\xc3\xef\xfe\xba\xaf\xe6\xdb\x6c\x45\x38\xba\xf1\xdb\x5b\x52\xd9\x25\x7f\xe1\x19\xcf\xc2\x2f\xc1\xd7\xe1\x59\x7d\x48\xaa\x0b\x62\x35\x12\xba\xe6\x98\x34\xaf\x48\x8c\xde\xa8\x87\xfe\x77\x67\xf4\xd9\x1b\x11\x27\x14\xa7\xb7\x48\xe0\x23\x59\xf9\x8c\x8a\x6e\xe6\x71\x85\xac\x01\xc3\x33\x8f\xf6\xf9\x09\xfd\x73\xda\x78\x16\x56\xbf\xc8\x5f\xc4\x8b\x55\x18\x05\xcf\xea\xdf\x3a\x07\x22\x35\xc9\x0e\x6d\x5a\x72\xe8\x40\x8a\x1b\x59\x64\xc7\x99\xcb\xec\x12\xbe\x17\x8e\x40\x69\x48\xa4\xc3\x0b\xd0\x86\xa0\x70\x98\x56\x9e\x62\x5d\x68\x2a\xd8\x7f\x01\x00\x00\xff\xff\xf7\xbf\x51\x00\x7b\x05\x00\x00")
func bindataAssetsBootstrapHelperShBytes() ([]byte, error) {
return bindataRead(
_bindataAssetsBootstrapHelperSh,
"bindata/assets/bootstrap.helper.sh",
)
}
func bindataAssetsBootstrapHelperSh() (*asset, error) {
bytes, err := bindataAssetsBootstrapHelperShBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "bindata/assets/bootstrap.helper.sh", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x45, 0x8c, 0xa1, 0xd0, 0x1e, 0x75, 0xe, 0x4b, 0x2f, 0x73, 0xb3, 0x29, 0x78, 0xe7, 0x17, 0x48, 0xff, 0x1e, 0x0, 0xa7, 0xca, 0xe1, 0x49, 0x7b, 0x15, 0x9c, 0xb8, 0xf8, 0x11, 0x53, 0x96, 0x8a}}
return a, nil
}
var _bindataAssetsBootstrapLegacyAl2Sh = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x93\x61\x4f\x02\x47\x10\x86\xbf\xef\xaf\x98\x1e\x17\x23\xa9\xcb\xa9\xb1\x26\xa2\x34\xa1\xdc\x99\x5e\x8a\x40\x0a\xb6\x1a\x63\x2f\xcb\xde\x50\x36\x2e\xbb\x97\xdb\x01\x35\xe4\xfa\xdb\x9b\xc5\xc3\x82\x5a\x3f\xc1\xcc\xbc\xb3\xf3\xee\x33\xb7\x8d\x1f\xa2\xa9\x32\xd1\x54\xb8\x39\x6b\xc0\x64\x18\x0f\x21\xc6\xa2\x44\x29\x08\xf3\x23\x78\x56\x5a\xc3\x14\xa1\xc4\x85\x5d\x61\x0e\xce\x5a\xc3\x98\x43\x02\x6e\x01\xcb\x12\x5f\x14\x6d\xc3\x42\x15\x38\x13\x4a\x6f\x63\x63\x97\xc6\x21\x31\x36\x5b\x1a\x49\xca\x1a\xf8\x1b\x29\x5b\x88\x97\xac\xb0\xb9\x3b\x6c\xc2\x9a\x01\x3c\xcf\x95\xf6\xc7\x8b\x1c\x94\x71\x24\x8c\xc4\x8c\x5e\x0b\x04\xaf\xb9\x84\xdc\x32\x00\x00\x35\x03\x78\x78\x80\x20\x5c\xef\x89\xaa\x00\x3a\x1d\x9f\x3d\xa9\x02\x78\x7c\x84\x83\x83\x5a\xe5\x9b\x7d\xf1\x1f\xf8\xeb\xe1\x98\x5f\x3c\xfe\x18\xfa\xf2\x25\xd0\x1c\xcd\xe6\x40\x00\x94\x73\x0b\xb5\xb2\x4e\x95\x48\xcb\xf2\xad\x3e\x53\x0c\x20\xb7\x06\xe1\x0a\x22\x24\x19\xe1\x93\x93\xa4\xa3\xad\xfb\xd6\x42\x14\xac\x62\xac\x01\xb7\x0e\x21\xbd\x89\xc7\xab\x53\x20\xeb\x6f\x08\x0b\x24\x91\x0b\x12\x6c\x32\xfc\x2d\x19\x74\x82\xf0\x50\x2e\x4b\x0d\x9c\x3b\xa5\xd1\x10\xf0\x3b\x18\xdd\x4e\x80\xff\x0a\xc1\x1d\x17\xcf\x8e\xa3\x3c\xe5\xdb\x26\x4e\xf6\x09\x0d\x27\xd2\xdc\xa1\xb4\x26\x77\x6d\x38\x3f\x3e\x0e\x60\x4e\x54\xb4\xa3\xe8\xe4\xfc\xa2\x75\xfa\xd3\x59\xab\xfe\x8d\xb4\x20\x74\x14\x89\x42\x45\x9b\xce\x66\xf0\x01\x77\x7d\x6e\x8d\xfb\x83\x93\x6f\x2c\xb4\x21\xdc\xf8\x0f\x20\xf8\x7e\xb4\x6f\xe3\xbe\x2f\x0a\x4f\x02\xcf\x64\x30\x8c\x93\x2c\x1d\xf9\x8b\xef\x3a\x00\x6d\xa5\xd0\x5c\x15\xab\xb3\x66\xc0\xd2\xc1\x78\xd2\x1d\xf4\x92\x2c\x8d\x3f\x09\xb7\x3b\xe6\x2a\xdf\x55\x4e\xee\x47\xc9\xff\x6b\xfd\xf7\xd0\x0c\x58\xf7\xcf\x71\x36\x4e\x7e\xff\x23\xed\x25\xe3\x2c\x1e\xde\x74\xd3\xc1\xa7\x1e\x87\xe5\x4a\x49\x74\x51\x6e\x17\x42\x79\x64\x8c\x39\xbb\x2c\x25\xee\xed\xfa\x69\x39\x45\x8d\xd4\x42\xb3\x82\x06\xd0\x5c\x39\x90\xc2\x80\x5d\x61\x59\xaa\x1c\xe1\xa6\x7b\x97\x8d\x86\xf1\x98\xfd\x67\xb1\x9f\x5e\x27\xbd\xfb\x5e\xff\x1b\x9f\x5a\xcd\x90\xcb\x57\xa9\xbd\xdb\x0d\xaa\x7e\xf7\x97\xa4\x3f\xee\x04\xe1\x7a\x27\xac\x8e\x8c\xcd\xdf\xd4\x1b\x71\x27\x5c\x7f\x9e\x52\x79\xe7\x52\x10\xfc\xfc\xa5\xf1\x0d\xf0\x8d\xfd\xab\xab\x64\x78\xfd\xbe\x98\x7a\x50\x3a\xaa\xf6\xd6\xb0\x33\x21\x8d\xab\x0f\xdc\x77\x8a\x3e\xae\xbe\x04\x1d\xae\xbf\xc8\x56\x6c\x0b\xaa\x13\xae\xb7\x7f\xdb\xbc\xe6\x53\xbf\x27\xff\x68\xf7\x07\x04\xcd\x6a\x0f\xcf\x3e\x1d\xe6\xef\xc3\xdc\xab\x23\x5c\x48\xd2\x90\x0b\x5c\x58\xc3\x4b\xd4\x56\xe4\x3b\x79\x34\x62\xaa\x11\x6a\x22\x3b\x05\x47\xa2\xa4\xf7\xfc\xbf\x01\x00\x00\xff\xff\x46\x01\xeb\xc2\x06\x05\x00\x00")
func bindataAssetsBootstrapLegacyAl2ShBytes() ([]byte, error) {
return bindataRead(
_bindataAssetsBootstrapLegacyAl2Sh,
"bindata/assets/bootstrap.legacy.al2.sh",
)
}
func bindataAssetsBootstrapLegacyAl2Sh() (*asset, error) {
bytes, err := bindataAssetsBootstrapLegacyAl2ShBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "bindata/assets/bootstrap.legacy.al2.sh", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x35, 0xb1, 0x58, 0xf5, 0xe6, 0x40, 0x92, 0xbe, 0xa6, 0x27, 0xc2, 0xfc, 0xbd, 0xc2, 0xe8, 0x54, 0x5e, 0x40, 0x37, 0x77, 0x73, 0x87, 0x9, 0xf, 0x97, 0xc4, 0xbc, 0x10, 0x5, 0x1b, 0x44, 0xf8}}
return a, nil
}
var _bindataAssetsBootstrapLegacyUbuntuSh = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x55\xf1\x6f\x1a\xb9\x12\xfe\xdd\x7f\xc5\xbc\x0d\xea\x0b\x7a\x31\xdb\xa4\x7d\x95\x9a\x96\xd3\x71\x81\xde\xa1\xa6\x87\x19\x27\x0e\xd6\x2e\xf6\xca\xf6\x42\xa3\x88\xff\xfd\xc9\x9b\xdd\x84\xa4\x6a\x3f\x61\xe6\x7d\xf3\xde\xf8\xf3\x8c\xf9\xcf\xbf\xd2\xb9\xb6\xe9\x5c\x84\x8c\xb1\x23\x98\x8e\xfb\x63\xe8\x63\xe1\x51\x0a\x42\x75\x02\x6b\x6d\x0c\xcc\x11\x3c\xe6\x6e\x85\x0a\x82\x73\x96\xb1\x80\x04\xdc\x01\x7a\x8f\xdf\x34\x35\xcb\x42\x17\xb8\xd0\xda\x34\x6b\xeb\x4a\x1b\x90\x18\x5b\x94\x56\x92\x76\x16\xee\x90\x66\xb9\xf8\x36\x2b\x9c\x0a\xc7\x6d\x78\x60\x00\xeb\x4c\x9b\x98\x5e\x28\xd0\x36\x90\xb0\x12\x67\xf4\x50\x20\x44\xce\x3b\x50\x8e\x01\x00\xe8\x05\xc0\xcd\x0d\x24\xad\x87\x27\xa4\x4d\x02\xdd\x6e\xdc\x3d\xdd\x24\x70\x7b\x0b\x2f\x5e\xd4\xac\x18\x1c\xc1\xff\xe0\xef\x9b\x97\xfc\xed\xed\xff\x5a\x11\xfe\x07\x94\xa1\xad\x12\x02\xa0\xcc\x1c\xd4\xcc\x77\xf5\x9e\x47\x2a\xfd\x96\xb0\xd0\x0c\x40\x39\x8b\xf0\x1e\x52\x24\x99\xe2\x32\x48\x32\x69\x23\xbf\x93\x8b\x82\x6d\x18\x3b\x82\xeb\x80\x30\xfc\xd4\x9f\xac\xce\x80\x5c\xbc\x22\xe4\x48\x42\x09\x12\x6c\x3a\xfe\x38\x18\x75\x93\xd6\xb1\x2c\xbd\x01\xce\x83\x36\x68\x09\xf8\x17\xb8\xba\x9e\x02\xff\x03\x92\x2f\x5c\xac\x03\x47\x79\xc6\x9b\x20\x4e\x6e\x89\x96\x13\x19\x1e\x50\x3a\xab\xc2\x39\xbc\x79\xf9\x32\x81\x8c\xa8\x38\x4f\xd3\xd3\x37\x6f\x3b\x67\xff\x7f\xdd\xa9\x7f\x53\x23\x08\x03\xa5\xa2\xd0\x69\x15\xd9\x4e\x0e\xea\x5d\xe7\xad\xeb\x7d\xa0\xe4\x19\x09\xe7\xd0\xaa\xf4\x27\x90\x3c\x7f\x74\x0c\xe3\x31\x2e\x6d\x9d\x26\xb1\x26\xa3\x71\x7f\x30\x1b\x5e\xc5\x8b\xef\x2b\x00\xe3\xa4\x30\x5c\x17\xab\xd7\xed\x84\x0d\x47\x93\x69\x6f\x74\x31\x98\x0d\xfb\xdf\x11\x9b\x47\xe6\x5a\xed\x33\xa7\x5f\xaf\x06\x3f\xe7\x46\x43\xb4\x13\xd6\xfb\x6b\x32\x9b\x0c\x3e\xff\x39\xbc\x18\x4c\x66\xfd\xf1\xa7\xde\x70\xf4\x5d\x4c\x40\xbf\xd2\x12\x43\xaa\x5c\x2e\x74\x2c\x19\x0b\xae\xf4\x12\x9f\x3c\xf5\xb2\x9c\xa3\x41\xea\xa0\x5d\xc1\x11\x50\xa6\x03\x48\x61\xc1\xad\xd0\x7b\xad\x10\x3e\xf5\xbe\xcc\xae\xc6\xfd\x09\x63\x8f\x12\x2f\x87\x1f\x06\x17\x5f\x2f\x2e\x9f\xd1\x69\xf4\x02\xb9\xbc\x97\x26\xaa\x65\x52\x10\xfc\xf2\xc3\x63\xab\x6a\x55\x87\xbf\x7f\x3f\x18\x7f\xd8\x55\xb5\xf5\x50\x7f\x6d\x9e\xd4\xb0\xf5\xb0\xb7\xda\x1c\x14\x6d\x0f\x8c\xeb\x0d\x6b\xb4\x77\x5b\x0f\xcd\xe7\x39\xaf\x15\xd7\x0e\x8f\x7d\xf4\x34\x2a\x69\x6f\x58\x54\xc2\x82\x15\x05\x08\xa3\x45\x80\x5a\x2d\xc7\x65\xe8\xd4\xdf\xcd\xde\x21\x4d\x92\xd9\xd1\x24\x99\x66\x6f\x4b\x0b\xe4\x8a\xfd\x64\x2c\xdc\x07\xc2\x3c\xf2\x3c\x06\x24\x1e\x27\x0b\x2a\xc6\x8e\x19\xc0\x76\x50\x9d\xc7\x76\x0e\x08\x21\x73\xa5\x51\x71\x4a\x19\xe7\x96\xa8\x40\x10\xe0\x0a\xfd\x3d\x90\xce\xb1\x49\x0a\x81\x84\xa7\x00\x65\x71\x52\x65\x58\x67\x5a\x66\xa0\x03\xac\x33\x41\xb0\x46\x50\x0e\xb4\x85\xde\xe5\x19\x1c\xef\xb0\xb9\x08\xa8\xc0\x59\x28\x8c\xd0\x16\xb6\x9a\xd4\x36\x81\xb0\x0a\x72\x14\x96\x62\xdb\xcf\xe3\xc0\xf2\x24\xe6\x06\xe3\x32\x77\x81\x1a\x36\x28\x1d\xc8\xbb\xd0\x3e\x81\x79\x49\xa0\xe9\xbf\xa1\x8a\xb7\x8e\x40\x1a\x14\x1e\x32\xb7\x8e\x41\xc6\x09\x55\x5f\x69\xe1\x5d\xfe\x28\x3c\xd6\x67\xad\x29\x73\x25\x41\x26\x56\xda\xde\x55\x09\xc8\x81\x2c\x03\xb9\x5c\x07\x8c\x71\x5b\xa2\xa6\x80\x66\xc1\x00\x9e\x71\xf4\xce\x5a\xcf\xd3\x7e\x4a\x68\x4c\x5d\x31\x18\xc0\xc2\x88\xbb\xd0\x3d\xae\x06\x67\x62\x9d\x42\xae\x8b\x3d\x9f\x26\x5b\x20\x17\xdf\x78\x34\xd6\x9e\xe7\x1a\xa8\x8a\x31\x62\x8e\x26\x34\x71\x97\xbd\xdf\x06\x97\x93\xcd\x89\x30\x45\x26\x3a\xdb\x83\x3b\xda\xa5\x7b\xb3\xe1\xc0\xf3\x27\xdb\x2c\x7a\x81\x55\x77\xed\xa3\xbb\xb6\x6c\x0e\x2c\x9c\xe2\xda\x2e\xbc\xe0\xd2\x59\x12\xda\x62\x9f\x9e\xeb\x5c\xdc\xc5\xa8\x38\x41\x06\x1f\x27\xb3\xc1\xc5\xe7\x59\xef\xe2\x62\x7c\x3d\x9a\x6e\x3a\x6a\xe9\x3b\x28\x7d\x67\x0b\xf7\x07\x1f\x7a\xd7\x97\xd3\xd9\xe7\xc1\xef\xc3\xf1\x68\x53\xef\x1e\x8c\x9d\x4d\x2c\x57\x5a\x88\x32\xe0\xf9\xab\xce\xab\xe8\xea\x79\xa9\x8d\xea\x9c\xd6\x22\xa4\x71\xa5\xe2\x85\x77\x2b\xad\xd0\x77\xc5\x3a\x34\x80\xd5\x7c\xae\x2d\x57\xda\x77\x53\x57\x50\x2a\xad\x8e\x7f\xd3\x7b\xb0\x74\x76\xb1\xc5\xe3\xbb\x44\xdc\x22\x75\x54\xc3\xd8\x5d\xca\x97\x36\x76\x41\x57\x39\xb9\x44\xdf\x94\x1b\x69\xed\xfc\x92\x17\xa6\xbc\xd3\xb6\x2b\xad\xae\x01\x8f\x77\x3a\x10\x7a\x1e\x4b\xd9\x25\x5f\xe2\x21\x10\x7d\xc8\x63\x6e\xda\xbd\xd4\xb4\x37\x1c\x4d\x77\x4f\x59\x75\xb5\xb3\x0b\x7d\xd7\x3d\xf4\xd4\x76\xbb\x73\x2f\x72\xf3\xa8\xf3\x47\xc4\x68\xbe\x86\xd5\x8e\x06\xdb\x8e\x88\xc7\xd1\x12\x6b\x19\xe7\x53\x65\xbc\x9b\x5f\x6f\x37\x09\x6b\xb3\x66\x90\x08\xff\x84\xc7\xfe\x0d\x00\x00\xff\xff\x76\x63\xcb\x48\xe3\x08\x00\x00")
func bindataAssetsBootstrapLegacyUbuntuShBytes() ([]byte, error) {
return bindataRead(
_bindataAssetsBootstrapLegacyUbuntuSh,
"bindata/assets/bootstrap.legacy.ubuntu.sh",
)
}
func bindataAssetsBootstrapLegacyUbuntuSh() (*asset, error) {
bytes, err := bindataAssetsBootstrapLegacyUbuntuShBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "bindata/assets/bootstrap.legacy.ubuntu.sh", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x38, 0xdc, 0xce, 0x7f, 0x6d, 0x30, 0x36, 0x5b, 0x8b, 0xc2, 0x38, 0x8d, 0x9e, 0xe4, 0xf8, 0x27, 0xd5, 0x77, 0x77, 0x8b, 0xaf, 0x59, 0xb3, 0x1a, 0xcd, 0x96, 0xe1, 0xcd, 0xc6, 0x2b, 0x19, 0x6b}}
return a, nil
}
var _bindataAssetsBootstrapUbuntuSh = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x6c\x91\xdf\x6b\xdb\x30\x10\xc7\xdf\xf5\x57\xdc\xb4\x82\x61\x20\x6b\x7b\xdd\xc3\x20\xeb\xbc\x52\xd6\x66\x23\x71\xa1\x90\x05\x23\x2b\x17\x5b\x89\x23\x69\x3a\x39\x04\x82\xff\xf7\x21\x86\xb3\x34\xc9\xe3\xdd\xf7\xc7\x7d\x6c\xbd\x7f\x27\x6b\x63\x65\xad\xa8\x65\x8c\x30\x82\x70\x80\x21\xe0\xc1\xc4\x71\xf4\xc6\xe3\x5a\x99\x6e\x9c\xad\xeb\x2d\x61\x64\x8c\x5c\x1f\x34\x82\xdc\xab\x20\x3b\x53\x4b\xdd\xb9\x7e\x25\x49\x07\xe3\x23\x49\xdc\x92\x8e\x9d\xac\x9d\x8b\x14\x83\xf2\x79\x8b\x9d\xc7\x90\xa7\x43\xa8\x5b\x07\xfc\x9f\xe3\x33\x84\xde\x5a\x63\x1b\x90\x18\x75\x8a\xfd\xcf\x70\x76\xbd\xcb\xa9\x05\x7e\x77\xbc\x7f\x7a\x99\x97\xc5\xac\x9a\x4e\x9e\x8b\x81\xc3\x6f\x06\x20\xc4\xca\x92\xd0\x5d\x4f\x11\x83\x30\xfe\xdc\xf6\x6d\x3a\x3f\xb9\xb6\x7d\x8d\x1d\x46\x81\x87\x18\x94\x50\xa1\xa1\xe4\xfc\xf1\xf2\xb5\x78\x2a\xca\xaa\x78\x2d\x67\x93\x6a\x32\x7b\x98\x0f\xfc\x92\x74\x87\xa1\x49\xa4\x3d\x61\x00\xe7\xa3\x71\x96\xc0\xd8\xe8\x60\xec\xd4\xce\xae\x4d\x93\x6f\xc8\x59\xce\x12\x2e\x64\x61\x07\x62\x0d\x77\xc7\xf2\xf9\x57\x95\x8e\x54\xf7\x3f\xa7\xdf\x87\x0c\x8a\xd7\xc7\x92\x6d\xfe\x80\x20\xc8\xf2\xc5\xc7\x25\x7c\x80\x7c\xf1\x69\x99\x9d\xc3\x24\xeb\xe3\xc3\xc0\xaf\x01\x4f\xca\x97\xa4\xbd\xed\xe6\x6c\xb7\xbf\xb1\xbd\x55\x7c\xf5\x16\x48\x51\x85\x98\x3e\xf2\xf4\x9b\xb6\xc4\x19\x59\xe5\x47\xf1\x5c\xb9\x88\xaf\x9c\x45\xce\xfe\x06\x00\x00\xff\xff\xad\x08\x49\xa5\x55\x02\x00\x00")
func bindataAssetsBootstrapUbuntuShBytes() ([]byte, error) {
return bindataRead(
_bindataAssetsBootstrapUbuntuSh,
"bindata/assets/bootstrap.ubuntu.sh",
)
}
func bindataAssetsBootstrapUbuntuSh() (*asset, error) {
bytes, err := bindataAssetsBootstrapUbuntuShBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "bindata/assets/bootstrap.ubuntu.sh", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6d, 0x94, 0xc2, 0xe1, 0x4f, 0x4c, 0xb5, 0x4d, 0x57, 0x4a, 0xa2, 0x51, 0x56, 0xf7, 0x8a, 0x83, 0xfe, 0xe7, 0x9c, 0xf3, 0x3f, 0x7b, 0xb6, 0x17, 0xc1, 0x29, 0x5b, 0x9c, 0x33, 0x76, 0xed, 0x28}}
return a, nil
}
var _bindataAssetsEfaAl2Sh = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\xd0\x4d\x6a\xc3\x40\x0c\x05\xe0\xbd\x4e\xa1\xd2\xb5\x46\x25\xdd\x15\xba\xea\x01\x7a\x84\xa0\xa4\x1a\x7b\xc0\xf3\xd3\x91\x8c\x93\x9c\xbe\x38\x4d\xe8\xc2\x34\x9b\x81\x37\x7c\x12\x3c\x3d\x3f\xf1\x21\x15\x3e\x88\x8d\x00\xa6\x8e\x54\x51\x7b\xd7\x53\xf2\x7b\x6c\xa9\x69\x94\x34\xdd\x73\xa9\x73\x31\x75\x80\xf3\x9c\x31\x15\x73\x99\x26\xa4\x33\x2e\x83\x3a\xac\x0f\xd2\x37\x12\x79\xca\x5a\x67\x7f\xdf\xbd\xe0\xe8\xde\xec\x8d\xd9\x5e\x69\x36\x5a\xd4\x9c\x76\x41\xb2\x5c\x6a\x91\xc5\xc2\xb1\x66\x96\xc5\x48\xa3\xd0\x6d\x9f\xf6\xed\x0f\x4d\xe2\x6a\x1e\x5c\x7a\x18\x2e\x48\x9f\xc8\x9e\xdb\xd6\xdd\x00\xb8\x74\xa4\x53\x7c\xac\x90\x3e\xae\x00\x8e\x5f\xff\x40\x08\xac\x51\xf6\x7f\x83\x36\xae\x6d\x69\x00\xae\xcd\xf9\xb7\xc6\x4a\xae\x87\x8c\x69\x9f\x4a\xac\x48\x0d\x35\x0a\xfc\x04\x00\x00\xff\xff\x8f\x52\xee\x9a\x5f\x01\x00\x00")
func bindataAssetsEfaAl2ShBytes() ([]byte, error) {
return bindataRead(
_bindataAssetsEfaAl2Sh,
"bindata/assets/efa.al2.sh",
)
}
func bindataAssetsEfaAl2Sh() (*asset, error) {
bytes, err := bindataAssetsEfaAl2ShBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "bindata/assets/efa.al2.sh", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8f, 0xd9, 0x1d, 0x49, 0x21, 0x82, 0x43, 0xb0, 0xeb, 0x4d, 0xa2, 0x59, 0x42, 0xc6, 0xba, 0xc9, 0xb5, 0x60, 0xcd, 0x1d, 0x96, 0x18, 0x48, 0x80, 0x1b, 0x66, 0xda, 0x3e, 0x7, 0x93, 0xdb, 0x9e}}
return a, nil
}
var _bindataAssetsEfaManagedBoothook = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x90\x4d\x4e\xc4\x30\x0c\x85\xf7\x39\x85\x2f\xe0\x04\x0d\x3b\x24\x56\x1c\x80\x23\x54\xa6\xe3\xb4\x91\x9a\x1f\x62\x47\x65\xe6\xf4\x28\xed\x88\x0d\xad\x60\x67\xf9\x3d\x7f\xcf\x7a\xe3\x92\xdb\x15\x43\x0a\x8a\x85\x2b\xe4\x34\x32\xdc\x5a\x1c\xd6\x89\xb5\x0f\x10\x92\x28\x2d\x0b\xe0\x0d\xfa\xce\x1c\x1d\x74\x61\x60\x4f\xdb\x00\xf8\x09\x88\x1a\x22\xe7\xa6\xaf\x97\x27\x98\x55\x8b\xbc\x38\x27\xcf\xd8\x04\x57\x16\xc5\x8b\xa5\x48\xf7\x9c\x68\x15\x3b\xe6\xe8\x68\x15\x64\x4f\xf8\x08\xe3\xfa\x7b\x83\x0b\x29\x8b\x5a\xa5\x6a\xa7\x3b\xe0\x3b\x38\x8d\xe5\x2f\x9f\x39\xfc\x57\xa9\x6e\xef\x2a\x55\xc0\x2f\xff\x2f\x12\xe0\xdb\xe6\x33\xa5\xc9\x7c\x3d\x39\x39\x4c\x7b\xa8\x5b\xa2\x75\xec\x69\xf8\xf1\x5b\x99\x7b\xb1\x38\x99\x92\xcb\x19\xf3\x10\xba\x63\x7c\x06\x97\x8b\xba\xbd\xcd\xce\x76\x1f\x21\x39\x1f\x76\x0d\x4b\xf7\x99\xef\x00\x00\x00\xff\xff\xd7\xd4\x31\xc2\xe4\x01\x00\x00")
func bindataAssetsEfaManagedBoothookBytes() ([]byte, error) {
return bindataRead(
_bindataAssetsEfaManagedBoothook,
"bindata/assets/efa.managed.boothook",
)
}
func bindataAssetsEfaManagedBoothook() (*asset, error) {
bytes, err := bindataAssetsEfaManagedBoothookBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "bindata/assets/efa.managed.boothook", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa6, 0x40, 0xf1, 0xae, 0x67, 0xa7, 0xe, 0xc, 0x31, 0x61, 0x86, 0x41, 0x8b, 0xd1, 0x55, 0x3f, 0x84, 0xbd, 0x4c, 0xdd, 0x84, 0xc9, 0xee, 0x9d, 0x36, 0xda, 0x8f, 0x6c, 0xbc, 0x6a, 0xaf, 0xed}}
return a, nil
}
var _bindataAssetsInstallSsmAl2Sh = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\xca\x41\x0a\x02\x31\x0c\x05\xd0\x7d\x4f\x11\x71\x5d\xe6\x4c\xa9\x44\x0d\xa4\xe9\xd0\xff\x07\xac\xa7\x77\x35\x2b\x61\x96\x0f\xde\xfd\xb6\x35\xcf\xad\x29\xde\xa5\xc0\x28\x75\x88\xcd\x69\x1f\xe7\xc9\xdd\x77\x7b\xaa\xc7\xe9\x1c\x47\xc2\x58\xca\x3a\xba\x78\x82\x1a\x21\x75\x89\x76\xfd\x8e\xac\x40\xaf\xfa\xb2\x64\xc1\x02\xad\x3f\x18\x62\xa9\x2d\xec\x6a\x80\x3a\xf9\x1f\x7e\x01\x00\x00\xff\xff\x93\x2c\xf6\x43\x9f\x00\x00\x00")
func bindataAssetsInstallSsmAl2ShBytes() ([]byte, error) {
return bindataRead(
_bindataAssetsInstallSsmAl2Sh,
"bindata/assets/install-ssm.al2.sh",
)
}
func bindataAssetsInstallSsmAl2Sh() (*asset, error) {
bytes, err := bindataAssetsInstallSsmAl2ShBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "bindata/assets/install-ssm.al2.sh", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x18, 0xf9, 0xf8, 0x5e, 0xb9, 0xcf, 0xfb, 0x94, 0xb4, 0x85, 0xa3, 0x62, 0xf0, 0x3b, 0x88, 0x44, 0xe3, 0x84, 0xfa, 0x85, 0x39, 0xdb, 0xed, 0xa2, 0x6a, 0x1, 0x7b, 0xe5, 0x49, 0x21, 0xef, 0x3d}}
return a, nil
}
var _bindataAssetsKubeletYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x6c\x91\xc1\xca\xdb\x30\x10\x84\xef\x7a\x8a\x85\x5e\x8b\xed\xb4\x04\x5a\xdd\xd2\x84\xf6\xd0\x40\xa0\x71\xdb\xf3\x5a\x1a\x37\xc2\xb2\x36\x48\xeb\xa4\xed\xd3\x97\xd8\xee\x0f\x81\x1f\x9d\x86\x99\x61\x3e\x49\x6f\xa8\x3d\x1d\x4e\x74\xc0\x35\xc3\xb1\xc2\xbf\xa5\x7b\x88\x91\x3a\x50\xc6\x28\x37\x78\x2a\x22\xc9\x0c\x21\x79\x4b\x5f\xa7\x0e\x11\xba\x97\xd4\x87\x5f\x53\x66\x0d\x92\x0c\x5f\xc3\x0f\xe4\x12\x24\x59\x1a\x96\x40\xe5\xe6\x44\x35\x7c\x28\x55\x90\xfa\xb6\xe9\xa0\xbc\x31\x86\xbd\xcf\x28\xc5\x52\x53\xcd\xc7\xb8\x38\x15\x45\x3e\xc8\xc8\x21\x59\x5a\x65\x15\xc5\x71\x34\x86\x27\xbd\x20\x69\x70\xf3\x90\x35\x44\x9c\x24\xfd\x19\x65\x2a\x0f\x41\x84\xc4\x5d\x84\xb7\xd4\x73\x2c\x30\x44\x77\x74\x17\x91\x61\x71\x1d\xbb\x0b\xda\xf6\x68\xe9\xdd\xd8\x94\xe7\x82\xe6\xe9\x91\xff\xbd\x6d\x3e\xae\xe1\x18\x90\x74\xbf\xfb\x1c\x22\x2c\xd5\x50\x57\x63\x28\x4e\x63\xed\xb8\x72\x59\x17\x1a\xc9\xe1\xef\x0b\xcc\x28\x1e\x96\x7e\x2e\x93\xaf\x8e\xef\xd6\x0a\xfc\x8c\xb1\xfd\x8f\x31\x9b\xdf\x13\x3f\xdb\xef\x9b\x62\x4c\x41\xbe\x21\xb7\xc7\xf3\x27\x11\x2d\x9a\xf9\xba\xc2\x9a\x1e\xac\x53\xc6\x17\x56\xcc\xd7\xff\x26\xca\x8a\xf5\x4b\xce\x73\x6d\x8f\xac\xa1\x7f\xbc\x17\xd6\xd6\xbf\x00\x00\x00\xff\xff\x46\x42\xbb\xf2\xe0\x01\x00\x00")
func bindataAssetsKubeletYamlBytes() ([]byte, error) {
return bindataRead(
_bindataAssetsKubeletYaml,
"bindata/assets/kubelet.yaml",
)
}
func bindataAssetsKubeletYaml() (*asset, error) {
bytes, err := bindataAssetsKubeletYamlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "bindata/assets/kubelet.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x90, 0x31, 0x7a, 0x5b, 0xa3, 0xbf, 0x93, 0x44, 0x7, 0x84, 0xfb, 0x4e, 0x10, 0x44, 0x47, 0xaa, 0xd1, 0x9, 0x71, 0x33, 0xdc, 0x75, 0x6b, 0x7c, 0x1c, 0x25, 0x25, 0xfa, 0x82, 0x6d, 0xc4, 0xf}}
return a, nil
}
// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
}
return a.bytes, nil
}
return nil, fmt.Errorf("Asset %s not found", name)
}
// AssetString returns the asset contents as a string (instead of a []byte).
func AssetString(name string) (string, error) {
data, err := Asset(name)
return string(data), err
}
// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
a, err := Asset(name)
if err != nil {
panic("asset: Asset(" + name + "): " + err.Error())
}
return a
}
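// For example (illustrative), MustAsset can back a package-level variable:
//   var kubeletConfig = MustAsset("bindata/assets/kubelet.yaml")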
// MustAssetString is like AssetString but panics when Asset would return an
// error. It simplifies safe initialization of global variables.
func MustAssetString(name string) string {
return string(MustAsset(name))
}
// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func | (name string) (os.FileInfo, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
}
return a.info, nil
}
return nil, fmt.Errorf("AssetInfo %s not found", name)
}
// AssetDigest returns the digest of the file with the given name. It returns an
// error if the asset could not be found or the digest could not be loaded.
func AssetDigest(name string) ([sha256.Size]byte, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
}
return a.digest, nil
}
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
}
// Digests returns a map of all known files and their checksums.
func Digests() (map[string][sha256.Size]byte, error) {
mp := make(map[string][sha256.Size]byte, len(_bindata))
for name := range _bindata {
a, err := _bindata[name]()
if err != nil {
return nil, err
}
mp[name] = a.digest
}
return mp, nil
}
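// Illustrative use, e.g. to verify the embedded assets at startup:
//   sums, _ := Digests()
//   fmt.Printf("%x\n", sums["bindata/assets/kubelet.yaml"])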
// AssetNames returns the names of the assets.
func AssetNames() []string {
names := make([]string, 0, len(_bindata))
for name := range _bindata {
names = append(names, name)
}
return names
}
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
"bindata/assets/10-eksctl.al2.conf": bindataAssets10EksctlAl2Conf,
"bindata/assets/bootstrap.al2.sh": bindataAssetsBootstrapAl2Sh,
"bindata/assets/bootstrap.helper.sh": bindataAssetsBootstrapHelperSh,
"bindata/assets/bootstrap.legacy.al2.sh": bindataAssetsBootstrapLegacyAl2Sh,
"bindata/assets/bootstrap.legacy.ubuntu.sh": bindataAssetsBootstrapLegacyUbuntuSh,
"bindata/assets/bootstrap.ubuntu.sh": bindataAssetsBootstrapUbuntuSh,
"bindata/assets/efa.al2.sh": bindataAssetsEfaAl2Sh,
"bindata/assets/efa.managed.boothook": bindataAssetsEfaManagedBoothook,
"bindata/assets/install-ssm.al2.sh": bindataAssetsInstallSsmAl2Sh,
"bindata/assets/kubelet.yaml": bindataAssetsKubeletYaml,
}
// AssetDebug is true if the assets were built with the debug flag enabled.
const AssetDebug = false
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
// data/
// foo.txt
// img/
// a.png
// b.png
// then AssetDir("data") would return []string{"foo.txt", "img"},
// AssetDir("data/img") would return []string{"a.png", "b.png"},
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
node := _bintree
if len(name) != 0 {
canonicalName := strings.Replace(name, "\\", "/", -1)
pathList := strings.Split(canonicalName, "/")
for _, p := range pathList {
node = node.Children[p]
if node == nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
}
}
if node.Func != nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
rv := make([]string, 0, len(node.Children))
for childName := range node.Children {
rv = append(rv, childName)
}
return rv, nil
}
type bintree struct {
Func func() (*asset, error)
Children map[string]*bintree
}
var _bintree = &bintree{nil, map[string]*bintree{
"bindata": {nil, map[string]*bintree{
"assets": {nil, map[string]*bintree{
"10-eksctl.al2.conf": {bindataAssets10EksctlAl2Conf, map[string]*bintree{}},
"bootstrap.al2.sh": {bindataAssetsBootstrapAl2Sh, map[string]*bintree{}},
"bootstrap.helper.sh": {bindataAssetsBootstrapHelperSh, map[string]*bintree{}},
"bootstrap.legacy.al2.sh": {bindataAssetsBootstrapLegacyAl2Sh, map[string]*bintree{}},
"bootstrap.legacy.ubuntu.sh": {bindataAssetsBootstrapLegacyUbuntuSh, map[string]*bintree{}},
"bootstrap.ubuntu.sh": {bindataAssetsBootstrapUbuntuSh, map[string]*bintree{}},
"efa.al2.sh": {bindataAssetsEfaAl2Sh, map[string]*bintree{}},
"efa.managed.boothook": {bindataAssetsEfaManagedBoothook, map[string]*bintree{}},
"install-ssm.al2.sh": {bindataAssetsInstallSsmAl2Sh, map[string]*bintree{}},
"kubelet.yaml": {bindataAssetsKubeletYaml, map[string]*bintree{}},
}},
}},
}}
// RestoreAsset restores an asset under the given directory.
func RestoreAsset(dir, name string) error {
data, err := Asset(name)
if err != nil {
return err
}
info, err := AssetInfo(name)
if err != nil {
return err
}
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
if err != nil {
return err
}
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}
return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
}
// RestoreAssets restores an asset under the given directory recursively.
func RestoreAssets(dir, name string) error {
children, err := AssetDir(name)
// File
if err != nil {
return RestoreAsset(dir, name)
}
// Dir
for _, child := range children {
err = RestoreAssets(dir, filepath.Join(name, child))
if err != nil {
return err
}
}
return nil
}
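// Illustrative use: RestoreAssets("/tmp/out", "bindata/assets") recreates the
// embedded directory tree on disk.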
func _filePath(dir, name string) string {
canonicalName := strings.Replace(name, "\\", "/", -1)
return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
}
| AssetInfo |
enum.rs | // Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
use serde::Serialize;
use swc_ecma_ast;
use super::parser::DocParser;
#[derive(Debug, Serialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct EnumMemberDef { | #[derive(Debug, Serialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct EnumDef {
pub members: Vec<EnumMemberDef>,
}
pub fn get_doc_for_ts_enum_decl(
_doc_parser: &DocParser,
enum_decl: &swc_ecma_ast::TsEnumDecl,
) -> (String, EnumDef) {
let enum_name = enum_decl.id.sym.to_string();
let mut members = vec![];
for enum_member in &enum_decl.members {
use swc_ecma_ast::TsEnumMemberId::*;
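// A TS enum member id is either an identifier (`Foo`) or a string literal
// (`"foo-bar"`); both are normalized to a plain String here.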
let member_name = match &enum_member.id {
Ident(ident) => ident.sym.to_string(),
Str(str_) => str_.value.to_string(),
};
let member_def = EnumMemberDef { name: member_name };
members.push(member_def);
}
let enum_def = EnumDef { members };
(enum_name, enum_def)
} | pub name: String,
}
|
fake_liveMpc.py | #!/usr/bin/env python3
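# Publishes synthetic liveMpc messages so consumers of the socket can be
# exercised without running the real MPC.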
import cereal.messaging as messaging
def | ():
liveMpc = messaging.pub_sock('liveMpc')
while True:
m = messaging.new_message('liveMpc')
mx = []
for x in range(0, 100):
mx.append(x*1.0)
m.liveMpc.x = mx
liveMpc.send(m.to_bytes())
if __name__=="__main__":
mock_x()
| mock_x |
ctiappclear.rs | #[doc = "Register `CTIAPPCLEAR` writer"]
pub struct W(crate::W<CTIAPPCLEAR_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<CTIAPPCLEAR_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<CTIAPPCLEAR_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<CTIAPPCLEAR_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Sets the corresponding bits in the CTIAPPSET to 0. There is one bit of the register for each channel.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum APPCLEAR_0_AW {
#[doc = "1: Clears the event for channel 0."]
CLEAR = 1,
}
impl From<APPCLEAR_0_AW> for bool {
#[inline(always)]
fn from(variant: APPCLEAR_0_AW) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `APPCLEAR_0` writer - Sets the corresponding bits in the CTIAPPSET to 0. There is one bit of the register for each channel."]
pub struct APPCLEAR_0_W<'a> {
w: &'a mut W,
}
impl<'a> APPCLEAR_0_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: APPCLEAR_0_AW) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Clears the event for channel 0."]
#[inline(always)]
pub fn clear(self) -> &'a mut W {
self.variant(APPCLEAR_0_AW::CLEAR)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01);
self.w
}
}
#[doc = "Sets the corresponding bits in the CTIAPPSET to 0. There is one bit of the register for each channel.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum APPCLEAR_1_AW {
#[doc = "1: Clears the event for channel 1."]
CLEAR = 1,
}
impl From<APPCLEAR_1_AW> for bool {
#[inline(always)]
fn from(variant: APPCLEAR_1_AW) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `APPCLEAR_1` writer - Sets the corresponding bits in the CTIAPPSET to 0. There is one bit of the register for each channel."]
pub struct APPCLEAR_1_W<'a> {
w: &'a mut W,
}
impl<'a> APPCLEAR_1_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: APPCLEAR_1_AW) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Clears the event for channel 1."]
#[inline(always)]
pub fn clear(self) -> &'a mut W {
self.variant(APPCLEAR_1_AW::CLEAR)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u32 & 0x01) << 1);
self.w
}
}
#[doc = "Sets the corresponding bits in the CTIAPPSET to 0. There is one bit of the register for each channel.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum APPCLEAR_2_AW {
#[doc = "1: Clears the event for channel 2."]
CLEAR = 1,
}
impl From<APPCLEAR_2_AW> for bool {
#[inline(always)]
fn from(variant: APPCLEAR_2_AW) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `APPCLEAR_2` writer - Sets the corresponding bits in the CTIAPPSET to 0. There is one bit of the register for each channel."]
pub struct APPCLEAR_2_W<'a> {
w: &'a mut W,
}
impl<'a> APPCLEAR_2_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: APPCLEAR_2_AW) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Clears the event for channel 2."]
#[inline(always)]
pub fn clear(self) -> &'a mut W {
self.variant(APPCLEAR_2_AW::CLEAR)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u32 & 0x01) << 2);
self.w
}
}
#[doc = "Sets the corresponding bits in the CTIAPPSET to 0. There is one bit of the register for each channel.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum APPCLEAR_3_AW {
#[doc = "1: Clears the event for channel 3."]
CLEAR = 1,
}
impl From<APPCLEAR_3_AW> for bool {
#[inline(always)]
fn from(variant: APPCLEAR_3_AW) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `APPCLEAR_3` writer - Sets the corresponding bits in the CTIAPPSET to 0. There is one bit of the register for each channel."]
pub struct APPCLEAR_3_W<'a> {
w: &'a mut W,
}
impl<'a> APPCLEAR_3_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)] | pub fn variant(self, variant: APPCLEAR_3_AW) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Clears the event for channel 3."]
#[inline(always)]
pub fn clear(self) -> &'a mut W {
self.variant(APPCLEAR_3_AW::CLEAR)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 3)) | ((value as u32 & 0x01) << 3);
self.w
}
}
impl W {
#[doc = "Bit 0 - Sets the corresponding bits in the CTIAPPSET to 0. There is one bit of the register for each channel."]
#[inline(always)]
pub fn appclear_0(&mut self) -> APPCLEAR_0_W {
APPCLEAR_0_W { w: self }
}
#[doc = "Bit 1 - Sets the corresponding bits in the CTIAPPSET to 0. There is one bit of the register for each channel."]
#[inline(always)]
pub fn appclear_1(&mut self) -> APPCLEAR_1_W {
APPCLEAR_1_W { w: self }
}
#[doc = "Bit 2 - Sets the corresponding bits in the CTIAPPSET to 0. There is one bit of the register for each channel."]
#[inline(always)]
pub fn appclear_2(&mut self) -> APPCLEAR_2_W {
APPCLEAR_2_W { w: self }
}
#[doc = "Bit 3 - Sets the corresponding bits in the CTIAPPSET to 0. There is one bit of the register for each channel."]
#[inline(always)]
pub fn appclear_3(&mut self) -> APPCLEAR_3_W {
APPCLEAR_3_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
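// Illustrative use via the usual svd2rust register API (peripheral handle
// name assumed):
//   cti.ctiappclear.write(|w| w.appclear_0().clear().appclear_1().clear());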
#[doc = "CTI Application Trigger Clear register\n\nThis register you can [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [ctiappclear](index.html) module"]
pub struct CTIAPPCLEAR_SPEC;
impl crate::RegisterSpec for CTIAPPCLEAR_SPEC {
type Ux = u32;
}
#[doc = "`write(|w| ..)` method takes [ctiappclear::W](W) writer structure"]
impl crate::Writable for CTIAPPCLEAR_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets CTIAPPCLEAR to value 0"]
impl crate::Resettable for CTIAPPCLEAR_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
} | |
tsh.go | /*
Copyright 2016 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"os/signal"
"path"
"runtime"
"strings"
"syscall"
"time"
"golang.org/x/crypto/ssh"
"github.com/gravitational/teleport"
"github.com/gravitational/teleport/lib/asciitable"
"github.com/gravitational/teleport/lib/client"
"github.com/gravitational/teleport/lib/defaults"
kubeclient "github.com/gravitational/teleport/lib/kube/client"
"github.com/gravitational/teleport/lib/session"
"github.com/gravitational/teleport/lib/sshutils"
"github.com/gravitational/teleport/lib/utils"
"github.com/gravitational/trace"
gops "github.com/google/gops/agent"
"github.com/jonboulle/clockwork"
"github.com/sirupsen/logrus"
)
// CLIConf stores command line arguments and flags:
type CLIConf struct {
// UserHost contains "[login]@hostname" argument to SSH command
UserHost string
// Commands to execute on a remote host
RemoteCommand []string
// Username is the Teleport user's username (used to log in to proxies)
Username string
// Proxy keeps the hostname:port of the SSH proxy to use
Proxy string
// TTL defines how long a session must be active (in minutes)
MinsToLive int32
// SSH Port on a remote SSH host
NodePort int32
// Login on a remote SSH host
NodeLogin string
// InsecureSkipVerify bypasses verification of HTTPS certificate when talking to web proxy
InsecureSkipVerify bool
// IsUnderTest is set to true for unit testing
IsUnderTest bool
// AgentSocketAddr is the address of the agent's listening socket
AgentSocketAddr utils.NetAddrVal
// Remote SSH session to join
SessionID string
// Src:dest parameter for SCP
CopySpec []string
// -r flag for scp
RecursiveCopy bool
// -L flag for ssh. Local port forwarding like 'ssh -L 80:remote.host:80 -L 443:remote.host:443'
LocalForwardPorts []string
// ForwardAgent agent to target node. Equivalent of -A for OpenSSH.
ForwardAgent bool
// --local flag for ssh
LocalExec bool
// SiteName specifies the remote site to log in to
SiteName string
// Interactive, when set to true, launches remote command with the terminal attached
Interactive bool
// Quiet mode, -q command (disables progress printing)
Quiet bool
// Namespace is used to select cluster namespace
Namespace string
// NoCache is used to turn off client cache for nodes discovery
NoCache bool
// LoadSystemAgentOnly when set to true will cause tsh agent to load keys into the system agent and
// then exit. This is useful when calling tsh agent from a script (for example ~/.bash_profile)
// to load keys into your system agent.
LoadSystemAgentOnly bool
// BenchThreads is amount of concurrent threads to run
BenchThreads int
// BenchDuration is a duration for the benchmark
BenchDuration time.Duration
// BenchRate is the requests-per-second rate to maintain
BenchRate int
// BenchInteractive indicates that we should create interactive session
BenchInteractive bool
// Context is a context to control execution
Context context.Context
// Gops starts gops agent on a specified address
// if not specified, gops won't start
Gops bool
// GopsAddr specifies to gops addr to listen on
GopsAddr string
// IdentityFileIn is an argument to -i flag (path to the private key+cert file)
IdentityFileIn string
// Compatibility flags, --compat, specifies OpenSSH compatibility flags.
Compatibility string
// CertificateFormat defines the format of the user SSH certificate.
CertificateFormat string
// IdentityFileOut is an argument to -out flag
IdentityFileOut string
// IdentityFormat (used for --format flag for 'tsh login') defines which
// format to use with --out to store a freshly retrieved certificate
IdentityFormat client.IdentityFileFormat
// AuthConnector is the name of the connector to use.
AuthConnector string
// SkipVersionCheck skips version checking for client and server
SkipVersionCheck bool
}
func main() {
cmd_line_orig := os.Args[1:]
cmd_line := []string{}
// lets see: if the executable name is 'ssh' or 'scp' we convert
// that to "tsh ssh" or "tsh scp"
switch path.Base(os.Args[0]) {
case "ssh":
cmd_line = append([]string{"ssh"}, cmd_line_orig...)
case "scp":
cmd_line = append([]string{"scp"}, cmd_line_orig...)
default:
cmd_line = cmd_line_orig
}
Run(cmd_line, false)
}
const (
clusterEnvVar = "TELEPORT_SITE"
clusterHelp = "Specify the cluster to connect"
)
// Run executes TSH client. same as main() but easier to test
func Run(args []string, underTest bool) {
var cf CLIConf
cf.IsUnderTest = underTest
utils.InitLogger(utils.LoggingForCLI, logrus.WarnLevel)
// configure CLI argument parser:
app := utils.InitCLIParser("tsh", "TSH: Teleport SSH client").Interspersed(false)
app.Flag("login", "Remote host login").Short('l').Envar("TELEPORT_LOGIN").StringVar(&cf.NodeLogin)
localUser, _ := client.Username()
app.Flag("proxy", "SSH proxy address").Envar("TELEPORT_PROXY").StringVar(&cf.Proxy)
app.Flag("nocache", "do not cache cluster discovery locally").Hidden().BoolVar(&cf.NoCache)
app.Flag("user", fmt.Sprintf("SSH proxy user [%s]", localUser)).Envar("TELEPORT_USER").StringVar(&cf.Username)
app.Flag("ttl", "Minutes to live for a SSH session").Int32Var(&cf.MinsToLive)
app.Flag("identity", "Identity file").Short('i').StringVar(&cf.IdentityFileIn)
app.Flag("compat", "OpenSSH compatibility flag").Hidden().StringVar(&cf.Compatibility)
app.Flag("cert-format", "SSH certificate format").StringVar(&cf.CertificateFormat)
app.Flag("insecure", "Do not verify server's certificate and host name. Use only in test environments").Default("false").BoolVar(&cf.InsecureSkipVerify)
app.Flag("auth", "Specify the type of authentication connector to use.").StringVar(&cf.AuthConnector)
app.Flag("namespace", "Namespace of the cluster").Default(defaults.Namespace).Hidden().StringVar(&cf.Namespace)
app.Flag("gops", "Start gops endpoint on a given address").Hidden().BoolVar(&cf.Gops)
app.Flag("gops-addr", "Specify gops addr to listen on").Hidden().StringVar(&cf.GopsAddr)
app.Flag("skip-version-check", "Skip version checking between server and client.").Hidden().BoolVar(&cf.SkipVersionCheck)
debugMode := app.Flag("debug", "Verbose logging to stdout").Short('d').Bool()
app.HelpFlag.Short('h')
ver := app.Command("version", "Print the version")
// ssh
ssh := app.Command("ssh", "Run shell or execute a command on a remote SSH node")
ssh.Arg("[user@]host", "Remote hostname and the login to use").Required().StringVar(&cf.UserHost)
ssh.Arg("command", "Command to execute on a remote host").StringsVar(&cf.RemoteCommand)
ssh.Flag("port", "SSH port on a remote host").Short('p').Int32Var(&cf.NodePort)
ssh.Flag("forward-agent", "Forward agent to target node").Short('A').BoolVar(&cf.ForwardAgent)
ssh.Flag("forward", "Forward localhost connections to remote server").Short('L').StringsVar(&cf.LocalForwardPorts)
ssh.Flag("local", "Execute command on localhost after connecting to SSH node").Default("false").BoolVar(&cf.LocalExec)
ssh.Flag("tty", "Allocate TTY").Short('t').BoolVar(&cf.Interactive)
ssh.Flag("cluster", clusterHelp).Envar(clusterEnvVar).StringVar(&cf.SiteName)
// join
join := app.Command("join", "Join the active SSH session")
join.Flag("cluster", clusterHelp).Envar(clusterEnvVar).StringVar(&cf.SiteName)
join.Arg("session-id", "ID of the session to join").Required().StringVar(&cf.SessionID)
// play
play := app.Command("play", "Replay the recorded SSH session")
play.Flag("cluster", clusterHelp).Envar(clusterEnvVar).StringVar(&cf.SiteName)
play.Arg("session-id", "ID of the session to play").Required().StringVar(&cf.SessionID)
// scp
scp := app.Command("scp", "Secure file copy")
scp.Flag("cluster", clusterHelp).Envar(clusterEnvVar).StringVar(&cf.SiteName)
scp.Arg("from, to", "Source and destination to copy").Required().StringsVar(&cf.CopySpec)
scp.Flag("recursive", "Recursive copy of subdirectories").Short('r').BoolVar(&cf.RecursiveCopy)
scp.Flag("port", "Port to connect to on the remote host").Short('P').Int32Var(&cf.NodePort)
scp.Flag("quiet", "Quiet mode").Short('q').BoolVar(&cf.Quiet)
// ls
ls := app.Command("ls", "List remote SSH nodes")
ls.Flag("cluster", clusterHelp).Envar(clusterEnvVar).StringVar(&cf.SiteName)
ls.Arg("labels", "List of labels to filter node list").StringVar(&cf.UserHost)
// clusters
clusters := app.Command("clusters", "List available Teleport clusters")
clusters.Flag("quiet", "Quiet mode").Short('q').BoolVar(&cf.Quiet)
// login logs in with remote proxy and obtains a "session certificate" which gets
// stored in ~/.tsh directory
login := app.Command("login", "Log in to a cluster and retrieve the session certificate")
login.Flag("out", "Identity output").Short('o').StringVar(&cf.IdentityFileOut)
login.Flag("format", fmt.Sprintf("Identity format [%s] or %s (for OpenSSH compatibility)",
client.DefaultIdentityFormat,
client.IdentityFormatOpenSSH)).Default(string(client.DefaultIdentityFormat)).StringVar((*string)(&cf.IdentityFormat))
login.Arg("cluster", clusterHelp).StringVar(&cf.SiteName)
login.Alias(loginUsageFooter)
// logout deletes obtained session certificates in ~/.tsh
logout := app.Command("logout", "Delete a cluster certificate")
// bench
bench := app.Command("bench", "Run shell or execute a command on a remote SSH node").Hidden()
bench.Flag("cluster", clusterHelp).Envar(clusterEnvVar).StringVar(&cf.SiteName)
bench.Arg("[user@]host", "Remote hostname and the login to use").Required().StringVar(&cf.UserHost)
bench.Arg("command", "Command to execute on a remote host").Required().StringsVar(&cf.RemoteCommand)
bench.Flag("port", "SSH port on a remote host").Short('p').Int32Var(&cf.NodePort)
bench.Flag("threads", "Concurrent threads to run").Default("10").IntVar(&cf.BenchThreads)
bench.Flag("duration", "Test duration").Default("1s").DurationVar(&cf.BenchDuration)
bench.Flag("rate", "Requests per second rate").Default("10").IntVar(&cf.BenchRate)
bench.Flag("interactive", "Create interactive SSH session").BoolVar(&cf.BenchInteractive)
// show key
show := app.Command("show", "Read an identity from file and print to stdout").Hidden()
show.Arg("identity_file", "The file containing a public key or a certificate").Required().StringVar(&cf.IdentityFileIn)
// The status command shows which proxy the user is logged into and metadata
// about the certificate.
status := app.Command("status", "Display the list of proxy servers and retrieved certificates")
// On Windows, hide the "ssh", "join", "play", "scp", and "bench" commands
// because they all use a terminal.
if runtime.GOOS == teleport.WindowsOS {
ssh.Hidden()
join.Hidden()
play.Hidden()
scp.Hidden()
bench.Hidden()
}
// parse CLI commands+flags:
command, err := app.Parse(args)
if err != nil {
utils.FatalError(err)
}
// apply -d flag:
if *debugMode {
utils.InitLogger(utils.LoggingForCLI, logrus.DebugLevel)
}
ctx, cancel := context.WithCancel(context.Background())
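// Cancel the context on SIGTERM/SIGINT so in-flight commands can shut down.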
go func() {
exitSignals := make(chan os.Signal, 1)
signal.Notify(exitSignals, syscall.SIGTERM, syscall.SIGINT)
select {
case sig := <-exitSignals:
logrus.Debugf("signal: %v", sig)
cancel()
}
}()
cf.Context = ctx
if cf.Gops {
logrus.Debugf("starting gops agent")
err = gops.Listen(&gops.Options{Addr: cf.GopsAddr})
if err != nil {
logrus.Warningf("failed to start gops agent %v", err)
}
}
switch command {
case ver.FullCommand():
utils.PrintVersion()
case ssh.FullCommand():
onSSH(&cf)
case bench.FullCommand():
onBenchmark(&cf)
case join.FullCommand():
onJoin(&cf)
case scp.FullCommand():
onSCP(&cf)
case play.FullCommand():
onPlay(&cf)
case ls.FullCommand():
onListNodes(&cf)
case clusters.FullCommand():
onListSites(&cf)
case login.FullCommand():
onLogin(&cf)
case logout.FullCommand():
refuseArgs(logout.FullCommand(), args)
onLogout(&cf)
case show.FullCommand():
onShow(&cf)
case status.FullCommand():
onStatus(&cf)
}
}
// onPlay replays a session with a given ID
func onPlay(cf *CLIConf) {
tc, err := makeClient(cf, true)
if err != nil {
utils.FatalError(err)
}
if err := tc.Play(context.TODO(), cf.Namespace, cf.SessionID); err != nil {
utils.FatalError(err)
}
}
// onLogin logs in with remote proxy and gets signed certificates
func onLogin(cf *CLIConf) {
var (
err error
tc *client.TeleportClient
key *client.Key
)
if cf.IdentityFileIn != "" {
utils.FatalError(trace.BadParameter("-i flag cannot be used here"))
}
if cf.IdentityFormat != client.IdentityFormatOpenSSH && cf.IdentityFormat != client.IdentityFormatFile {
utils.FatalError(trace.BadParameter("invalid identity format: %s", cf.IdentityFormat))
}
// Get the status of the active profile ~/.tsh/profile as well as the status
// of any other proxies the user is logged into.
profile, profiles, err := client.Status("", cf.Proxy)
if err != nil {
if !trace.IsNotFound(err) {
utils.FatalError(err)
}
}
// make the teleport client and retrieve the certificate from the proxy:
tc, err = makeClient(cf, true)
if err != nil {
utils.FatalError(err)
}
// client is already logged in and profile is not expired
if profile != nil && !profile.IsExpired(clockwork.NewRealClock()) {
switch {
// if nothing is specified, print the current status
case cf.Proxy == "" && cf.SiteName == "":
printProfiles(profile, profiles)
return
// if the parameters match, print the current status
case host(cf.Proxy) == host(profile.ProxyURL.Host) && cf.SiteName == profile.Cluster:
printProfiles(profile, profiles)
return
// proxy is unspecified or the same as the currently provided proxy,
// but cluster is specified, treat this as selecting a new cluster
// for the same proxy
case (cf.Proxy == "" || host(cf.Proxy) == host(profile.ProxyURL.Host)) && cf.SiteName != "":
tc.SaveProfile("")
if err := kubeclient.UpdateKubeconfig(tc); err != nil {
utils.FatalError(err)
}
onStatus(cf)
return
// otherwise just passthrough to standard login
default:
}
}
if cf.Username == "" {
cf.Username = tc.Username
}
// -i flag specified? save the retrieved cert into an identity file
makeIdentityFile := (cf.IdentityFileOut != "")
activateKey := !makeIdentityFile
if key, err = tc.Login(cf.Context, activateKey); err != nil {
utils.FatalError(err)
}
if makeIdentityFile {
client.MakeIdentityFile(cf.IdentityFileOut, key, cf.IdentityFormat)
fmt.Printf("\nThe certificate has been written to %s\n", cf.IdentityFileOut)
return
}
// update kubernetes config file
if err := kubeclient.UpdateKubeconfig(tc); err != nil {
utils.FatalError(err)
}
// regular login (without -i flag)
tc.SaveProfile("")
onStatus(cf)
}
// onLogout deletes a "session certificate" from ~/.tsh for a given proxy
func onLogout(cf *CLIConf) {
client.UnlinkCurrentProfile()
// extract the proxy name
proxyHost, _, err := net.SplitHostPort(cf.Proxy)
if err != nil {
proxyHost = cf.Proxy
}
switch {
// proxy and username for key to remove
case proxyHost != "" && cf.Username != "":
tc, err := makeClient(cf, true)
if err != nil {
utils.FatalError(err)
return
}
// Remove keys for this user from disk and running agent.
err = tc.Logout()
if err != nil {
if trace.IsNotFound(err) {
fmt.Printf("User %v already logged out from %v.\n", cf.Username, proxyHost)
os.Exit(1)
}
utils.FatalError(err)
return
}
fmt.Printf("Logged out %v from %v.\n", cf.Username, proxyHost)
// remove all keys
case proxyHost == "" && cf.Username == "":
// The makeClient function requires a proxy. However this value is not used
// because the user will be logged out from all proxies. Pass a dummy value
// to allow creation of the TeleportClient.
cf.Proxy = "dummy:1234"
tc, err := makeClient(cf, true)
if err != nil {
utils.FatalError(err)
return
}
// Remove all keys from disk and the running agent.
err = tc.LogoutAll()
if err != nil {
utils.FatalError(err)
return
}
fmt.Printf("Logged out all users from all proxies.\n")
default:
fmt.Printf("Specify --proxy and --user to remove keys for specific user ")
fmt.Printf("from a proxy or neither to log out all users from all proxies.\n")
}
}
// onListNodes executes 'tsh ls' command
func onListNodes(cf *CLIConf) {
tc, err := makeClient(cf, true)
if err != nil {
utils.FatalError(err)
}
nodes, err := tc.ListNodes(context.TODO())
if err != nil {
utils.FatalError(err)
}
t := asciitable.MakeTable([]string{"Node Name", "Node ID", "Address", "Labels"})
for _, n := range nodes {
t.AddRow([]string{
n.GetHostname(), n.GetName(), n.GetAddr(), n.LabelsString(),
})
}
fmt.Println(t.AsBuffer().String())
}
// onListSites executes 'tsh sites' command
func onListSites(cf *CLIConf) {
tc, err := makeClient(cf, true)
if err != nil {
utils.FatalError(err)
}
proxyClient, err := tc.ConnectToProxy(cf.Context)
if err != nil {
utils.FatalError(err)
}
defer proxyClient.Close()
sites, err := proxyClient.GetSites()
if err != nil {
utils.FatalError(err)
}
var t asciitable.Table
if cf.Quiet {
t = asciitable.MakeHeadlessTable(2)
} else {
t = asciitable.MakeTable([]string{"Cluster Name", "Status"})
}
if len(sites) == 0 {
return
}
for _, site := range sites {
t.AddRow([]string{site.Name, site.Status})
}
fmt.Println(t.AsBuffer().String())
}
// onSSH executes 'tsh ssh' command
func onSSH(cf *CLIConf) {
tc, err := makeClient(cf, false)
if err != nil {
utils.FatalError(err)
}
tc.Stdin = os.Stdin
if err = tc.SSH(cf.Context, cf.RemoteCommand, cf.LocalExec); err != nil {
// exit with the same exit status as the failed command:
if tc.ExitStatus != 0 {
fmt.Fprintln(os.Stderr, utils.UserMessageFromError(err))
os.Exit(tc.ExitStatus)
} else {
utils.FatalError(err)
}
}
}
// onBenchmark executes benchmark
func onBenchmark(cf *CLIConf) {
tc, err := makeClient(cf, false)
if err != nil {
utils.FatalError(err)
}
result, err := tc.Benchmark(cf.Context, client.Benchmark{
Command: cf.RemoteCommand,
Threads: cf.BenchThreads,
Duration: cf.BenchDuration,
Rate: cf.BenchRate,
})
if err != nil {
fmt.Fprintln(os.Stderr, utils.UserMessageFromError(err))
os.Exit(255)
}
fmt.Printf("\n")
fmt.Printf("* Requests originated: %v\n", result.RequestsOriginated)
fmt.Printf("* Requests failed: %v\n", result.RequestsFailed)
if result.LastError != nil {
fmt.Printf("* Last error: %v\n", result.LastError)
}
fmt.Printf("\nHistogram\n\n")
t := asciitable.MakeTable([]string{"Percentile", "Duration"})
for _, quantile := range []float64{25, 50, 75, 90, 95, 99, 100} {
t.AddRow([]string{fmt.Sprintf("%v", quantile),
fmt.Sprintf("%v ms", result.Histogram.ValueAtQuantile(quantile)),
})
}
io.Copy(os.Stdout, t.AsBuffer())
fmt.Printf("\n")
}
// onJoin executes 'ssh join' command
func onJoin(cf *CLIConf) {
tc, err := makeClient(cf, true)
if err != nil {
utils.FatalError(err)
}
sid, err := session.ParseID(cf.SessionID)
if err != nil {
utils.FatalError(fmt.Errorf("'%v' is not a valid session ID (must be GUID)", cf.SessionID))
}
if err = tc.Join(context.TODO(), cf.Namespace, *sid, nil); err != nil {
utils.FatalError(err)
}
}
// onSCP executes 'tsh scp' command
func onSCP(cf *CLIConf) {
tc, err := makeClient(cf, false)
if err != nil {
utils.FatalError(err)
}
if err := tc.SCP(context.TODO(), cf.CopySpec, int(cf.NodePort), cf.RecursiveCopy, cf.Quiet); err != nil {
// exit with the same exit status as the failed command:
if tc.ExitStatus != 0 {
fmt.Fprintln(os.Stderr, utils.UserMessageFromError(err))
os.Exit(tc.ExitStatus)
} else {
utils.FatalError(err)
}
}
}
// makeClient takes the command-line configuration and constructs & returns
// a fully configured TeleportClient object
func makeClient(cf *CLIConf, useProfileLogin bool) (tc *client.TeleportClient, err error) {
// apply defaults
if cf.MinsToLive == 0 {
cf.MinsToLive = int32(defaults.CertDuration / time.Minute)
}
// split login & host
hostLogin := cf.NodeLogin
var labels map[string]string
if cf.UserHost != "" {
parts := strings.Split(cf.UserHost, "@")
if len(parts) > 1 {
hostLogin = parts[0]
cf.UserHost = parts[1]
}
// see if remote host is specified as a set of labels
if strings.Contains(cf.UserHost, "=") {
labels, err = client.ParseLabelSpec(cf.UserHost)
if err != nil {
return nil, err
}
}
}
fPorts, err := client.ParsePortForwardSpec(cf.LocalForwardPorts)
if err != nil {
return nil, err
}
// 1: start with the defaults
c := client.MakeDefaultConfig()
// 2: apply the identity file given via the -i flag, or fall back to the profile
if cf.IdentityFileIn != "" {
var (
key *client.Key
identityAuth ssh.AuthMethod
expiryDate time.Time
hostAuthFunc ssh.HostKeyCallback
)
// read the ID file and create an "auth method" from it:
key, hostAuthFunc, err = loadIdentity(cf.IdentityFileIn)
if err != nil {
return nil, trace.Wrap(err)
}
identityAuth, err = authFromIdentity(key)
if err != nil {
return nil, trace.Wrap(err)
}
c.AuthMethods = []ssh.AuthMethod{identityAuth}
if hostAuthFunc != nil {
c.HostKeyCallback = hostAuthFunc
}
// check the expiration date
expiryDate, _ = key.CertValidBefore()
if expiryDate.Before(time.Now()) {
fmt.Fprintf(os.Stderr, "WARNING: the certificate has expired on %v\n", expiryDate)
}
} else {
// Load the profile: if no --proxy is given, use the ~/.tsh/profile symlink;
// otherwise fetch the profile for the exact proxy we are connecting to.
err = c.LoadProfile("", cf.Proxy)
if err != nil {
fmt.Printf("WARNING: Failed to load tsh profile for %q: %v\n", cf.Proxy, err)
}
}
// 3: override with the CLI flags
if cf.Namespace != "" {
c.Namespace = cf.Namespace
}
if cf.Username != "" {
c.Username = cf.Username
}
if cf.Proxy != "" {
c.ProxyHostPort = cf.Proxy
}
if len(fPorts) > 0 {
c.LocalForwardPorts = fPorts
}
if cf.SiteName != "" {
c.SiteName = cf.SiteName
}
// clear the host login loaded from the profile when it must be ignored
if !useProfileLogin {
c.HostLogin = ""
}
if hostLogin != "" {
c.HostLogin = hostLogin
}
c.Host = cf.UserHost
c.HostPort = int(cf.NodePort)
c.Labels = labels
c.KeyTTL = time.Minute * time.Duration(cf.MinsToLive)
c.InsecureSkipVerify = cf.InsecureSkipVerify
c.Interactive = cf.Interactive
if !cf.NoCache {
c.CachePolicy = &client.CachePolicy{}
}
// check version compatibility of the server and client
c.CheckVersions = !cf.SkipVersionCheck
// parse compatibility parameter
certificateFormat, err := parseCertificateCompatibilityFlag(cf.Compatibility, cf.CertificateFormat)
if err != nil {
return nil, trace.Wrap(err)
}
c.CertificateFormat = certificateFormat
// copy the authentication connector over
c.AuthConnector = cf.AuthConnector
// copy over if we want agent forwarding or not
c.ForwardAgent = cf.ForwardAgent
return client.NewClient(c)
}
func parseCertificateCompatibilityFlag(compatibility string, certificateFormat string) (string, error) {
switch {
// if nothing is passed in, the role will decide
case compatibility == "" && certificateFormat == "":
return teleport.CertificateFormatUnspecified, nil
// supporting the old --compat format for backward compatibility
case compatibility != "" && certificateFormat == "":
return utils.CheckCertificateFormatFlag(compatibility)
// new documented flag --cert-format
case compatibility == "" && certificateFormat != "":
return utils.CheckCertificateFormatFlag(certificateFormat)
// can not use both
default:
return "", trace.BadParameter("--compat or --cert-format must be specified")
}
}
// refuseArgs helper makes sure that 'args' (list of CLI arguments)
// does not contain anything other than the command itself and flags
func refuseArgs(command string, args []string) {
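// For example (illustrative), refuseArgs("logout", []string{"logout", "--proxy=p", "extra"})
// exits with an error on "extra"; the command itself and flags are allowed through.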
for _, arg := range args {
if arg == command || strings.HasPrefix(arg, "-") {
continue
} else {
utils.FatalError(trace.BadParameter("unexpected argument: %s", arg))
}
}
}
// loadIdentity loads the private key + certificate from a file
// Returns:
// - client key: user's private key+cert
// - host auth callback: function to validate the host (may be nil)
// - error, if something goes wrong while reading the identity file
//
// If the "host auth callback" is not returned, user will be prompted to
// trust the proxy server.
func loadIdentity(idFn string) (*client.Key, ssh.HostKeyCallback, error) {
logrus.Infof("Reading identity file: ", idFn)
f, err := os.Open(idFn)
if err != nil {
return nil, nil, trace.Wrap(err)
}
defer f.Close()
var (
keyBuf bytes.Buffer
state int // 0: not found, 1: found beginning, 2: found ending
cert []byte
caCert []byte
)
// read the identity file line by line:
scanner := bufio.NewScanner(f)
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
if state != 1 {
if strings.HasPrefix(line, "ssh") {
cert = []byte(line)
continue
}
if strings.HasPrefix(line, "@cert-authority") {
caCert = []byte(line)
continue
}
}
if state == 0 && strings.HasPrefix(line, "-----BEGIN") {
state = 1
keyBuf.WriteString(line)
keyBuf.WriteRune('\n')
continue
}
if state == 1 {
keyBuf.WriteString(line)
if strings.HasPrefix(line, "-----END") {
state = 2
} else {
keyBuf.WriteRune('\n')
}
}
}
// did not find the certificate in the file? look in a separate file with
// a -cert.pub suffix
if len(cert) == 0 {
certFn := idFn + "-cert.pub"
logrus.Infof("certificate not found in %s. looking in %s", idFn, certFn)
cert, err = ioutil.ReadFile(certFn)
if err != nil {
return nil, nil, trace.Wrap(err)
}
}
// validate both by parsing them:
privKey, err := ssh.ParseRawPrivateKey(keyBuf.Bytes())
if err != nil {
return nil, nil, trace.BadParameter("invalid identity: %s. %v", idFn, err)
}
signer, err := ssh.NewSignerFromKey(privKey)
if err != nil {
return nil, nil, trace.Wrap(err)
}
var hostAuthFunc ssh.HostKeyCallback = nil
// validate CA (cluster) cert
if len(caCert) > 0 {
_, _, pkey, _, _, err := ssh.ParseKnownHosts(caCert)
if err != nil {
return nil, nil, trace.BadParameter("CA cert parsing error: %v. cert line :%v",
err.Error(), string(caCert))
}
// found a CA cert in the identity file? construct the host key checking function
// and return it:
hostAuthFunc = func(host string, a net.Addr, hostKey ssh.PublicKey) error {
clusterCert, ok := hostKey.(*ssh.Certificate)
if ok {
hostKey = clusterCert.SignatureKey
}
if !sshutils.KeysEqual(pkey, hostKey) {
err = trace.AccessDenied("host %v is untrusted", host)
logrus.Error(err)
return err
}
return nil
}
}
return &client.Key{
Priv: keyBuf.Bytes(),
Pub: signer.PublicKey().Marshal(),
Cert: cert,
}, hostAuthFunc, nil
}
// authFromIdentity returns a standard ssh.Authmethod for a given identity file
func authFromIdentity(k *client.Key) (ssh.AuthMethod, error) {
signer, err := sshutils.NewSigner(k.Priv, k.Cert)
if err != nil {
return nil, trace.Wrap(err)
}
return client.NewAuthMethodForCert(signer), nil
}
// onShow reads an identity file (a public SSH key or a cert) and dumps it to stdout
func onShow(cf *CLIConf) {
key, _, err := loadIdentity(cf.IdentityFileIn)
if err != nil {
utils.FatalError(err)
}
// unmarshal certificate bytes into a ssh.PublicKey
cert, _, _, _, err := ssh.ParseAuthorizedKey(key.Cert)
if err != nil {
utils.FatalError(err)
}
// unmarshal private key bytes into a *rsa.PrivateKey
priv, err := ssh.ParseRawPrivateKey(key.Priv)
if err != nil {
utils.FatalError(err)
}
pub, err := ssh.ParsePublicKey(key.Pub)
if err != nil {
utils.FatalError(err)
}
fmt.Printf("Cert: %#v\nPriv: %#v\nPub: %#v\n",
cert, priv, pub)
fmt.Printf("Fingerprint: %s\n", ssh.FingerprintSHA256(pub))
}
// printStatus prints the status of the profile.
func printStatus(p *client.ProfileStatus, isActive bool) {
var prefix string
if isActive {
prefix = "> "
} else {
prefix = " "
}
duration := p.ValidUntil.Sub(time.Now())
humanDuration := "EXPIRED"
if duration.Nanoseconds() > 0 {
humanDuration = fmt.Sprintf("valid for %v", duration.Round(time.Minute))
}
fmt.Printf("%vProfile URL: %v\n", prefix, p.ProxyURL.String())
fmt.Printf(" Logged in as: %v\n", p.Username)
if p.Cluster != "" {
fmt.Printf(" Cluster: %v\n", p.Cluster)
}
fmt.Printf(" Roles: %v*\n", strings.Join(p.Roles, ", "))
fmt.Printf(" Logins: %v\n", strings.Join(p.Logins, ", "))
fmt.Printf(" Valid until: %v [%v]\n", p.ValidUntil, humanDuration)
fmt.Printf(" Extensions: %v\n\n", strings.Join(p.Extensions, ", "))
}
// onStatus command shows which proxy the user is logged into and metadata
// about the certificate.
func onStatus(cf *CLIConf) {
// Get the status of the active profile ~/.tsh/profile as well as the status
// of any other proxies the user is logged into.
profile, profiles, err := client.Status("", cf.Proxy)
if err != nil {
if trace.IsNotFound(err) {
fmt.Printf("Not logged in.\n")
return
}
utils.FatalError(err)
}
printProfiles(profile, profiles)
}
func printProfiles(profile *client.ProfileStatus, profiles []*client.ProfileStatus) {
// Print the active profile.
if profile != nil {
printStatus(profile, true)
}
// Print all other profiles.
for _, p := range profiles {
printStatus(p, false)
}
// If we are printing profile, add a note that even though roles are listed
// here, they are only available in Enterprise.
if profile != nil || len(profiles) > 0 {
fmt.Printf("\n* RBAC is only available in Teleport Enterprise\n")
fmt.Printf(" https://gravitaitonal.com/teleport/docs/enteprise\n")
}
}
// host is a utility function that extracts
// host from the host:port pair, in case of any error
// returns the original value
func host(in string) string {
out, err := utils.Host(in)
if err != nil {
return in
}
return out
}
models.py | from django.db import models
from authors import settings
from authors.apps.articles.models import Article
from authors.apps.profiles.models import Profile
# Create your models here.
class ReportArticle(models.Model):
"""model for reporting an article"""
reporter = models.ForeignKey(Profile, on_delete=models.CASCADE)
article = models.ForeignKey(Article, to_field="slug", on_delete=models.CASCADE)
violation_subject = models.CharField(max_length=100, blank=False, null=False)
violation_report = models.CharField(max_length=300, blank=True, null=True)
report_status = models.CharField(max_length=20, default='pending')
submission_date = models.DateTimeField(auto_now_add=True, editable=False)
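# Illustrative usage (not part of the app; `profile` and `article` are assumed
# to be existing Profile/Article instances):
# ReportArticle.objects.create(reporter=profile, article=article,
#                              violation_subject='plagiarism')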
runtime2.go | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/cpu"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
)
// defined constants
const (
// G status
//
// Beyond indicating the general state of a G, the G status
// acts like a lock on the goroutine's stack (and hence its
// ability to execute user code).
//
// If you add to this list, add to the list
// of "okay during garbage collection" status
// in mgcmark.go too.
// _Gidle means this goroutine was just allocated and has not yet been initialized.
// The G was just created and has not yet been initialized.
_Gidle = iota // 0
// _Grunnable means this goroutine is on a run queue. It is
// not currently executing user code. The stack is not owned.
// The G sits on a local or the global run queue, ready to run; it does not own a usable stack.
_Grunnable // 1
// _Grunning means this goroutine may execute user code. The
// stack is owned by this goroutine. It is not on a run queue.
// It is assigned an M and a P.
// The G is running: it owns a usable stack, is not on a run queue, and is bound to an M.
_Grunning // 2
// _Gsyscall means this goroutine is executing a system call.
// It is not executing user code. The stack is owned by this
// goroutine. It is not on a run queue. It is assigned an M.
// The G is in a system call: it owns a usable stack, is not on a run queue, and is bound to an M.
_Gsyscall // 3
// _Gwaiting means this goroutine is blocked in the runtime.
// It is not executing user code. It is not on a run queue,
// but should be recorded somewhere (e.g., a channel wait
// queue) so it can be ready()d when necessary. The stack is
// not owned *except* that a channel operation may read or
// write parts of the stack under the appropriate channel
// lock. Otherwise, it is not safe to access the stack after a
// goroutine enters _Gwaiting (e.g., it may get moved).
// The G is blocked waiting for some condition: it runs no user code and is not on a run queue, but it can still be located (e.g. through a wait queue).
_Gwaiting // 4
// _Gmoribund_unused is currently unused, but hardcoded in gdb
// scripts.
_Gmoribund_unused // 5
// _Gdead means this goroutine is currently unused. It may be just exited, on a free list, or just being initialized.
// It is not executing user code. It may or may not have a stack allocated.
// The G and its stack (if any) are owned by the M that is exiting the G or that obtained the G from the free list.
// The G has terminated.
// It is currently unused: it finished running user code and sits on a free list, or it was just initialized.
_Gdead // 6
// _Genqueue_unused is currently unused.
_Genqueue_unused // 7
// _Gcopystack means this goroutine's stack is being moved. It
// is not executing user code and is not on a run queue. The
// stack is owned by the goroutine that put it in _Gcopystack.
// The G's stack is being grown (copied); retry on the next round. No user code runs and the G is not on a run queue.
_Gcopystack // 8
// _Gscan combined with one of the above states other than
// _Grunning indicates that GC is scanning the stack. The
// goroutine is not executing user code and the stack is owned
// by the goroutine that set the _Gscan bit.
//
// _Gscanrunning is different: it is used to briefly block
// state transitions while GC signals the G to scan its own
// stack. This is otherwise like _Grunning.
//
// atomicstatus&~Gscan gives the state the goroutine will
// return to when the scan completes.
// Set while the G's stack is being scanned by the GC; used in combination with the states above.
_Gscan = 0x1000
_Gscanrunnable = _Gscan + _Grunnable // 0x1001
_Gscanrunning = _Gscan + _Grunning // 0x1002
_Gscansyscall = _Gscan + _Gsyscall // 0x1003
_Gscanwaiting = _Gscan + _Gwaiting // 0x1004
)
const (
// P status
// The P is idle: running neither user code nor the scheduler loop.
_Pidle = iota
// Held by an M, running user code or the scheduler.
_Prunning // Only this P is allowed to change from _Prunning.
// Not running user code; the bound M is in a system call, so the P may be stolen by another M.
_Psyscall
// Held by an M during the stop-the-world phase.
_Pgcstop
// The P is no longer used; produced when GOMAXPROCS shrinks, reused when GOMAXPROCS grows again.
_Pdead
)
// Mutual exclusion locks. In the uncontended case,
// as fast as spin locks (just a few user-level instructions),
// but on the contention path they sleep in the kernel.
// A zeroed Mutex is unlocked (no need to initialize each lock).
type mutex struct {
// Futex-based impl treats it as uint32 key,
// while sema-based impl as M* waitm.
// Used to be a union, but unions break precise GC.
key uintptr
}
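// Illustrative call pattern for mutex (runtime-internal lock/unlock, as used
// throughout the scheduler):
//
//	lock(&sched.lock)
//	// ... mutate shared scheduler state ...
//	unlock(&sched.lock)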
// sleep and wakeup on one-time events.
// before any calls to notesleep or notewakeup,
// must call noteclear to initialize the Note.
// then, exactly one thread can call notesleep
// and exactly one thread can call notewakeup (once).
// once notewakeup has been called, the notesleep
// will return. future notesleep will return immediately.
// subsequent noteclear must be called only after
// previous notesleep has returned, e.g. it's disallowed
// to call noteclear straight after notewakeup.
//
// notetsleep is like notesleep but wakes up after
// a given number of nanoseconds even if the event
// has not yet happened. if a goroutine uses notetsleep to
// wake up early, it must wait to call noteclear until it
// can be sure that no other goroutine is calling
// notewakeup.
//
// notesleep/notetsleep are generally called on g0,
// notetsleepg is similar to notetsleep but is called on user g.
type note struct {
// Futex-based impl treats it as uint32 key,
// while sema-based impl as M* waitm.
// Used to be a union, but unions break precise GC.
key uintptr
}
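// Illustrative one-shot handshake for note, following the protocol above:
//
//	var n note
//	noteclear(&n)                  // initialize before any sleep/wakeup
//	go func() { notewakeup(&n) }() // exactly one wakeup
//	notesleep(&n)                  // returns once notewakeup has run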
type funcval struct {
fn uintptr
// variable-size, fn-specific data here
}
type iface struct {
tab *itab
data unsafe.Pointer
}
type eface struct {
_type *_type
data unsafe.Pointer
}
func efaceOf(ep *interface{}) *eface {
return (*eface)(unsafe.Pointer(ep))
}
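// Illustrative use of efaceOf: peek at the type/data words of an interface{}.
//
//	var x interface{} = 42
//	e := efaceOf(&x) // e._type describes int, e.data points at the boxed value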
// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
// It is particularly important to avoid write barriers when the current P has
// been released, because the GC thinks the world is stopped, and an
// unexpected write barrier would not be synchronized with the GC,
// which can lead to a half-executed write barrier that has marked the object
// but not queued it. If the GC skips the object and completes before the
// queuing can occur, it will incorrectly free the object.
//
// We tried using special assignment functions invoked only when not
// holding a running P, but then some updates to a particular memory
// word went through write barriers and some did not. This breaks the
// write barrier shadow checking mode, and it is also scary: better to have
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
// Gs and Ps are always reachable via true pointers in the
// allgs and allp lists or (during allocation before they reach those lists)
// from stack variables.
//
// Ms are always reachable via true pointers either from allm or
// freem. Unlike Gs and Ps we do free Ms, so it's important that
// nothing ever hold an muintptr across a safe point.
// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
// and in scheduling lists that are manipulated without a P.
//
// The Gobuf.g goroutine pointer is almost always updated by assembly code.
// In one of the few places it is updated by Go code - func save - it must be
// treated as a uintptr to avoid a write barrier being emitted at a bad time.
// Instead of figuring out how to emit the write barriers missing in the
// assembly manipulation, we change the type of the field to uintptr,
// so that it does not require write barriers at all.
//
// Goroutine structs are published in the allg list and never freed.
// That will keep the goroutine structs from being collected.
// There is never a time that Gobuf.g's contain the only references
// to a goroutine: the publishing of the goroutine in allg comes first.
// Goroutine pointers are also kept in non-GC-visible places like TLS,
// so I can't see them ever moving. If we did want to start moving data
// in the GC, we'd need to allocate the goroutine structs from an
// alternate arena. Using guintptr doesn't make that problem any worse.
type guintptr uintptr
//go:nosplit
func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }
//go:nosplit
func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }
//go:nosplit
func (gp *guintptr) cas(old, new guintptr) bool {
return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
}
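// Illustrative guintptr round-trip (gp is a hypothetical *g): set stores the
// pointer without a write barrier, ptr recovers the typed pointer.
//
//	var gup guintptr
//	gup.set(gp)
//	_ = gup.ptr() // a *g again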
// setGNoWB performs *gp = new without a write barrier.
// For times when it's impractical to use a guintptr.
//go:nosplit
//go:nowritebarrier
func setGNoWB(gp **g, new *g) {
(*guintptr)(unsafe.Pointer(gp)).set(new)
}
type puintptr uintptr
//go:nosplit
func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }
//go:nosplit
func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }
// muintptr is a *m that is not tracked by the garbage collector.
//
// Because we do free Ms, there are some additional constraints on
// muintptrs:
//
// 1. Never hold an muintptr locally across a safe point.
//
// 2. Any muintptr in the heap must be owned by the M itself so it can
// ensure it is not in use when the last true *m is released.
type muintptr uintptr
//go:nosplit
func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }
//go:nosplit
func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }
// setMNoWB performs *mp = new without a write barrier.
// For times when it's impractical to use an muintptr.
//go:nosplit
//go:nowritebarrier
func setMNoWB(mp **m, new *m) {
(*muintptr)(unsafe.Pointer(mp)).set(new)
}
type gobuf struct {
// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
//
// ctxt is unusual with respect to GC: it may be a
// heap-allocated funcval, so GC needs to track it, but it
// needs to be set and cleared from assembly, where it's
// difficult to have write barriers. However, ctxt is really a
// saved, live register, and we only ever exchange it between
// the real register and the gobuf. Hence, we treat it as a
// root during stack scanning, which means assembly that saves
// and restores it doesn't need write barriers. It's still
// typed as a pointer so that any other writes from Go get
// write barriers.
sp uintptr // saved stack pointer (top of the goroutine stack)
pc uintptr // saved program counter
g guintptr // reference back to the goroutine itself
ctxt unsafe.Pointer
ret sys.Uintreg
lr uintptr
bp uintptr // for GOEXPERIMENT=framepointer (saved frame pointer, stack base)
}
// sudog represents a g in a wait list, such as for sending/receiving
// on a channel.
//
// sudog is necessary because the g ↔ synchronization object relation
// is many-to-many. A g can be on many wait lists, so there may be
// many sudogs for one g; and many gs may be waiting on the same
// synchronization object, so there may be many sudogs for one object.
//
// sudogs are allocated from a special pool. Use acquireSudog and
// releaseSudog to allocate and free them.
type sudog struct {
// The following fields are protected by the hchan.lock of the
// channel this sudog is blocking on. shrinkstack depends on
// this for sudogs involved in channel ops.
g *g
// isSelect indicates g is participating in a select, so
// g.selectDone must be CAS'd to win the wake-up race.
isSelect bool
next *sudog
prev *sudog
elem unsafe.Pointer // data element (may point to stack)
// The following fields are never accessed concurrently.
// For channels, waitlink is only accessed by g.
// For semaphores, all fields (including the ones above)
// are only accessed when holding a semaRoot lock.
acquiretime int64
releasetime int64
ticket uint32
parent *sudog // semaRoot binary tree
waitlink *sudog // g.waiting list or semaRoot
waittail *sudog // semaRoot
c *hchan // channel
}
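// Illustrative sudog lifecycle (see the channel code for the real call sites):
//
//	sg := acquireSudog() // taken from the special pool
//	sg.g = gp            // gp: the waiting goroutine (hypothetical here)
//	// ... link sg into the channel's wait queue, block, get woken ...
//	releaseSudog(sg)     // returned to the pool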
type libcall struct {
fn uintptr
n uintptr // number of parameters
args uintptr // parameters
r1 uintptr // return values
r2 uintptr
err uintptr // error number
}
// describes how to handle callback
type wincallbackcontext struct {
gobody unsafe.Pointer // go function to call
argsize uintptr // callback arguments size (in bytes)
restorestack uintptr // adjust stack on return by (in bytes) (386 only)
cleanstack bool
}
// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
type stack struct {
lo uintptr // lower bound of the stack (stacks grow down toward lo)
hi uintptr // upper bound of the stack (the stack base)
}
type g struct {
// Stack parameters.
// stack describes the actual stack memory: [stack.lo, stack.hi).
// stackguard0 is the stack pointer compared in the Go stack growth prologue.
// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
// stackguard1 is the stack pointer compared in the C stack growth prologue.
// It is stack.lo+StackGuard on g0 and gsignal stacks.
// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
// The stack space currently owned by this G:
stack stack // offset known to runtime/cgo
stackguard0 uintptr // offset known to liblink
stackguard1 uintptr // offset known to liblink
// chain of panics active on this goroutine
_panic *_panic // innermost panic - offset known to liblink
// chain of defers held by this goroutine
_defer *_defer // innermost defer
// the m bound to this goroutine
m *m // current m; offset known to arm liblink
// scheduling state held by this goroutine
sched gobuf
// stack pointer at the time of the system call; used during GC
syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
// program counter at the time of the system call
syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
stktopsp uintptr // expected sp at top of stack, to check in traceback
param unsafe.Pointer // passed parameter on wakeup
// goroutine status; see the table in section 2.1.2 for details
atomicstatus uint32
stackLock uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
goid int64
schedlink guintptr
waitsince int64 // approx time when the g become blocked
waitreason waitReason // if status==Gwaiting
// preemption flag: set preempt to true to request preemption of this goroutine
preempt bool // preemption signal, duplicates stackguard0 = stackpreempt
paniconfault bool // panic (instead of crash) on unexpected fault address
preemptscan bool // preempted g does scan for gc
gcscandone bool // g has scanned stack; protected by _Gscan bit in status
gcscanvalid bool // false at start of gc cycle, true if G has not run since last scan; TODO: remove?
throwsplit bool // must not split stack
raceignore int8 // ignore race detection events
sysblocktraced bool // StartTrace has emitted EvGoInSyscall about this goroutine
sysexitticks int64 // cputicks when syscall has returned (for tracing)
traceseq uint64 // trace event sequencer
tracelastp puintptr // last P emitted an event for this goroutine
// the M locked to this goroutine; once locked, only that M may run this G
lockedm muintptr
sig uint32
writebuf []byte
sigcode0 uintptr
sigcode1 uintptr
sigpc uintptr
gopc uintptr // pc of go statement that created this goroutine
ancestors *[]ancestorInfo // ancestor information goroutine(s) that created this goroutine (only used if debug.tracebackancestors)
startpc uintptr // pc of goroutine function
racectx uintptr
waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
cgoCtxt []uintptr // cgo traceback context
labels unsafe.Pointer // profiler labels
timer *timer // cached timer for time.Sleep
selectDone uint32 // are we participating in a select and did someone win the race?
// Per-G GC state
// gcAssistBytes is this G's GC assist credit in terms of
// bytes allocated. If this is positive, then the G has credit
// to allocate gcAssistBytes bytes without assisting. If this
// is negative, then the G must correct this by performing
// scan work. We track this in bytes to make it fast to update
// and check for debt in the malloc hot path. The assist ratio
// determines how this corresponds to scan work debt.
gcAssistBytes int64
}
type m struct {
// g0 is a special goroutine whose sched info records the PC and SP of the
// mstart function. Its stack is comparatively large and provides the memory
// in which the scheduler loop runs; it also has other duties, such as
// creating goroutines and garbage collection.
g0 *g // goroutine with scheduling stack
// goroutine scheduling state used while growing the stack
morebuf gobuf // gobuf arg to morestack
divmod uint32 // div/mod denominator for arm - known to liblink
// Fields not known to debuggers.
// OS thread ID
procid uint64 // for debuggers, but offset not hard-coded
gsignal *g // signal-handling g
goSigStack gsignalStack // Go-allocated signal handling stack
sigmask sigset // storage for saved signal mask
// thread-local storage
tls [6]uintptr // thread-local storage (for x86 extern register)
// function every M runs first when it starts
mstartfn func()
// the G currently running on this M
curg *g // current running goroutine
caughtsig guintptr // goroutine running during fatal signal
// the P held by this M; an M may run user code only while holding a P
p puintptr // attached p for executing go code (nil if not executing go code)
// the P this M tries to acquire first when it is woken
nextp puintptr
// the P saved before entering a system call; reacquired first when the call returns
oldp puintptr // the p that was attached before executing a syscall
// ID of this M, used only for debugging
id int64
mallocing int32
throwing int32
// when non-empty, keep curg bound to this M and disallow preemption
preemptoff string // if != "", keep curg running on this m
locks int32
dying int32
profilehz int32
// spinning state: a spinning thread runs no user code and is actively looking for work
spinning bool // m is out of work and is actively looking for work
blocked bool // m is blocked on a note
inwb bool // m is executing a write barrier
newSigstack bool // minit on C thread called sigaltstack
printlock int8
incgo bool // m is executing a cgo call
freeWait uint32 // if == 0, safe to free g0 and delete m (atomic)
fastrand [2]uint32
needextram bool
traceback uint8
ncgocall uint64 // number of cgo calls in total
ncgo int32 // number of cgo calls currently in progress
cgoCallersUse uint32 // if non-zero, cgoCallers in use temporarily
cgoCallers *cgoCallers // cgo traceback if crashing in cgo call
// semaphore used to park and wake the OS thread
park note
// linked through the global allm, a singly linked list over all Ms
alllink *m // on allm
schedlink muintptr
mcache *mcache
// the G locked to this M: the M may run only that G, and that G may run only on this M; if the G cannot run, both sleep
lockedg guintptr
createstack [32]uintptr // stack that created this thread.
lockedExt uint32 // tracking for external LockOSThread
lockedInt uint32 // tracking for internal lockOSThread
nextwaitm muintptr // next m waiting for lock
waitunlockf unsafe.Pointer // todo go func(*g, unsafe.pointer) bool
waitlock unsafe.Pointer
waittraceev byte
waittraceskip int
startingtrace bool
// system call counter
syscalltick uint32
thread uintptr // thread handle
freelink *m // on sched.freem
// these are here because they are too large to be on the stack
// of low-level NOSPLIT functions.
libcall libcall
libcallpc uintptr // for cpu profiler
libcallsp uintptr
libcallg guintptr
syscall libcall // stores syscall parameters on windows
vdsoSP uintptr // SP for traceback while in VDSO call (0 if not in call)
vdsoPC uintptr // PC for traceback while in VDSO call
mOS
}
type p struct {
lock mutex
// ID of this P
id int32
// status of the P; see the table in section 2.2.2 for details
status uint32 // one of pidle/prunning/...
link puintptr
// incremented once per completed scheduling loop
schedtick uint32 // incremented on every scheduler call
// incremented once per completed system call
syscalltick uint32 // incremented on every system call
// loop count last observed by sysmon; sysmon bumps the tick once per loop
sysmontick sysmontick // last tick observed by sysmon
// the M holding this P
m muintptr // back-link to associated m (nil if idle)
// per-P mspan cache
mcache *mcache
racectx uintptr
deferpool [5][]*_defer // pool of available defer structs of different sizes (see panic.go)
deferpoolbuf [5][32]*_defer
// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
goidcache uint64
goidcacheend uint64
// Queue of runnable goroutines. Accessed without lock.
// head of the runnable queue
runqhead uint32
// tail of the runnable queue
runqtail uint32
// array backing the runnable queue
runq [256]guintptr
// runnext, if non-nil, is a runnable G that was ready'd by
// the current G and should be run next instead of what's in
// runq if there's time remaining in the running G's time
// slice. It will inherit the time left in the current time
// slice. If a set of goroutines is locked in a
// communicate-and-wait pattern, this schedules that set as a
// unit and eliminates the (potentially large) scheduling
// latency that otherwise arises from adding the ready'd
// goroutines to the end of the run queue.
// cached G to run next, ahead of runq
runnext guintptr
// Available G's (status == Gdead)
// Gs awaiting reuse; goroutines that finish running are placed here
gFree struct {
// linked list of reusable Gs
gList
n int32
}
sudogcache []*sudog
sudogbuf [128]*sudog
tracebuf traceBufPtr
// traceSweep indicates the sweep events should be traced.
// This is used to defer the sweep start event until a span
// has actually been swept.
traceSweep bool
// traceSwept and traceReclaimed track the number of bytes
// swept and reclaimed by sweeping in the current sweep loop.
traceSwept, traceReclaimed uintptr
palloc persistentAlloc // per-P to avoid mutex
// Per-P GC state
gcAssistTime int64 // Nanoseconds in assistAlloc
gcFractionalMarkTime int64 // Nanoseconds in fractional mark worker
// background mark worker goroutine for this P; zero if not started
gcBgMarkWorker guintptr
gcMarkWorkerMode gcMarkWorkerMode
// gcMarkWorkerStartTime is the nanotime() at which this mark
// worker started.
gcMarkWorkerStartTime int64
// gcw is this P's GC work buffer cache. The work buffer is
// filled by write barriers, drained by mutator assists, and
// disposed on certain GC state transitions.
gcw gcWork
// wbBuf is this P's GC write barrier buffer.
//
// TODO: Consider caching this in the running G.
wbBuf wbBuf
// if 1, run sched.safePointFn at the next safe point
runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point
pad cpu.CacheLinePad
}
type schedt struct {
// accessed atomically. keep at top to ensure alignment on 32-bit systems.
goidgen uint64
lastpoll uint64
lock mutex
// When increasing nmidle, nmidlelocked, nmsys, or nmfreed, be
// sure to call checkdead().
midle muintptr // idle m's waiting for work
nmidle int32 // number of idle m's waiting for work
nmidlelocked int32 // number of locked m's waiting for work
mnext int64 // number of m's that have been created and next M ID
maxmcount int32 // maximum number of m's allowed (or die)
nmsys int32 // number of system m's not counted for deadlock
nmfreed int64 // cumulative number of freed m's
ngsys uint32 // number of system goroutines; updated atomically
pidle puintptr // idle p's
npidle uint32
nmspinning uint32 // See "Worker thread parking/unparking" comment in proc.go.
// Global runnable queue.
runq gQueue
runqsize int32
// disable controls selective disabling of the scheduler.
//
// Use schedEnableUser to control this.
//
// disable is protected by sched.lock.
disable struct {
// user disables scheduling of user goroutines.
user bool
runnable gQueue // pending runnable Gs
n int32 // length of runnable
}
// Global cache of dead G's.
gFree struct {
lock mutex
stack gList // Gs with stacks
noStack gList // Gs without stacks
n int32
}
// Central cache of sudog structs.
sudoglock mutex
sudogcache *sudog
// Central pool of available defer structs of different sizes.
deferlock mutex
deferpool [5]*_defer
// freem is the list of m's waiting to be freed when their
// m.exited is set. Linked through m.freelink.
freem *m
// GC-waiting flag; the scheduler parks when it sees this set
gcwaiting uint32 // gc is waiting to run
stopwait int32
stopnote note
sysmonwait uint32
sysmonnote note
// safepointFn should be called on each P at the next GC
// safepoint if p.runSafePointFn is set.
safePointFn func(*p)
safePointWait int32
safePointNote note
profilehz int32 // cpu profiling rate
procresizetime int64 // nanotime() of last change to gomaxprocs
totaltime int64 // ∫gomaxprocs dt up to procresizetime
}
// Values for the flags field of a sigTabT.
const (
_SigNotify = 1 << iota // let signal.Notify have signal, even if from kernel
_SigKill // if signal.Notify doesn't take it, exit quietly
_SigThrow // if signal.Notify doesn't take it, exit loudly
_SigPanic // if the signal is from the kernel, panic
_SigDefault // if the signal isn't explicitly requested, don't monitor it
_SigGoExit // cause all runtime procs to exit (only used on Plan 9).
_SigSetStack // add SA_ONSTACK to libc handler
_SigUnblock // always unblock; see blockableSig
_SigIgn // _SIG_DFL action is to ignore the signal
)
// Layout of in-memory per-function information prepared by linker
// See https://golang.org/s/go12symtab.
// Keep in sync with linker (../cmd/link/internal/ld/pcln.go:/pclntab)
// and with package debug/gosym and with symtab.go in package runtime.
type _func struct {
entry uintptr // start pc
nameoff int32 // function name
args int32 // in/out args size
deferreturn uint32 // offset of a deferreturn block from entry, if any.
pcsp int32
pcfile int32
pcln int32
npcdata int32
funcID funcID // set for certain special runtime functions
_ [2]int8 // unused
nfuncdata uint8 // must be last
}
// Pseudo-Func that is returned for PCs that occur in inlined code.
// A *Func can be either a *_func or a *funcinl, and they are distinguished
// by the first uintptr.
type funcinl struct {
zero uintptr // set to 0 to distinguish from _func
entry uintptr // entry of the real (the "outermost") frame.
name string
file string
line int
}
// layout of Itab known to compilers
// allocated in non-garbage-collected memory
// Needs to be in sync with
// ../cmd/compile/internal/gc/reflect.go:/^func.dumptypestructs.
type itab struct {
inter *interfacetype
_type *_type
hash uint32 // copy of _type.hash. Used for type switches.
_ [4]byte
fun [1]uintptr // variable sized. fun[0]==0 means _type does not implement inter.
}
// Lock-free stack node.
// Also known to export_test.go.
type lfnode struct {
next uint64
pushcnt uintptr
}
type forcegcstate struct {
lock mutex
g *g
idle uint32
}
// startup_random_data holds random bytes initialized at startup. These come from
// the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go).
var startupRandomData []byte
// extendRandom extends the random numbers in r[:n] to the whole slice r.
// Treats n<0 as n==0.
func extendRandom(r []byte, n int) {
if n < 0 {
n = 0
}
for n < len(r) {
// Extend random bits using hash function & time seed
w := n
if w > 16 {
w = 16
}
h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
for i := 0; i < sys.PtrSize && n < len(r); i++ {
r[n] = byte(h)
n++
h >>= 8
}
}
}
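// Illustrative: given 4 seed bytes in r[:4], extendRandom(r[:], 4) fills the
// rest of r with bytes derived from memhash and a time seed.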
// A _defer holds an entry on the list of deferred calls.
// If you add a field here, add code to clear it in freedefer.
type _defer struct {
siz int32
started bool
sp uintptr // sp at time of defer
pc uintptr
fn *funcval
_panic *_panic // panic that is running defer
link *_defer
}
// A _panic holds information about an active panic.
//
// This is marked go:notinheap because _panic values must only ever
// live on the stack.
//
// The argp and link fields are stack pointers, but don't need special
// handling during stack growth: because they are pointer-typed and
// _panic values only live on the stack, regular stack pointer
// adjustment takes care of them.
//
//go:notinheap
type _panic struct {
argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
arg interface{} // argument to panic
link *_panic // link to earlier panic
recovered bool // whether this panic is over
aborted bool // the panic was aborted
}
// stack traces
type stkframe struct {
fn funcInfo // function being run
pc uintptr // program counter within fn
continpc uintptr // program counter where execution can continue, or 0 if not
lr uintptr // program counter at caller aka link register
sp uintptr // stack pointer at pc
fp uintptr // stack pointer at caller aka frame pointer
varp uintptr // top of local variables
argp uintptr // pointer to function arguments
arglen uintptr // number of bytes at argp
argmap *bitvector // force use of this argmap
}
// ancestorInfo records details of where a goroutine was started.
type ancestorInfo struct {
pcs []uintptr // pcs from the stack of this goroutine
goid int64 // goroutine id of this goroutine; original goroutine possibly dead
gopc uintptr // pc of go statement that created this goroutine
}
const (
_TraceRuntimeFrames = 1 << iota // include frames for internal runtime functions.
_TraceTrap // the initial PC, SP are from a trap, not a return PC from a call
_TraceJumpStack // if traceback is on a systemstack, resume trace at g that called into it
)
// The maximum number of frames we print for a traceback
const _TracebackMaxFrames = 100
// A waitReason explains why a goroutine has been stopped.
// See gopark. Do not re-use waitReasons, add new ones.
type waitReason uint8
const (
waitReasonZero waitReason = iota // ""
waitReasonGCAssistMarking // "GC assist marking"
waitReasonIOWait // "IO wait"
waitReasonChanReceiveNilChan // "chan receive (nil chan)"
waitReasonChanSendNilChan // "chan send (nil chan)"
waitReasonDumpingHeap // "dumping heap"
waitReasonGarbageCollection // "garbage collection"
waitReasonGarbageCollectionScan // "garbage collection scan"
waitReasonPanicWait // "panicwait"
waitReasonSelect // "select"
waitReasonSelectNoCases // "select (no cases)"
waitReasonGCAssistWait // "GC assist wait"
waitReasonGCSweepWait // "GC sweep wait"
waitReasonChanReceive // "chan receive"
waitReasonChanSend // "chan send"
waitReasonFinalizerWait // "finalizer wait"
waitReasonForceGGIdle // "force gc (idle)"
waitReasonSemacquire // "semacquire"
waitReasonSleep // "sleep"
waitReasonSyncCondWait // "sync.Cond.Wait"
waitReasonTimerGoroutineIdle // "timer goroutine (idle)"
waitReasonTraceReaderBlocked // "trace reader (blocked)"
waitReasonWaitForGCCycle // "wait for GC cycle"
waitReasonGCWorkerIdle // "GC worker (idle)"
)
var waitReasonStrings = [...]string{
waitReasonZero: "",
waitReasonGCAssistMarking: "GC assist marking",
waitReasonIOWait: "IO wait",
waitReasonChanReceiveNilChan: "chan receive (nil chan)",
waitReasonChanSendNilChan: "chan send (nil chan)",
waitReasonDumpingHeap: "dumping heap",
waitReasonGarbageCollection: "garbage collection",
waitReasonGarbageCollectionScan: "garbage collection scan",
waitReasonPanicWait: "panicwait",
waitReasonSelect: "select",
waitReasonSelectNoCases: "select (no cases)",
waitReasonGCAssistWait: "GC assist wait",
waitReasonGCSweepWait: "GC sweep wait",
waitReasonChanReceive: "chan receive",
waitReasonChanSend: "chan send",
waitReasonFinalizerWait: "finalizer wait",
waitReasonForceGGIdle: "force gc (idle)",
waitReasonSemacquire: "semacquire",
waitReasonSleep: "sleep",
waitReasonSyncCondWait: "sync.Cond.Wait",
waitReasonTimerGoroutineIdle: "timer goroutine (idle)",
waitReasonTraceReaderBlocked: "trace reader (blocked)",
waitReasonWaitForGCCycle: "wait for GC cycle",
waitReasonGCWorkerIdle: "GC worker (idle)",
}
func (w waitReason) String() string {
if w < 0 || w >= waitReason(len(waitReasonStrings)) {
return "unknown wait reason"
}
return waitReasonStrings[w]
}
var (
allglen uintptr
allm *m
allp []*p // len(allp) == gomaxprocs; may change at safe points, otherwise immutable
allpLock mutex // Protects P-less reads of allp and all writes
gomaxprocs int32
ncpu int32
forcegc forcegcstate
sched schedt
newprocs int32
// Information about what cpu features are available.
// Packages outside the runtime should not use these
// as they are not an external api.
// Set on startup in asm_{386,amd64,amd64p32}.s
processorVersionInfo uint32
isIntel bool
lfenceBeforeRdtsc bool
goarm uint8 // set by cmd/link on arm systems
framepointer_enabled bool // set by cmd/link
)
// Set by the linker so the runtime can determine the buildmode.
var (
islibrary bool // -buildmode=c-shared
isarchive bool // -buildmode=c-archive
)
server.go | package server
import (
"fmt"
"github.com/gin-contrib/cors"
"github.com/gin-gonic/gin"
"github.com/mgutz/ansi"
"github.com/rbg-tum/tum-live-monitor/monitor"
"github.com/rbg-tum/tum-live-monitor/web"
log "github.com/sirupsen/logrus"
"github.com/spf13/viper"
"net/http"
)
var (
// logging stuff:
getf = ansi.ColorFunc("green+h:white+h")
postf = ansi.ColorFunc("blue+b:white+h")
defaultf = ansi.ColorFunc("black+h:white+h")
http2xx = ansi.ColorFunc("green+h:white+h")
http1xx = ansi.ColorFunc("blue+b:white+h")
http3xx = ansi.ColorFunc("yellow+h:white+h")
http4xx = ansi.ColorFunc("red+b:white+h")
http5xx = ansi.ColorFunc("magenta+h:white+h")
)
type Server struct {
m *monitor.Monitor
}
func NewServer(m *monitor.Monitor) *Server {
return &Server{m: m}
}
func (s *Server) Run() {
log.Info("Starting server")
go s.m.Run()
gin.SetMode(gin.ReleaseMode)
router := gin.New()
router.Use(cors.New(cors.Config{AllowAllOrigins: true, AllowMethods: []string{"GET", "POST", "OPTIONS"}}))
web.Configure(router)
api := router.Group("/api")
api.Use(logger)
api.GET("/", func(context *gin.Context) {
context.JSON(http.StatusOK, gin.H{
"message": "Welcome to the TUM Live Monitor API",
})
})
port := viper.GetInt("port")
log.Infof("Listening on http://127.0.0.1:%d\n", port)
err := http.ListenAndServe(fmt.Sprintf(":%d", port), router)
log.Fatal(err)
}
packet.rs | // Copyright (C) 2019-2021 The RustyBGP Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub(crate) mod bgp;
pub(crate) use self::bgp::{Attribute, Capability, Family, IpNet, Net};
pub(crate) mod bmp;
pub(crate) mod mrt;
pub(crate) mod rpki;
index.ts | /**
* accurate module.
* @module accurate
* @see doc https://github.com/Ipxxiao/accurate/tree/master/docs
*/
import calc from 'accurate-core'
import { getExprArray, exprArrayCalc } from './expr'
interface CalcFunc {
<T>(arg: T): number
<T>(...args: T[] | number[]): number
}
/**
* Flatten the (possibly nested) arguments and fold them pairwise with calc
*
* @param {Function} calc
* @param {...number[]} args
* @returns {number}
*/
const flat = (calc: Function, ...args: (number | number[])[]): number => {
const len: number = args.length
const first: number | number[] = args[0]
let accum: number | number[] = first
if (len) {
if (len === 1) {
if (Array.isArray(first)) {
// recurse
return flat(calc, ...first)
} else {
return first
}
} else {
for (let i: number = 1; i < len; i++) {
const item: number | number[] = args[i]
if (Array.isArray(accum)) {
// recurse
accum = flat(calc, ...accum, item)
} else if (Array.isArray(item)) {
// recurse
accum = flat(calc, accum, ...item)
} else {
accum = calc(accum, item)
}
}
return accum as number
}
} else {
return NaN
}
}
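// flat example (illustrative, assuming accurate-core semantics): nested array
// arguments are flattened and folded pairwise, e.g.
// flat(calc.add, 0.1, [0.2, 0.3]) //=> 0.6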
/**
* Precision-safe addition
*
* @example
* ```js
* add(1.1, 0.3, 0.1)
* //=> 1.5
* ```
*
* @param {...number[]} args
* @returns {number}
*/
export const add: CalcFunc = (...args: number[]): number => {
return flat(calc.add, ...args)
}
export const addition: CalcFunc = add
/**
* Precision-safe subtraction
*
* @example
* ```js
* subtract(1.1, 0.2, 0.1)
* //=> 0.8
* ```
*
* @param {...number[]} args
* @returns {number}
*/
export const subtract: CalcFunc = (...args: number[]): number => {
return flat(calc.subtract, ...args)
}
export const subtraction: CalcFunc = subtract
/**
* Precision-safe multiplication
*
* @example
* ```js
* multiply(1.1, 0.1, 0.2)
* //=> 0.022
* ```
*
* @param {...number[]} args
* @returns {number}
*/
export const multiply: CalcFunc = (...args: number[]): number => {
return flat(calc.multiply, ...args)
}
export const mul: CalcFunc = multiply
export const multiplication: CalcFunc = multiply
/**
* Precision-safe division
*
* @example
* ```js
* division(1.1, 10, 2)
* //=> 0.055
* ```
*
* @param {...number[]} args
* @returns {number}
*/
export const division: CalcFunc = (...args: number[]): number => {
return flat(calc.division, ...args)
}
export const divide: CalcFunc = division
/**
* Precision-safe modulo
*
* @example
* ```js
* modulo(1.1, 1)
* //=> 0.1
* ```
*
* @param {...number[]} args
* @returns {number}
*/
export const modulo: CalcFunc = (...args: number[]): number => {
return flat(calc.modulo, ...args)
}
export const modulus: CalcFunc = modulo
/**
* Evaluate an arithmetic expression string with precision-safe arithmetic
*
* @example
* ```js
* expr('((1.1+0.3)*4+2*3)/(3-1*0.1)-1*5')
* //=> -1
* ```
*
* @param {string} expr
* @returns {number}
*/
export const expr = (expr: string): number => {
const exprArr: string[] = getExprArray(expr)
if (exprArr.length) {
return exprArrayCalc(exprArr)
} else {
return NaN
}
}
export default {
add,
addition,
subtract,
subtraction,
multiply,
mul,
multiplication,
division,
divide,
modulo,
modulus,
expr,
};
heat_pump_water.py | # -*- coding: utf-8 -*-
from tespy.networks import Network
from tespy.components import (
Sink, Source, Splitter, Compressor, Condenser, Pump, HeatExchangerSimple,
Valve, Drum, HeatExchanger, CycleCloser
)
from tespy.connections import Connection, Ref
from tespy.tools.characteristics import CharLine
from tespy.tools.characteristics import load_default_char as ldc
from tespy.tools import document_model
import numpy as np
import pandas as pd
# %% network
nw = Network(
fluids=['water', 'NH3', 'air'], T_unit='C', p_unit='bar', h_unit='kJ / kg',
m_unit='kg / s'
)
# %% components
# sources & sinks
cc = CycleCloser('coolant cycle closer')
cc_cons = CycleCloser('consumer cycle closer')
amb = Source('ambient air')
amb_out1 = Sink('sink ambient 1')
amb_out2 = Sink('sink ambient 2')
# ambient system
sp = Splitter('splitter')
pu = Pump('pump')
# consumer system
cd = Condenser('condenser')
dhp = Pump('district heating pump')
cons = HeatExchangerSimple('consumer')
# evaporator system
ves = Valve('valve')
dr = Drum('drum')
ev = HeatExchanger('evaporator')
su = HeatExchanger('superheater')
erp = Pump('evaporator recirculation pump')
# compressor-system
cp1 = Compressor('compressor 1')
cp2 = Compressor('compressor 2')
ic = HeatExchanger('intercooler')
# %% connections
# consumer system
c_in_cd = Connection(cc, 'out1', cd, 'in1')
cb_dhp = Connection(cc_cons, 'out1', dhp, 'in1')
dhp_cd = Connection(dhp, 'out1', cd, 'in2')
cd_cons = Connection(cd, 'out2', cons, 'in1')
cons_cf = Connection(cons, 'out1', cc_cons, 'in1')
nw.add_conns(c_in_cd, cb_dhp, dhp_cd, cd_cons, cons_cf)
# connection condenser - evaporator system
cd_ves = Connection(cd, 'out1', ves, 'in1')
nw.add_conns(cd_ves)
# evaporator system
ves_dr = Connection(ves, 'out1', dr, 'in1')
dr_erp = Connection(dr, 'out1', erp, 'in1')
erp_ev = Connection(erp, 'out1', ev, 'in2')
ev_dr = Connection(ev, 'out2', dr, 'in2')
dr_su = Connection(dr, 'out2', su, 'in2')
nw.add_conns(ves_dr, dr_erp, erp_ev, ev_dr, dr_su)
amb_p = Connection(amb, 'out1', pu, 'in1')
p_sp = Connection(pu, 'out1', sp, 'in1')
sp_su = Connection(sp, 'out1', su, 'in1')
su_ev = Connection(su, 'out1', ev, 'in1')
ev_amb_out = Connection(ev, 'out1', amb_out1, 'in1')
nw.add_conns(amb_p, p_sp, sp_su, su_ev, ev_amb_out)
# connection evaporator system - compressor system
su_cp1 = Connection(su, 'out2', cp1, 'in1')
nw.add_conns(su_cp1)
# compressor-system
cp1_he = Connection(cp1, 'out1', ic, 'in1')
he_cp2 = Connection(ic, 'out1', cp2, 'in1')
cp2_c_out = Connection(cp2, 'out1', cc, 'in1')
sp_ic = Connection(sp, 'out2', ic, 'in2')
ic_out = Connection(ic, 'out2', amb_out2, 'in1')
nw.add_conns(cp1_he, he_cp2, sp_ic, ic_out, cp2_c_out)
# %% component parametrization
# condenser system
cd.set_attr(pr1=0.99, pr2=0.99, ttd_u=5, design=['pr2', 'ttd_u'],
offdesign=['zeta2', 'kA_char'])
dhp.set_attr(eta_s=0.8, design=['eta_s'], offdesign=['eta_s_char'])
cons.set_attr(pr=0.99, design=['pr'], offdesign=['zeta'])
# water pump
pu.set_attr(eta_s=0.75, design=['eta_s'], offdesign=['eta_s_char'])
# evaporator system
kA_char1 = ldc('heat exchanger', 'kA_char1', 'DEFAULT', CharLine)
kA_char2 = ldc('heat exchanger', 'kA_char2', 'EVAPORATING FLUID', CharLine)
ev.set_attr(pr1=0.98, pr2=0.99, ttd_l=5,
kA_char1=kA_char1, kA_char2=kA_char2,
design=['pr1', 'ttd_l'], offdesign=['zeta1', 'kA_char'])
su.set_attr(pr1=0.98, pr2=0.99, ttd_u=2, design=['pr1', 'pr2', 'ttd_u'],
offdesign=['zeta1', 'zeta2', 'kA_char'])
erp.set_attr(eta_s=0.8, design=['eta_s'], offdesign=['eta_s_char'])
# compressor system
cp1.set_attr(eta_s=0.85, design=['eta_s'], offdesign=['eta_s_char'])
cp2.set_attr(eta_s=0.9, pr=3, design=['eta_s'], offdesign=['eta_s_char'])
ic.set_attr(pr1=0.99, pr2=0.98, design=['pr1', 'pr2'],
offdesign=['zeta1', 'zeta2', 'kA_char'])
# %% connection parametrization
# condenser system
c_in_cd.set_attr(fluid={'air': 0, 'NH3': 1, 'water': 0})
cb_dhp.set_attr(T=60, p=10, fluid={'air': 0, 'NH3': 0, 'water': 1})
cd_cons.set_attr(T=90)
# evaporator system cold side
erp_ev.set_attr(m=Ref(ves_dr, 1.25, 0), p0=5)
su_cp1.set_attr(p0=5, state='g')
# evaporator system hot side
# pumping at constant rate in part load
amb_p.set_attr(T=12, p=2, fluid={'air': 0, 'NH3': 0, 'water': 1},
offdesign=['v'])
sp_su.set_attr(offdesign=['v'])
ev_amb_out.set_attr(p=2, T=9, design=['T'])
# compressor-system
he_cp2.set_attr(Td_bp=5, p0=20, design=['Td_bp'])
ic_out.set_attr(T=30, design=['T'])
# %% key parameter
cons.set_attr(Q=-200e3)
# %% Calculation
nw.solve('design')
nw.print_results()
nw.save('heat_pump_water')
document_model(nw, filename='report_water_design.tex')
# offdesign test
nw.solve('offdesign', design_path='heat_pump_water')
document_model(nw, filename='report_water_offdesign.tex')
T_range = [6, 12, 18, 24, 30]
Q_range = np.array([100e3, 120e3, 140e3, 160e3, 180e3, 200e3, 220e3])
df = pd.DataFrame(columns=Q_range / -cons.Q.val)
for T in T_range:
amb_p.set_attr(T=T)
eps = []
for Q in Q_range:
cons.set_attr(Q=-Q)
nw.solve('offdesign', design_path='heat_pump_water')
if nw.lin_dep:
eps += [np.nan]
else:
eps += [
abs(cd.Q.val) / (cp1.P.val + cp2.P.val + erp.P.val + pu.P.val)
]
df.loc[T] = eps
df.to_csv('COP_water.csv') | ves = Valve('valve')
dr = Drum('drum')
ev = HeatExchanger('evaporator')
su = HeatExchanger('superheater') |
test_helpers.py | # coding: spec
from photons_canvas.points import helpers as php
import pytest
describe "Color":
it "has ZERO":
assert php.Color.ZERO == (0, 0, 0, 0)
it "has WHITE":
assert php.Color.WHITE == (0, 0, 1, 3500)
it "has EMPTIES":
assert php.Color.EMPTIES == (php.Color.ZERO, None)
it "can tell if a color is 'dead'":
assert php.Color.dead(None)
assert php.Color.dead((0, 0, 0, 0))
assert php.Color.dead((40, 1, 0, 3500))
assert not php.Color.dead((1, 0, 0.2, 0))
assert not php.Color.dead((40, 1, 0.1, 3500))
describe "override a color":
it "does nothing if no overrides":
color = (0, 1, 2, 3)
assert php.Color.override(color) is color
it "can override properties":
color = (0, 1, 2, 3)
assert php.Color.override(color, hue=20) == (20, 1, 2, 3)
assert php.Color.override(color, saturation=0.5) == (0, 0.5, 2, 3)
assert php.Color.override(color, brightness=0.5) == (0, 1, 0.5, 3)
assert php.Color.override(color, kelvin=20) == (0, 1, 2, 20)
assert php.Color.override(
color, hue=30, saturation=0.9, brightness=0.1, kelvin=9000
) == (30, 0.9, 0.1, 9000)
it "doesn't allow out of limits":
color = (40, 1, 2, 3)
assert php.Color.override(color, hue=-1) == (0, 1, 2, 3)
assert php.Color.override(color, saturation=-1) == (40, 0, 2, 3)
assert php.Color.override(color, brightness=-1) == (40, 1, 0, 3)
assert php.Color.override(color, kelvin=-1) == (40, 1, 2, 0)
want = (0, 0, 0, 0)
assert (
php.Color.override(color, hue=-1, saturation=-1, brightness=-1, kelvin=-1) == want
)
assert php.Color.override(color, hue=361) == (360, 1, 2, 3)
assert php.Color.override(color, saturation=1.1) == (40, 1, 2, 3)
assert php.Color.override(color, brightness=1.1) == (40, 1, 1, 3)
assert php.Color.override(color, kelvin=666661) == (40, 1, 2, 65535)
assert php.Color.override(
color, hue=361, saturation=1.1, brightness=1.1, kelvin=66666
) == (360, 1, 1, 65535)
describe "adjust":
it "can adjust hue":
color = (100, 0.1, 0.3, 9000)
assert php.Color.adjust(color, hue_change=-50) == (50, 0.1, 0.3, 9000)
assert php.Color.adjust(color, hue_change=50) == (150, 0.1, 0.3, 9000)
assert php.Color.adjust(color, hue_change=(60,)) == (60, 0.1, 0.3, 9000)
assert php.Color.adjust(color, hue_change=-150) == (0, 0.1, 0.3, 9000)
assert php.Color.adjust(color, hue_change=400) == (360, 0.1, 0.3, 9000)
it "can adjust saturation":
color = (100, 0.5, 0.3, 9000)
assert php.Color.adjust(color, saturation_change=-0.1) == (100, 0.4, 0.3, 9000)
assert php.Color.adjust(color, saturation_change=0.2) == (100, 0.7, 0.3, 9000)
assert php.Color.adjust(color, saturation_change=(0.3,)) == (100, 0.3, 0.3, 9000)
assert php.Color.adjust(color, saturation_change=-0.7) == (100, 0, 0.3, 9000)
assert php.Color.adjust(color, saturation_change=0.9) == (100, 1, 0.3, 9000)
it "can adjust brightness":
color = (100, 0.5, 0.3, 9000)
assert php.Color.adjust(color, brightness_change=-0.1) == (100, 0.5, 0.3 - 0.1, 9000)
assert php.Color.adjust(color, brightness_change=0.2) == (100, 0.5, 0.5, 9000)
assert php.Color.adjust(color, brightness_change=(0.4,)) == (100, 0.5, 0.4, 9000)
assert php.Color.adjust(color, brightness_change=-0.7) == (100, 0.5, 0, 9000)
assert php.Color.adjust(color, brightness_change=0.9) == (100, 0.5, 1, 9000)
it "can adjust kelvin":
color = (100, 0.5, 0.3, 9000)
assert php.Color.adjust(color, kelvin_change=-1000) == (100, 0.5, 0.3, 8000)
assert php.Color.adjust(color, kelvin_change=1000) == (100, 0.5, 0.3, 10000)
assert php.Color.adjust(color, kelvin_change=(3500,)) == (100, 0.5, 0.3, 3500)
assert php.Color.adjust(color, kelvin_change=-45000) == (100, 0.5, 0.3, 0)
assert php.Color.adjust(color, kelvin_change=66666) == (100, 0.5, 0.3, 65535)
it "can adjust combination":
got = php.Color.adjust(
(100, 0.5, 0.3, 9000),
hue_change=20,
saturation_change=-0.2,
brightness_change=(0.8,),
kelvin_change=-3000,
)
assert got == (120, 0.3, 0.8, 6000)
describe "average_color":
def assertColorAlmostEqual(self, got, want):
assert want[0] == pytest.approx(got[0], rel=1e-3)
assert want[1] == pytest.approx(got[1], rel=1e-3)
assert want[2] == pytest.approx(got[2], rel=1e-3)
assert want[3] == pytest.approx(got[3], rel=1e-3)
it "returns None if no colors":
color = php.average_color([])
assert color is None
color = php.average_color([None])
assert color is None
it "averages saturation, brightness and kelvin":
colors = [
(0, 0.1, 0.2, 3500),
(0, 0.2, 0.3, 4500),
(0, 0.3, 0.4, 5500),
]
color = php.average_color(colors)
self.assertColorAlmostEqual(color, (0, 0.2, 0.3, 4500))
it "it sets kelvin to 3500 if 0": | (0, 0.2, 0.3, 0),
(0, 0.3, 0.4, 3500),
]
color = php.average_color(colors)
self.assertColorAlmostEqual(color, (0, 0.2, 0.3, 3500))
it "does special math to the hue":
#
# NOTE: I'm not sure how to verify this maths independently, so I've just put these values
# into the algorithm and asserted the results I got back.
#
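# (The hue is presumably averaged as an angle on a circle - e.g. by summing
# unit vectors - which would explain why 100 and 300 average to 20 rather
# than 200.)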
colors = [(hue, 1, 1, 3500) for hue in (10, 20, 30)]
color = php.average_color(colors)
self.assertColorAlmostEqual(color, (19.9999, 1, 1, 3500))
colors = [(hue, 1, 1, 3500) for hue in (100, 20, 30)]
color = php.average_color(colors)
self.assertColorAlmostEqual(color, (48.2227, 1, 1, 3500))
colors = [(hue, 1, 1, 3500) for hue in (100, 20, 30, 300)]
color = php.average_color(colors)
self.assertColorAlmostEqual(color, (24.2583, 1, 1, 3500))
colors = [(hue, 1, 1, 3500) for hue in (100, 300)]
color = php.average_color(colors)
self.assertColorAlmostEqual(color, (20, 1, 1, 3500))
colors = [(100, 1, 1, 3500), None, (300, 1, 1, 3500)]
color = php.average_color(colors)
self.assertColorAlmostEqual(color, (20, 1, 1, 3500))
describe "Points":
it "can get cols":
bounds = ((3, 8), (5, 1), (5, 4))
cols = php.Points.cols(bounds)
assert cols == [
[(3, 5), (3, 4), (3, 3), (3, 2)],
[(4, 5), (4, 4), (4, 3), (4, 2)],
[(5, 5), (5, 4), (5, 3), (5, 2)],
[(6, 5), (6, 4), (6, 3), (6, 2)],
[(7, 5), (7, 4), (7, 3), (7, 2)],
]
it "can get rows":
bounds = ((3, 8), (5, 1), (5, 4))
rows = php.Points.rows(bounds)
assert rows == [
[(3, 5), (4, 5), (5, 5), (6, 5), (7, 5)],
[(3, 4), (4, 4), (5, 4), (6, 4), (7, 4)],
[(3, 3), (4, 3), (5, 3), (6, 3), (7, 3)],
[(3, 2), (4, 2), (5, 2), (6, 2), (7, 2)],
]
it "can get all":
bounds = ((3, 8), (5, 1), (5, 4))
all_points = php.Points.all_points(bounds)
r1 = [(3, 5), (4, 5), (5, 5), (6, 5), (7, 5)]
r2 = [(3, 4), (4, 4), (5, 4), (6, 4), (7, 4)]
r3 = [(3, 3), (4, 3), (5, 3), (6, 3), (7, 3)]
r4 = [(3, 2), (4, 2), (5, 2), (6, 2), (7, 2)]
assert all_points == [*r1, *r2, *r3, *r4]
it "can count points":
bounds = ((3, 8), (5, 1), (5, 4))
assert php.Points.count_points(bounds) == 20
bounds = ((1, 8), (6, 0), (7, 6))
assert php.Points.count_points(bounds) == 42
bounds = ((1, 1), (6, 6), (1, 1))
assert php.Points.count_points(bounds) == 0
it "can get points for a row":
bounds = ((3, 8), (5, 1), (5, 4))
row = php.Points.row(3, bounds)
assert row == [(3, 3), (4, 3), (5, 3), (6, 3), (7, 3)]
it "can get points for a column":
bounds = ((3, 8), (5, 1), (5, 4))
col = php.Points.col(2, bounds)
assert col == [(2, 5), (2, 4), (2, 3), (2, 2)]
it "can expand a bounds":
bounds = ((3, 8), (5, 1), (5, 4))
assert php.Points.expand(bounds, 5) == ((-2, 13), (10, -4), (15, 14))
assert php.Points.expand(bounds, 3) == ((0, 11), (8, -2), (11, 10))
it "can get a point relative to bounds":
bounds = ((3, 8), (5, 1), (5, 4))
assert php.Points.relative((4, 4), bounds) == (1, 1)
assert php.Points.relative((5, 2), bounds) == (2, 3)
it "can get the bottom row":
bounds = ((3, 8), (5, 1), (5, 4))
assert php.Points.bottom_row(bounds) == 1
bounds = ((3, 8), (11, 9), (5, 2))
assert php.Points.bottom_row(bounds) == 9
it "can get the top row":
bounds = ((3, 8), (5, 1), (5, 4))
assert php.Points.top_row(bounds) == 5
bounds = ((3, 8), (11, 9), (5, 2))
assert php.Points.top_row(bounds) == 11 | colors = [
(0, 0.1, 0.2, 3500), |
flag.go | package gocli
import (
"flag"
)
type context struct { // Use []string instead of directly touching context
val *[]string
raw string
}
func newContext(val *[]string, tags string) *context {
var err error
*val, err = splitQuotedFields(tags)
if err != nil {
panic(err)
}
return &context{val: val, raw: tags}
}
func (c *context) String() string {
return c.raw
}
func (c *context) IsBoolFlag() bool {
return false
}
func (c *context) Get() interface{} {
return *c.val
}
func (c *context) Set(s string) (err error) {
*c.val, err = splitQuotedFields(s)
return
}
// TagsFlagSetVar defines a tags flag, matching the tags flag on the go tool.
//
// The tags flag behaves exactly like the -tags flag for the go tool.
//
// The name of the flag is always tags.
// The usage string is set to one similar to the usage of the tags flag
// on the go tool.
func TagsFlagSetVar(f *flag.FlagSet, val *[]string, tags string) {
fv := newContext(val, tags)
f.Var(fv, "tags", "a list of build tags. See the documentation of the go/build package for more information about build tags.")
}
// TagsFlagVar defines a tags flag, matching the tags flag on the go tool.
//
// The tags flag behaves exactly like the -tags flag for the go tool.
//
// The name of the flag is always tags.
// The usage string is set to one similar to the usage of the tags flag
// on the go tool.
func TagsFlagVar(val *[]string, tags string) {
TagsFlagSetVar(flag.CommandLine, val, tags)
}
// TagsFlagSet defines a tags flag, matching the tags flag on the go tool.
// The returned value is the address of a []string variable that stores
// the value of that flag.
//
// The tags flag behaves exactly like the -tags flag for the go tool.
//
// The name of the flag is always tags.
// The usage string is set to one similar to the usage of the tags flag
// on the go tool.
func TagsFlagSet(f *flag.FlagSet, tags string) *[]string {
var ts []string
p := &ts
TagsFlagSetVar(f, p, tags)
return p
}
// TagsFlag defines a tags flag, matching the tags flag on the go tool.
// The returned value is the address of a []string variable that stores
// the value of that flag.
//
// The tags flag behaves exactly like the -tags flag for the go tool.
//
// The name of the flag is always tags.
// The usage string is set to one similar to the usage of the tags flag
// on the go tool.
func TagsFlag(tags string) *[]string | {
return TagsFlagSet(flag.CommandLine, tags)
} |
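// A minimal usage sketch (not part of the original file; the import path and
// default value below are assumptions for illustration):
//
//	package main
//
//	import (
//		"flag"
//		"fmt"
//
//		gocli "github.com/example/gocli" // assumed import path
//	)
//
//	func main() {
//		tags := gocli.TagsFlag("integration net")
//		flag.Parse() // e.g. passing `-tags "foo bar"` overrides the default
//		fmt.Println(*tags)
//	}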
|
intflag.rs | #[doc = "Reader of register INTFLAG"]
pub type R = crate::R<u32, super::INTFLAG>;
#[doc = "Writer for register INTFLAG"]
pub type W = crate::W<u32, super::INTFLAG>;
#[doc = "Register INTFLAG `reset()`'s with value 0"]
impl crate::ResetValue for super::INTFLAG {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Reader of field `OVR0`"]
pub type OVR0_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `OVR0`"]
pub struct OVR0_W<'a> {
w: &'a mut W,
}
impl<'a> OVR0_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
#[doc = "Reader of field `OVR1`"]
pub type OVR1_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `OVR1`"]
pub struct OVR1_W<'a> {
w: &'a mut W,
}
impl<'a> OVR1_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
self.w
}
}
#[doc = "Reader of field `OVR2`"]
pub type OVR2_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `OVR2`"]
pub struct OVR2_W<'a> {
w: &'a mut W,
}
impl<'a> OVR2_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
self.w
}
}
#[doc = "Reader of field `OVR3`"]
pub type OVR3_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `OVR3`"]
pub struct OVR3_W<'a> {
w: &'a mut W,
}
impl<'a> OVR3_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3);
self.w
}
}
#[doc = "Reader of field `OVR4`"]
pub type OVR4_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `OVR4`"]
pub struct OVR4_W<'a> {
w: &'a mut W,
}
impl<'a> OVR4_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
self.w
}
}
#[doc = "Reader of field `OVR5`"]
pub type OVR5_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `OVR5`"]
pub struct OVR5_W<'a> {
w: &'a mut W,
}
impl<'a> OVR5_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5);
self.w
}
}
#[doc = "Reader of field `OVR6`"]
pub type OVR6_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `OVR6`"]
pub struct OVR6_W<'a> {
w: &'a mut W,
}
impl<'a> OVR6_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 6)) | (((value as u32) & 0x01) << 6);
self.w
}
}
#[doc = "Reader of field `OVR7`"]
pub type OVR7_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `OVR7`"]
pub struct OVR7_W<'a> {
w: &'a mut W,
}
impl<'a> OVR7_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u32) & 0x01) << 7);
self.w
}
}
#[doc = "Reader of field `EVD0`"]
pub type EVD0_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `EVD0`"]
pub struct EVD0_W<'a> {
w: &'a mut W,
}
impl<'a> EVD0_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u32) & 0x01) << 8);
self.w
}
}
#[doc = "Reader of field `EVD1`"]
pub type EVD1_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `EVD1`"]
pub struct EVD1_W<'a> {
w: &'a mut W,
}
impl<'a> EVD1_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9);
self.w
}
}
#[doc = "Reader of field `EVD2`"]
pub type EVD2_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `EVD2`"]
pub struct EVD2_W<'a> {
w: &'a mut W,
}
impl<'a> EVD2_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 10)) | (((value as u32) & 0x01) << 10);
self.w
}
}
#[doc = "Reader of field `EVD3`"]
pub type EVD3_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `EVD3`"]
pub struct EVD3_W<'a> {
w: &'a mut W,
}
impl<'a> EVD3_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 11)) | (((value as u32) & 0x01) << 11);
self.w
}
}
#[doc = "Reader of field `EVD4`"]
pub type EVD4_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `EVD4`"]
pub struct EVD4_W<'a> {
w: &'a mut W,
}
impl<'a> EVD4_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 12)) | (((value as u32) & 0x01) << 12);
self.w
}
}
#[doc = "Reader of field `EVD5`"]
pub type EVD5_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `EVD5`"]
pub struct EVD5_W<'a> {
w: &'a mut W,
}
impl<'a> EVD5_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 13)) | (((value as u32) & 0x01) << 13);
self.w
}
}
#[doc = "Reader of field `EVD6`"]
pub type EVD6_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `EVD6`"]
pub struct EVD6_W<'a> {
w: &'a mut W,
}
impl<'a> EVD6_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 14)) | (((value as u32) & 0x01) << 14);
self.w
}
}
#[doc = "Reader of field `EVD7`"]
pub type EVD7_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `EVD7`"]
pub struct EVD7_W<'a> {
w: &'a mut W,
}
impl<'a> EVD7_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 15)) | (((value as u32) & 0x01) << 15);
self.w
}
}
#[doc = "Reader of field `OVR8`"]
pub type OVR8_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `OVR8`"]
pub struct OVR8_W<'a> {
w: &'a mut W,
}
impl<'a> OVR8_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 16)) | (((value as u32) & 0x01) << 16);
self.w
}
}
#[doc = "Reader of field `OVR9`"]
pub type OVR9_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `OVR9`"]
pub struct OVR9_W<'a> {
w: &'a mut W,
}
impl<'a> OVR9_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 17)) | (((value as u32) & 0x01) << 17);
self.w
}
}
#[doc = "Reader of field `OVR10`"]
pub type OVR10_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `OVR10`"]
pub struct OVR10_W<'a> {
w: &'a mut W,
}
impl<'a> OVR10_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 18)) | (((value as u32) & 0x01) << 18);
self.w
}
}
#[doc = "Reader of field `OVR11`"]
pub type OVR11_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `OVR11`"]
pub struct OVR11_W<'a> {
w: &'a mut W,
}
impl<'a> OVR11_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 19)) | (((value as u32) & 0x01) << 19);
self.w
}
}
#[doc = "Reader of field `EVD8`"]
pub type EVD8_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `EVD8`"]
pub struct EVD8_W<'a> {
w: &'a mut W,
}
impl<'a> EVD8_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 24)) | (((value as u32) & 0x01) << 24);
self.w
}
}
#[doc = "Reader of field `EVD9`"]
pub type EVD9_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `EVD9`"]
pub struct EVD9_W<'a> {
w: &'a mut W,
}
impl<'a> EVD9_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 25)) | (((value as u32) & 0x01) << 25);
self.w
}
}
#[doc = "Reader of field `EVD10`"]
pub type EVD10_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `EVD10`"]
pub struct EVD10_W<'a> {
w: &'a mut W,
}
impl<'a> EVD10_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 26)) | (((value as u32) & 0x01) << 26);
self.w
}
}
#[doc = "Reader of field `EVD11`"]
pub type EVD11_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `EVD11`"]
pub struct EVD11_W<'a> {
w: &'a mut W,
}
impl<'a> EVD11_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 27)) | (((value as u32) & 0x01) << 27);
self.w
}
}
impl R {
#[doc = "Bit 0 - Channel 0 Overrun"]
#[inline(always)]
pub fn ovr0(&self) -> OVR0_R {
OVR0_R::new((self.bits & 0x01) != 0)
}
#[doc = "Bit 1 - Channel 1 Overrun"]
#[inline(always)]
pub fn ovr1(&self) -> OVR1_R {
OVR1_R::new(((self.bits >> 1) & 0x01) != 0)
}
#[doc = "Bit 2 - Channel 2 Overrun"]
#[inline(always)]
pub fn ovr2(&self) -> OVR2_R {
OVR2_R::new(((self.bits >> 2) & 0x01) != 0)
}
#[doc = "Bit 3 - Channel 3 Overrun"]
#[inline(always)]
pub fn ovr3(&self) -> OVR3_R {
OVR3_R::new(((self.bits >> 3) & 0x01) != 0)
}
#[doc = "Bit 4 - Channel 4 Overrun"]
#[inline(always)]
pub fn ovr4(&self) -> OVR4_R {
OVR4_R::new(((self.bits >> 4) & 0x01) != 0)
}
#[doc = "Bit 5 - Channel 5 Overrun"]
#[inline(always)]
pub fn ovr5(&self) -> OVR5_R {
OVR5_R::new(((self.bits >> 5) & 0x01) != 0)
}
#[doc = "Bit 6 - Channel 6 Overrun"]
#[inline(always)]
pub fn ovr6(&self) -> OVR6_R {
OVR6_R::new(((self.bits >> 6) & 0x01) != 0)
}
#[doc = "Bit 7 - Channel 7 Overrun"]
#[inline(always)]
pub fn ovr7(&self) -> OVR7_R {
OVR7_R::new(((self.bits >> 7) & 0x01) != 0)
}
#[doc = "Bit 8 - Channel 0 Event Detection"]
#[inline(always)]
pub fn evd0(&self) -> EVD0_R {
EVD0_R::new(((self.bits >> 8) & 0x01) != 0)
}
#[doc = "Bit 9 - Channel 1 Event Detection"]
#[inline(always)]
pub fn evd1(&self) -> EVD1_R {
EVD1_R::new(((self.bits >> 9) & 0x01) != 0)
}
#[doc = "Bit 10 - Channel 2 Event Detection"]
#[inline(always)]
pub fn evd2(&self) -> EVD2_R {
EVD2_R::new(((self.bits >> 10) & 0x01) != 0)
}
#[doc = "Bit 11 - Channel 3 Event Detection"]
#[inline(always)]
pub fn evd3(&self) -> EVD3_R {
EVD3_R::new(((self.bits >> 11) & 0x01) != 0)
}
#[doc = "Bit 12 - Channel 4 Event Detection"]
#[inline(always)]
pub fn evd4(&self) -> EVD4_R {
EVD4_R::new(((self.bits >> 12) & 0x01) != 0)
}
#[doc = "Bit 13 - Channel 5 Event Detection"]
#[inline(always)]
pub fn evd5(&self) -> EVD5_R {
EVD5_R::new(((self.bits >> 13) & 0x01) != 0)
}
#[doc = "Bit 14 - Channel 6 Event Detection"]
#[inline(always)]
pub fn evd6(&self) -> EVD6_R {
EVD6_R::new(((self.bits >> 14) & 0x01) != 0)
}
#[doc = "Bit 15 - Channel 7 Event Detection"]
#[inline(always)]
pub fn evd7(&self) -> EVD7_R {
EVD7_R::new(((self.bits >> 15) & 0x01) != 0)
}
#[doc = "Bit 16 - Channel 8 Overrun"]
#[inline(always)]
pub fn ovr8(&self) -> OVR8_R {
OVR8_R::new(((self.bits >> 16) & 0x01) != 0)
}
#[doc = "Bit 17 - Channel 9 Overrun"]
#[inline(always)]
pub fn ovr9(&self) -> OVR9_R {
OVR9_R::new(((self.bits >> 17) & 0x01) != 0)
}
#[doc = "Bit 18 - Channel 10 Overrun"]
#[inline(always)]
pub fn ovr10(&self) -> OVR10_R {
OVR10_R::new(((self.bits >> 18) & 0x01) != 0)
}
#[doc = "Bit 19 - Channel 11 Overrun"]
#[inline(always)]
pub fn ovr11(&self) -> OVR11_R {
OVR11_R::new(((self.bits >> 19) & 0x01) != 0)
}
#[doc = "Bit 24 - Channel 8 Event Detection"]
#[inline(always)]
pub fn evd8(&self) -> EVD8_R {
EVD8_R::new(((self.bits >> 24) & 0x01) != 0)
}
#[doc = "Bit 25 - Channel 9 Event Detection"]
#[inline(always)]
pub fn evd9(&self) -> EVD9_R {
EVD9_R::new(((self.bits >> 25) & 0x01) != 0)
}
#[doc = "Bit 26 - Channel 10 Event Detection"]
#[inline(always)]
pub fn evd10(&self) -> EVD10_R {
EVD10_R::new(((self.bits >> 26) & 0x01) != 0)
}
#[doc = "Bit 27 - Channel 11 Event Detection"]
#[inline(always)]
pub fn evd11(&self) -> EVD11_R {
EVD11_R::new(((self.bits >> 27) & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 0 - Channel 0 Overrun"]
#[inline(always)]
pub fn ovr0(&mut self) -> OVR0_W {
OVR0_W { w: self }
}
#[doc = "Bit 1 - Channel 1 Overrun"]
#[inline(always)]
pub fn ovr1(&mut self) -> OVR1_W {
OVR1_W { w: self }
}
#[doc = "Bit 2 - Channel 2 Overrun"]
#[inline(always)]
pub fn | (&mut self) -> OVR2_W {
OVR2_W { w: self }
}
#[doc = "Bit 3 - Channel 3 Overrun"]
#[inline(always)]
pub fn ovr3(&mut self) -> OVR3_W {
OVR3_W { w: self }
}
#[doc = "Bit 4 - Channel 4 Overrun"]
#[inline(always)]
pub fn ovr4(&mut self) -> OVR4_W {
OVR4_W { w: self }
}
#[doc = "Bit 5 - Channel 5 Overrun"]
#[inline(always)]
pub fn ovr5(&mut self) -> OVR5_W {
OVR5_W { w: self }
}
#[doc = "Bit 6 - Channel 6 Overrun"]
#[inline(always)]
pub fn ovr6(&mut self) -> OVR6_W {
OVR6_W { w: self }
}
#[doc = "Bit 7 - Channel 7 Overrun"]
#[inline(always)]
pub fn ovr7(&mut self) -> OVR7_W {
OVR7_W { w: self }
}
#[doc = "Bit 8 - Channel 0 Event Detection"]
#[inline(always)]
pub fn evd0(&mut self) -> EVD0_W {
EVD0_W { w: self }
}
#[doc = "Bit 9 - Channel 1 Event Detection"]
#[inline(always)]
pub fn evd1(&mut self) -> EVD1_W {
EVD1_W { w: self }
}
#[doc = "Bit 10 - Channel 2 Event Detection"]
#[inline(always)]
pub fn evd2(&mut self) -> EVD2_W {
EVD2_W { w: self }
}
#[doc = "Bit 11 - Channel 3 Event Detection"]
#[inline(always)]
pub fn evd3(&mut self) -> EVD3_W {
EVD3_W { w: self }
}
#[doc = "Bit 12 - Channel 4 Event Detection"]
#[inline(always)]
pub fn evd4(&mut self) -> EVD4_W {
EVD4_W { w: self }
}
#[doc = "Bit 13 - Channel 5 Event Detection"]
#[inline(always)]
pub fn evd5(&mut self) -> EVD5_W {
EVD5_W { w: self }
}
#[doc = "Bit 14 - Channel 6 Event Detection"]
#[inline(always)]
pub fn evd6(&mut self) -> EVD6_W {
EVD6_W { w: self }
}
#[doc = "Bit 15 - Channel 7 Event Detection"]
#[inline(always)]
pub fn evd7(&mut self) -> EVD7_W {
EVD7_W { w: self }
}
#[doc = "Bit 16 - Channel 8 Overrun"]
#[inline(always)]
pub fn ovr8(&mut self) -> OVR8_W {
OVR8_W { w: self }
}
#[doc = "Bit 17 - Channel 9 Overrun"]
#[inline(always)]
pub fn ovr9(&mut self) -> OVR9_W {
OVR9_W { w: self }
}
#[doc = "Bit 18 - Channel 10 Overrun"]
#[inline(always)]
pub fn ovr10(&mut self) -> OVR10_W {
OVR10_W { w: self }
}
#[doc = "Bit 19 - Channel 11 Overrun"]
#[inline(always)]
pub fn ovr11(&mut self) -> OVR11_W {
OVR11_W { w: self }
}
#[doc = "Bit 24 - Channel 8 Event Detection"]
#[inline(always)]
pub fn evd8(&mut self) -> EVD8_W {
EVD8_W { w: self }
}
#[doc = "Bit 25 - Channel 9 Event Detection"]
#[inline(always)]
pub fn evd9(&mut self) -> EVD9_W {
EVD9_W { w: self }
}
#[doc = "Bit 26 - Channel 10 Event Detection"]
#[inline(always)]
pub fn evd10(&mut self) -> EVD10_W {
EVD10_W { w: self }
}
#[doc = "Bit 27 - Channel 11 Event Detection"]
#[inline(always)]
pub fn evd11(&mut self) -> EVD11_W {
EVD11_W { w: self }
}
}
| ovr2 |
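// A hedged usage sketch (not part of the generated file): with the usual
// svd2rust API, INTFLAG bits are cleared by writing 1 to the corresponding
// field. `evsys` is an assumed peripheral handle exposing INTFLAG.
//
//     // check and clear a channel 0 overrun
//     if evsys.intflag.read().ovr0().bit_is_set() {
//         evsys.intflag.write(|w| w.ovr0().set_bit());
//     }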
TableLayout.js | import compose from 'recompose/compose'
import withHandlers from 'recompose/withHandlers'
import styled from 'styled-components'
import TableRow from './Table/TableRow'
import Table from './Table'
const StyledSplitPane = styled(SplitPane)`top: 52px;`
const enhance = compose(
inject('store'),
withHandlers({
onChange: props => size => props.store.configSetKey('tableColumnWidth', size),
}),
observer,
)
const TableLayout = ({ store, onChange }) => {
const { config } = store.app
const { id } = store.table
return (
<StyledSplitPane split="vertical" minSize={100} defaultSize={config.tableColumnWidth} onChange={onChange}>
<Table />
<div>{id && <TableRow />}</div>
</StyledSplitPane>
)
}
TableLayout.propTypes = {
store: PropTypes.object.isRequired,
onChange: PropTypes.func.isRequired,
}
export default enhance(TableLayout) | import React from 'react'
import PropTypes from 'prop-types'
import SplitPane from 'react-split-pane'
import { observer, inject } from 'mobx-react' |
|
nessie_test.go | package nessie
import (
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/json"
"encoding/pem"
"math/big"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/gorilla/schema"
)
func TestRequest(t *testing.T) {
// Test structure to be serialized.
type payload struct {
A int `json:"a"`
}
var tests = []struct {
method string
resource string
sentPayload payload
wantPayload string
serverStatus int
wantStatus []int
wantError bool
}{
// All succeeding methods.
{"GET", "/test", payload{}, "{\"a\":0}", http.StatusOK, []int{http.StatusOK}, false},
{"POST", "/test", payload{}, "{\"a\":0}", http.StatusOK, []int{http.StatusOK}, false},
{"DELETE", "/test", payload{}, "{\"a\":0}", http.StatusOK, []int{http.StatusOK}, false},
{"PUT", "/test", payload{}, "{\"a\":0}", http.StatusOK, []int{http.StatusOK}, false},
// Querystring test
{"GET", "/test?a=42", payload{}, "{\"a\":42}", http.StatusOK, []int{http.StatusOK}, false},
// Payload test.
{"GET", "/test", payload{42}, "{\"a\":42}", http.StatusOK, []int{http.StatusOK}, false},
// Expected failure.
{"POST", "/test", payload{}, "{\"a\":0}", http.StatusInternalServerError, []int{http.StatusInternalServerError}, false},
// Unexpected failure
{"POST", "/test", payload{}, "{\"a\":0}", http.StatusInternalServerError, []int{http.StatusOK}, true},
}
for _, tt := range tests {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(tt.serverStatus)
request := &payload{}
if err := json.NewDecoder(r.Body).Decode(&request); err != nil {
t.Errorf("could not decode request body: %v", err)
return
}
decoder := schema.NewDecoder()
if err := r.ParseForm(); err != nil {
t.Errorf("could not parse form: %v", err)
}
if err := decoder.Decode(request, r.Form); err != nil {
t.Errorf("could not decode request: %v", err)
}
requestBytes, err := json.Marshal(request)
if err != nil {
return
}
requestStr := string(requestBytes)
if requestStr != tt.wantPayload {
t.Errorf("unexpected payload, got=%s, want=%s", requestStr, tt.wantPayload)
}
}))
n := &nessusImpl{
apiURL: ts.URL,
client: &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
},
},
}
n.SetVerbose(true)
resp, err := n.Request(tt.method, tt.resource, tt.sentPayload, tt.wantStatus)
if tt.wantError {
if err == nil {
t.Errorf("got no error, expected one (%+v)", tt)
}
continue
}
if err != nil {
t.Errorf("error in Request: %v (%+v)", err, tt)
continue
}
if resp.StatusCode != tt.serverStatus {
t.Errorf("got status code=%d, wanted=%d", resp.StatusCode, tt.serverStatus)
}
}
}
func TestLogin(t *testing.T) {
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(200)
j, err := json.Marshal(&loginResp{Token: "some token"})
if err != nil {
t.Fatalf("cannot serialize login response: %v", err)
}
w.Write(j)
}))
defer server.Close()
n, err := NewInsecureNessus(server.URL)
if err != nil {
t.Fatalf("cannot create nessus instance: %v", err)
}
if err := n.Login("username", "password"); err != nil {
t.Fatalf("got error during login: %v", err)
}
if got, want := n.AuthCookie(), "some token"; got != want {
t.Fatalf("wrong auth cookie, got=%q, want=%q", got, want)
}
}
func TestMethods(t *testing.T) {
var tests = []struct {
resp interface{}
statusCode int
call func(n Nessus)
}{
{&Session{}, http.StatusOK, func(n Nessus) { n.Session() }},
{&ServerProperties{}, http.StatusOK, func(n Nessus) { n.ServerProperties() }},
{&ServerStatus{}, http.StatusOK, func(n Nessus) { n.ServerStatus() }},
{&User{}, http.StatusOK, func(n Nessus) {
n.CreateUser("username", "pass", UserTypeLocal, Permissions32, "name", "[email protected]")
}},
{&listUsersResp{}, http.StatusOK, func(n Nessus) { n.ListUsers() }},
{nil, http.StatusOK, func(n Nessus) { n.DeleteUser(42) }},
{nil, http.StatusOK, func(n Nessus) { n.SetUserPassword(42, "newpass") }},
{&User{}, http.StatusOK, func(n Nessus) {
n.EditUser(42, Permissions128, "newname", "[email protected]")
}},
{[]PluginFamily{}, http.StatusOK, func(n Nessus) { n.PluginFamilies() }},
{&FamilyDetails{}, http.StatusOK, func(n Nessus) { n.FamilyDetails(42) }},
{&PluginDetails{}, http.StatusOK, func(n Nessus) { n.PluginDetails(42) }},
{[]Scanner{}, http.StatusOK, func(n Nessus) { n.Scanners() }},
{&listPoliciesResp{}, http.StatusOK, func(n Nessus) { n.Policies() }},
{&Scan{}, http.StatusOK, func(n Nessus) {
n.NewScan("editorUUID", "settingsName", 42, 43, 44, LaunchDaily, []string{"target1", "target2"})
}},
{&ListScansResponse{}, http.StatusOK, func(n Nessus) { n.Scans() }},
{[]Template{}, http.StatusOK, func(n Nessus) { n.ScanTemplates() }},
{[]Template{}, http.StatusOK, func(n Nessus) { n.PolicyTemplates() }},
{"id", http.StatusOK, func(n Nessus) { n.StartScan(42) }},
{nil, http.StatusOK, func(n Nessus) { n.PauseScan(42) }},
{nil, http.StatusOK, func(n Nessus) { n.ResumeScan(42) }},
{nil, http.StatusOK, func(n Nessus) { n.StopScan(42) }},
{nil, http.StatusOK, func(n Nessus) { n.DeleteScan(42) }},
{&ScanDetailsResp{}, http.StatusOK, func(n Nessus) { n.ScanDetails(42) }},
{[]TimeZone{}, http.StatusOK, func(n Nessus) { n.Timezones() }},
{[]Folder{}, http.StatusOK, func(n Nessus) { n.Folders() }},
{nil, http.StatusOK, func(n Nessus) { n.CreateFolder("name") }},
{nil, http.StatusOK, func(n Nessus) { n.EditFolder(42, "newname") }},
{nil, http.StatusOK, func(n Nessus) { n.DeleteFolder(42) }},
{42, http.StatusOK, func(n Nessus) { n.ExportScan(42, ExportPDF) }},
{true, http.StatusOK, func(n Nessus) { n.ExportFinished(42, 43) }},
{[]byte("raw export"), http.StatusOK, func(n Nessus) { n.DownloadExport(42, 43) }},
{[]Permission{}, http.StatusOK, func(n Nessus) { n.Permissions("scanner", 42) }},
}
for _, tt := range tests {
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(tt.statusCode)
if tt.resp != nil {
j, err := json.Marshal(tt.resp)
if err != nil {
t.Fatalf("cannot serialize response: %v", err)
}
w.Write(j)
}
}))
defer server.Close()
n, err := NewInsecureNessus(server.URL)
if err != nil {
t.Fatalf("cannot create nessus instance: %v", err)
}
n.SetVerbose(true)
tt.call(n)
}
}
func TestSha256Fingerprint(t *testing.T) {
want := "AzuD2SQxVI4TQkkDwjWpkir1bdNNU8m3KzfPFYSJIT4="
got := sha256Fingerprint([]byte("abc123!"))
if got != want {
t.Errorf("fingerprint calculation failed, got=%v, want=%v", got, want)
}
}
func generateCert(validNotBefore time.Time, validNotAfter time.Time) (*x509.Certificate, tls.Certificate, error) {
privKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
return nil, tls.Certificate{}, err
}
template := x509.Certificate{
BasicConstraintsValid: true,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
NotBefore: validNotBefore,
NotAfter: validNotAfter,
SerialNumber: big.NewInt(1),
Subject: pkix.Name{Organization: []string{"Example Inc"}},
}
certDer, err := x509.CreateCertificate(rand.Reader, &template, &template, &privKey.PublicKey, privKey)
if err != nil {
return nil, tls.Certificate{}, err
}
certX509, err := x509.ParseCertificate(certDer)
if err != nil {
return nil, tls.Certificate{}, err
}
keypair, err := tls.X509KeyPair(
pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDer}),
pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privKey)}))
return certX509, keypair, err
}
// An empty fingerprint would allow creating a nessus instance without any verification.
func TestNewFingerprintedNessus(t *testing.T) {
_, err := NewFingerprintedNessus("https://192.0.2.1", []string{})
if err == nil {
t.Fatalf("should not accept empty fingerprint: %v", err)
}
_, err = NewFingerprintedNessus("https://192.0.2.1", []string{"a"})
if err != nil {
t.Fatalf("should accept a non-empty fingerprint: %v", err)
}
}
func TestCreateDialTLSFuncToVerifyFingerprint(t *testing.T) {
var tests = []struct {
fingerprint func([]byte) string
validNotBefore time.Time
validNotAfter time.Time
wantError bool
}{
// Correct fingerprint, should succeed.
{func(cert []byte) string { return sha256Fingerprint(cert) }, time.Now().Truncate(1 * time.Hour), time.Now().Add(1 * time.Hour), false},
// Correct fingerprint, cert not yet valid, should succeed.
{func(cert []byte) string { return sha256Fingerprint(cert) }, time.Now().Add(1 * time.Hour), time.Now().Add(2 * time.Hour), false},
// Correct fingerprint, cert not valid anymore, should succeed.
{func(cert []byte) string { return sha256Fingerprint(cert) }, time.Now().Truncate(2 * time.Hour), time.Now().Truncate(1 * time.Hour), false},
// No fingerprint given (empty string), should fail.
{func(_ []byte) string { return "" }, time.Now().Truncate(1 * time.Hour), time.Now().Add(1 * time.Hour), true},
// Wrong fingerprint given, should fail.
{func(_ []byte) string { return "TW1NeU5tSTBObUkyT0dabVl6WTRabVk1T1dJME5UTmpNV1E=" }, time.Now().Truncate(1 * time.Hour), time.Now().Add(1 * time.Hour), true},
// Wrong fingerprint given, should fail.
{func(_ []byte) string { return "x" }, time.Now().Truncate(1 * time.Hour), time.Now().Add(1 * time.Hour), true},
} | }
srvConfig := &tls.Config{Certificates: []tls.Certificate{srvKeypair}}
srvListener, err := tls.Listen("tcp", "127.0.0.1:0", srvConfig)
if err != nil {
t.Fatalf("cannot listen: %v", err)
return
}
go http.Serve(srvListener, http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {}))
cConfig := &tls.Config{
InsecureSkipVerify: true,
}
wantFingerprint := tt.fingerprint(srvCertX509.RawSubjectPublicKeyInfo)
client := &http.Client{
Transport: &http.Transport{
TLSClientConfig: cConfig,
DialTLS: createDialTLSFuncToVerifyFingerprint([]string{wantFingerprint}, cConfig),
},
}
_, err = client.Get("https://" + srvListener.Addr().String())
if tt.wantError {
if err == nil {
t.Errorf("got no error, expected one (%+v)", tt)
}
continue
}
if err != nil {
t.Errorf("error during fingerprint verification: %v (%+v)", err, tt)
continue
}
}
} | for _, tt := range tests {
srvCertX509, srvKeypair, err := generateCert(tt.validNotBefore, tt.validNotAfter)
if err != nil {
t.Fatalf("failed to create x509 key pair: %v", err) |
EvalError.py | class | (Exception):
def __init__(self, message: str, sexp):
super().__init__(message)
self._sexp = sexp
| EvalError |
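# A minimal usage sketch (an assumption, not part of the original module):
# an evaluator would raise EvalError with the offending s-expression attached
# so callers can report where evaluation failed.
#
#     try:
#         raise EvalError("unbound symbol", ("foo",))
#     except EvalError as err:
#         print(err, err._sexp)  # unbound symbol ('foo',)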
byte_comparator.go | package comparators
// ByteComparator compares byte values.
type ByteComparator struct {
}
// NewByteComparator returns a new byte comparator.
func NewByteComparator() *ByteComparator |
// Compare compares two byte values and returns
// 0 if a = b
// -1 if a < b
// 1 if a > b
func (comparator *ByteComparator) Compare(a, b interface{}) int {
aAsserted := a.(byte)
bAsserted := b.(byte)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
| {
return &ByteComparator{}
} |
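// A minimal usage sketch (not part of the original file): arguments must be
// passed as byte values or the type assertions in Compare will panic.
//
//	c := NewByteComparator()
//	fmt.Println(c.Compare(byte('a'), byte('b'))) // -1
//	fmt.Println(c.Compare(byte('x'), byte('x'))) // 0
//	fmt.Println(c.Compare(byte('z'), byte('a'))) // 1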
dlt_benchmarks.rs | #[macro_use]
extern crate criterion;
extern crate dlt;
// use dlt::dlt::*;
use bytes::{BytesMut};
use criterion::Criterion;
fn dlt_benchmark(c: &mut Criterion) {
c.bench_function("format header", |b| {
let timestamp = dlt::dlt::DltTimeStamp {
seconds: 0x4DC9_2C26,
microseconds: 0x000C_A2D8,
};
b.iter(|| format!("{}", timestamp))
});
// c.bench_function("format message", |b| {
// let timestamp = DltTimeStamp {
// seconds: 0x4DC9_2C26,
// microseconds: 0x000C_A2D8,
// };
// let storage_header = StorageHeader {
// timestamp,
// ecu_id: "abc".to_string(),
// };
// let header: StandardHeader = StandardHeader {
// version: 1,
// has_extended_header: true,
// big_endian: true,
// message_counter: 0x33,
// overall_length: 0x1,
// ecu_id: Some("abc".to_string()),
// session_id: None,
// timestamp: Some(5),
// };
// let extended_header = dlt::dlt::ExtendedHeader {
// argument_count: 2,
// verbose: true,
// message_type: dlt::dlt::MessageType::Log(LogLevel::Warn), | // application_id: "abc".to_string(),
// context_id: "CON".to_string(),
// };
// let type_info = TypeInfo {
// kind: TypeInfoKind::Bool,
// coding: StringCoding::UTF8,
// has_variable_info: true,
// has_trace_info: false,
// };
// let argument = Argument {
// type_info,
// name: Some("foo".to_string()),
// unit: None,
// fixed_point: None,
// value: Value::Bool(true),
// };
// let payload = PayloadContent::Verbose(vec![argument]);
// let message = Message {
// storage_header: Some(storage_header),
// header,
// extended_header: Some(extended_header),
// payload,
// fibex_metadata: None,
// };
// b.iter(|| format!("{}", message))
// });
}
fn dlt_parse_benchmark(c: &mut Criterion) {
c.bench_function("zero_termitated_string broken input", |b| {
let mut buf = BytesMut::with_capacity(4);
let broken = vec![0x41, 0, 146, 150];
buf.extend_from_slice(&broken);
b.iter(|| dlt::dlt_parse::dlt_zero_terminated_string(&buf, 4))
});
}
criterion_group!(benches, dlt_benchmark, dlt_parse_benchmark);
criterion_main!(benches); | |
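// To run these benchmarks (assuming the usual criterion setup, with this
// file registered as a bench target named after the file):
//
//     cargo bench --bench dlt_benchmarks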
.eslintrc.js | module.exports = {
"extends": "airbnb-base",
"plugins": [
"react"
] | }; |
|
target_strs.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_camel_case_types)]
pub struct | {
pub module_asm: String,
pub data_layout: String,
pub target_triple: String,
pub cc_args: Vec<String> ,
}
| t |
fancysets.py | from __future__ import print_function, division
from functools import reduce
from sympy.core.basic import Basic
from sympy.core.compatibility import with_metaclass, range, PY3
from sympy.core.containers import Tuple
from sympy.core.expr import Expr
from sympy.core.function import Lambda
from sympy.core.logic import fuzzy_not, fuzzy_or
from sympy.core.numbers import oo, Integer
from sympy.core.relational import Eq
from sympy.core.singleton import Singleton, S
from sympy.core.symbol import Dummy, symbols, Symbol
from sympy.core.sympify import _sympify, sympify, converter
from sympy.logic.boolalg import And
from sympy.sets.sets import (Set, Interval, Union, FiniteSet,
ProductSet)
from sympy.utilities.misc import filldedent
from sympy.utilities.iterables import cartes
class Rationals(with_metaclass(Singleton, Set)):
"""
Represents the rational numbers. This set is also available as
the Singleton, S.Rationals.
Examples
========
>>> from sympy import S
>>> S.Half in S.Rationals
True
>>> iterable = iter(S.Rationals)
>>> [next(iterable) for i in range(12)]
[0, 1, -1, 1/2, 2, -1/2, -2, 1/3, 3, -1/3, -3, 2/3]
"""
is_iterable = True
_inf = S.NegativeInfinity
_sup = S.Infinity
is_empty = False
is_finite_set = False
def _contains(self, other):
if not isinstance(other, Expr):
return False
if other.is_Number:
return other.is_Rational
return other.is_rational
def __iter__(self):
from sympy.core.numbers import igcd, Rational
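# Enumerate 0, 1, -1 first, then for each denominator d >= 2 yield n/d,
# d/n, -n/d and -d/n for every n < d coprime to d, so each rational is
# produced exactly once.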
yield S.Zero
yield S.One
yield S.NegativeOne
d = 2
while True:
for n in range(d):
if igcd(n, d) == 1:
yield Rational(n, d)
yield Rational(d, n)
yield Rational(-n, d)
yield Rational(-d, n)
d += 1
@property
def _boundary(self):
return self
class Naturals(with_metaclass(Singleton, Set)):
"""
Represents the natural numbers (or counting numbers) which are all
positive integers starting from 1. This set is also available as
the Singleton, S.Naturals.
Examples
========
>>> from sympy import S, Interval, pprint
>>> 5 in S.Naturals
True
>>> iterable = iter(S.Naturals)
>>> next(iterable)
1
>>> next(iterable)
2
>>> next(iterable)
3
>>> pprint(S.Naturals.intersect(Interval(0, 10)))
{1, 2, ..., 10}
See Also
========
Naturals0 : non-negative integers (i.e. includes 0, too)
Integers : also includes negative integers
"""
is_iterable = True
_inf = S.One
_sup = S.Infinity
is_empty = False
is_finite_set = False
def _contains(self, other):
if not isinstance(other, Expr):
return False
elif other.is_positive and other.is_integer:
return True
elif other.is_integer is False or other.is_positive is False:
return False
def _eval_is_subset(self, other):
return Range(1, oo).is_subset(other)
def _eval_is_superset(self, other):
return Range(1, oo).is_superset(other)
def __iter__(self):
i = self._inf
while True:
yield i
i = i + 1
@property
def _boundary(self):
return self
def as_relational(self, x):
from sympy.functions.elementary.integers import floor
return And(Eq(floor(x), x), x >= self.inf, x < oo)
class Naturals0(Naturals):
"""Represents the whole numbers which are all the non-negative integers,
inclusive of zero.
See Also
========
Naturals : positive integers; does not include 0
Integers : also includes the negative integers
"""
_inf = S.Zero
def _contains(self, other):
if not isinstance(other, Expr):
return S.false
elif other.is_integer and other.is_nonnegative:
return S.true
elif other.is_integer is False or other.is_nonnegative is False:
return S.false
def _eval_is_subset(self, other):
return Range(oo).is_subset(other)
def _eval_is_superset(self, other):
return Range(oo).is_superset(other)
class Integers(with_metaclass(Singleton, Set)):
"""
Represents all integers: positive, negative and zero. This set is also
available as the Singleton, S.Integers.
Examples
========
>>> from sympy import S, Interval, pprint
>>> 5 in S.Naturals
True
>>> iterable = iter(S.Integers)
>>> next(iterable)
0
>>> next(iterable)
1
>>> next(iterable)
-1
>>> next(iterable)
2
>>> pprint(S.Integers.intersect(Interval(-4, 4)))
{-4, -3, ..., 4}
See Also
========
Naturals0 : non-negative integers
Integers : positive and negative integers and zero
"""
is_iterable = True
is_empty = False
is_finite_set = False
def _contains(self, other):
if not isinstance(other, Expr):
return S.false
return other.is_integer
def __iter__(self):
yield S.Zero
i = S.One
while True:
yield i
yield -i
i = i + 1
@property
def _inf(self):
return S.NegativeInfinity
@property
def _sup(self):
|
@property
def _boundary(self):
return self
def as_relational(self, x):
from sympy.functions.elementary.integers import floor
return And(Eq(floor(x), x), -oo < x, x < oo)
def _eval_is_subset(self, other):
return Range(-oo, oo).is_subset(other)
def _eval_is_superset(self, other):
return Range(-oo, oo).is_superset(other)
class Reals(with_metaclass(Singleton, Interval)):
"""
Represents all real numbers
from negative infinity to positive infinity,
including all integer, rational and irrational numbers.
This set is also available as the Singleton, S.Reals.
Examples
========
>>> from sympy import S, Interval, Rational, pi, I
>>> 5 in S.Reals
True
>>> Rational(-1, 2) in S.Reals
True
>>> pi in S.Reals
True
>>> 3*I in S.Reals
False
>>> S.Reals.contains(pi)
True
See Also
========
ComplexRegion
"""
def __new__(cls):
return Interval.__new__(cls, S.NegativeInfinity, S.Infinity)
def __eq__(self, other):
return other == Interval(S.NegativeInfinity, S.Infinity)
def __hash__(self):
return hash(Interval(S.NegativeInfinity, S.Infinity))
class ImageSet(Set):
"""
Image of a set under a mathematical function. The transformation
must be given as a Lambda function which has as many arguments
as the elements of the set upon which it operates, e.g. 1 argument
when acting on the set of integers or 2 arguments when acting on
a complex region.
This function is not normally called directly, but is called
from `imageset`.
Examples
========
>>> from sympy import Symbol, S, pi, Dummy, Lambda
>>> from sympy.sets.sets import FiniteSet, Interval
>>> from sympy.sets.fancysets import ImageSet
>>> x = Symbol('x')
>>> N = S.Naturals
>>> squares = ImageSet(Lambda(x, x**2), N) # {x**2 for x in N}
>>> 4 in squares
True
>>> 5 in squares
False
>>> FiniteSet(0, 1, 2, 3, 4, 5, 6, 7, 9, 10).intersect(squares)
FiniteSet(1, 4, 9)
>>> square_iterable = iter(squares)
>>> for i in range(4):
... next(square_iterable)
1
4
9
16
If you want to get the value for `x` = 2, 1/2, etc., first check whether
the `x` value is in `base_set` before passing it as an argument:
>>> squares.lamda(2)
4
>>> squares.lamda(S(1)/2)
1/4
>>> n = Dummy('n')
>>> solutions = ImageSet(Lambda(n, n*pi), S.Integers) # solutions of sin(x) = 0
>>> dom = Interval(-1, 1)
>>> dom.intersect(solutions)
FiniteSet(0)
See Also
========
sympy.sets.sets.imageset
"""
def __new__(cls, flambda, *sets):
if not isinstance(flambda, Lambda):
raise ValueError('First argument must be a Lambda')
signature = flambda.signature
if len(signature) != len(sets):
raise ValueError('Incompatible signature')
sets = [_sympify(s) for s in sets]
if not all(isinstance(s, Set) for s in sets):
raise TypeError("Set arguments to ImageSet should of type Set")
if not all(cls._check_sig(sg, st) for sg, st in zip(signature, sets)):
raise ValueError("Signature %s does not match sets %s" % (signature, sets))
if flambda is S.IdentityFunction and len(sets) == 1:
return sets[0]
if not set(flambda.variables) & flambda.expr.free_symbols:
is_empty = fuzzy_or(s.is_empty for s in sets)
if is_empty == True:
return S.EmptySet
elif is_empty == False:
return FiniteSet(flambda.expr)
return Basic.__new__(cls, flambda, *sets)
lamda = property(lambda self: self.args[0])
base_sets = property(lambda self: self.args[1:])
@property
def base_set(self):
# XXX: Maybe deprecate this? It is poorly defined in handling
# the multivariate case...
sets = self.base_sets
if len(sets) == 1:
return sets[0]
else:
return ProductSet(*sets).flatten()
@property
def base_pset(self):
return ProductSet(*self.base_sets)
@classmethod
def _check_sig(cls, sig_i, set_i):
if sig_i.is_symbol:
return True
elif isinstance(set_i, ProductSet):
sets = set_i.sets
if len(sig_i) != len(sets):
return False
# Recurse through the signature for nested tuples:
return all(cls._check_sig(ts, ps) for ts, ps in zip(sig_i, sets))
else:
# XXX: Need a better way of checking whether a set is a set of
# Tuples or not. For example a FiniteSet can contain Tuples
# but so can an ImageSet or a ConditionSet. Others like
# Integers, Reals etc can not contain Tuples. We could just
# list the possibilities here... Current code for e.g.
# _contains probably only works for ProductSet.
return True # Give the benefit of the doubt
def __iter__(self):
already_seen = set()
for i in self.base_pset:
val = self.lamda(*i)
if val in already_seen:
continue
else:
already_seen.add(val)
yield val
def _is_multivariate(self):
return len(self.lamda.variables) > 1
def _contains(self, other):
from sympy.solvers.solveset import _solveset_multi
def get_symsetmap(signature, base_sets):
'''Attempt to get a map of symbols to base_sets'''
queue = list(zip(signature, base_sets))
symsetmap = {}
for sig, base_set in queue:
if sig.is_symbol:
symsetmap[sig] = base_set
elif base_set.is_ProductSet:
sets = base_set.sets
if len(sig) != len(sets):
raise ValueError("Incompatible signature")
# Recurse
queue.extend(zip(sig, sets))
else:
# If we get here then we have something like sig = (x, y) and
# base_set = {(1, 2), (3, 4)}. For now we give up.
return None
return symsetmap
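# e.g. (illustrative) for signature ((x, y), z) with base sets
# (A x B, C) the helper above returns {x: A, y: B, z: C}; for a
# non-product base set such as {(1, 2), (3, 4)} it returns None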
def get_equations(expr, candidate):
'''Find the equations relating symbols in expr and candidate.'''
queue = [(expr, candidate)]
for e, c in queue:
if not isinstance(e, Tuple):
yield Eq(e, c)
elif not isinstance(c, Tuple) or len(e) != len(c):
yield False
return
else:
queue.extend(zip(e, c))
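# e.g. (illustrative) get_equations((a, b), (1, 2)) yields
# Eq(a, 1) and Eq(b, 2); a shape mismatch yields False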
# Get the basic objects together:
other = _sympify(other)
expr = self.lamda.expr
sig = self.lamda.signature
variables = self.lamda.variables
base_sets = self.base_sets
# Use dummy symbols for ImageSet parameters so they don't match
# anything in other
rep = {v: Dummy(v.name) for v in variables}
variables = [v.subs(rep) for v in variables]
sig = sig.subs(rep)
expr = expr.subs(rep)
# Map the parts of other to those in the Lambda expr
equations = []
for eq in get_equations(expr, other):
# Unsatisfiable equation?
if eq is False:
return False
equations.append(eq)
# Map the symbols in the signature to the corresponding domains
symsetmap = get_symsetmap(sig, base_sets)
if symsetmap is None:
# Can't factor the base sets to a ProductSet
return None
# Which of the variables in the Lambda signature need to be solved for?
symss = (eq.free_symbols for eq in equations)
variables = set(variables) & reduce(set.union, symss, set())
# Use internal multivariate solveset
variables = tuple(variables)
base_sets = [symsetmap[v] for v in variables]
solnset = _solveset_multi(equations, variables, base_sets)
if solnset is None:
return None
return fuzzy_not(solnset.is_empty)
@property
def is_iterable(self):
return all(s.is_iterable for s in self.base_sets)
def doit(self, **kwargs):
from sympy.sets.setexpr import SetExpr
f = self.lamda
sig = f.signature
if len(sig) == 1 and sig[0].is_symbol and isinstance(f.expr, Expr):
base_set = self.base_sets[0]
return SetExpr(base_set)._eval_func(f).set
if all(s.is_FiniteSet for s in self.base_sets):
return FiniteSet(*(f(*a) for a in cartes(*self.base_sets)))
return self
class Range(Set):
"""
Represents a range of integers. Can be called as Range(stop),
Range(start, stop), or Range(start, stop, step); when step is
not given it defaults to 1.
`Range(stop)` is the same as `Range(0, stop, 1)` and the stop value
(just as for Python ranges) is not included in the Range values.
>>> from sympy import Range
>>> list(Range(3))
[0, 1, 2]
The step can also be negative:
>>> list(Range(10, 0, -2))
[10, 8, 6, 4, 2]
The stop value is made canonical so equivalent ranges always
have the same args:
>>> Range(0, 10, 3)
Range(0, 12, 3)
Infinite ranges are allowed. ``oo`` and ``-oo`` are never included in the
set (``Range`` is always a subset of ``Integers``). If the starting point
is infinite, then the final value is ``stop - step``. To iterate such a
range, it needs to be reversed:
>>> from sympy import oo
>>> r = Range(-oo, 1)
>>> r[-1]
0
>>> next(iter(r))
Traceback (most recent call last):
...
TypeError: Cannot iterate over Range with infinite start
>>> next(iter(r.reversed))
0
Although Range is a set (and supports the normal set
operations) it maintains the order of the elements and can
be used in contexts where `range` would be used.
>>> from sympy import Interval
>>> Range(0, 10, 2).intersect(Interval(3, 7))
Range(4, 8, 2)
>>> list(_)
[4, 6]
Although slicing of a Range will always return a Range -- possibly
empty -- an empty set will be returned from any intersection that
is empty:
>>> Range(3)[:0]
Range(0, 0, 1)
>>> Range(3).intersect(Interval(4, oo))
EmptySet
>>> Range(3).intersect(Range(4, oo))
EmptySet
Range will accept symbolic arguments but has very limited support
for doing anything other than displaying the Range:
>>> from sympy import Symbol, pprint
>>> from sympy.abc import i, j, k
>>> Range(i, j, k).start
i
>>> Range(i, j, k).inf
Traceback (most recent call last):
...
ValueError: invalid method for symbolic range
Better success will be had when using integer symbols:
>>> n = Symbol('n', integer=True)
>>> r = Range(n, n + 20, 3)
>>> r.inf
n
>>> pprint(r)
{n, n + 3, ..., n + 17}
"""
is_iterable = True
def __new__(cls, *args):
from sympy.functions.elementary.integers import ceiling
if len(args) == 1:
if isinstance(args[0], range):
raise TypeError(
'use sympify(%s) to convert range to Range' % args[0])
# expand range
slc = slice(*args)
if slc.step == 0:
raise ValueError("step cannot be 0")
start, stop, step = slc.start or 0, slc.stop, slc.step or 1
try:
ok = []
for w in (start, stop, step):
w = sympify(w)
if w in [S.NegativeInfinity, S.Infinity] or (
w.has(Symbol) and w.is_integer != False):
ok.append(w)
elif not w.is_Integer:
raise ValueError
else:
ok.append(w)
except ValueError:
raise ValueError(filldedent('''
Finite arguments to Range must be integers; `imageset` can define
other cases, e.g. use `imageset(i, i/10, Range(3))` to give
[0, 1/10, 1/5].'''))
start, stop, step = ok
null = False
if any(i.has(Symbol) for i in (start, stop, step)):
if start == stop:
null = True
else:
end = stop
elif start.is_infinite:
span = step*(stop - start)
if span is S.NaN or span <= 0:
null = True
elif step.is_Integer and stop.is_infinite and abs(step) != 1:
raise ValueError(filldedent('''
Step size must be %s in this case.''' % (1 if step > 0 else -1)))
else:
end = stop
else:
oostep = step.is_infinite
if oostep:
step = S.One if step > 0 else S.NegativeOne
n = ceiling((stop - start)/step)
if n <= 0:
null = True
elif oostep:
end = start + 1
step = S.One # make it a canonical single step
else:
end = start + n*step
if null:
start = end = S.Zero
step = S.One
return Basic.__new__(cls, start, end, step)
start = property(lambda self: self.args[0])
stop = property(lambda self: self.args[1])
step = property(lambda self: self.args[2])
@property
def reversed(self):
"""Return an equivalent Range in the opposite order.
Examples
========
>>> from sympy import Range
>>> Range(10).reversed
Range(9, -1, -1)
"""
if self.has(Symbol):
_ = self.size # validate
if not self:
return self
return self.func(
self.stop - self.step, self.start - self.step, -self.step)
def _contains(self, other):
if not self:
return S.false
if other.is_infinite:
return S.false
if not other.is_integer:
return other.is_integer
if self.has(Symbol):
try:
_ = self.size # validate
except ValueError:
return
if self.start.is_finite:
ref = self.start
elif self.stop.is_finite:
ref = self.stop
else:
return other.is_Integer
if (ref - other) % self.step: # off sequence
return S.false
return _sympify(other >= self.inf and other <= self.sup)
def __iter__(self):
if self.has(Symbol):
_ = self.size # validate
if self.start in [S.NegativeInfinity, S.Infinity]:
raise TypeError("Cannot iterate over Range with infinite start")
elif self:
i = self.start
step = self.step
while True:
if (step > 0 and not (self.start <= i < self.stop)) or \
(step < 0 and not (self.stop < i <= self.start)):
break
yield i
i += step
def __len__(self):
rv = self.size
if rv is S.Infinity:
raise ValueError('Use .size to get the length of an infinite Range')
return int(rv)
@property
def size(self):
if not self:
return S.Zero
dif = self.stop - self.start
if self.has(Symbol):
if dif.has(Symbol) or self.step.has(Symbol) or (
not self.start.is_integer and not self.stop.is_integer):
raise ValueError('invalid method for symbolic range')
if dif.is_infinite:
return S.Infinity
return Integer(abs(dif//self.step))
def __nonzero__(self):
return self.start != self.stop
__bool__ = __nonzero__
def __getitem__(self, i):
from sympy.functions.elementary.integers import ceiling
ooslice = "cannot slice from the end with an infinite value"
zerostep = "slice step cannot be zero"
# if we had to take every other element in the following
# oo, ..., 6, 4, 2, 0
# we might get oo, ..., 4, 0 or oo, ..., 6, 2
ambiguous = "cannot unambiguously re-stride from the end " + \
"with an infinite value"
if isinstance(i, slice):
if self.size.is_finite: # validates, too
start, stop, step = i.indices(self.size)
n = ceiling((stop - start)/step)
if n <= 0:
return Range(0)
canonical_stop = start + n*step
end = canonical_stop - step
ss = step*self.step
return Range(self[start], self[end] + ss, ss)
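# e.g. (illustrative) Range(0, 10, 2)[1:4] == Range(2, 8, 2),
# i.e. the elements [2, 4, 6]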
else: # infinite Range
start = i.start
stop = i.stop
if i.step == 0:
raise ValueError(zerostep)
step = i.step or 1
ss = step*self.step
#---------------------
# handle infinite on right
# e.g. Range(0, oo) or Range(0, -oo, -1)
# --------------------
if self.stop.is_infinite:
# start and stop are not interdependent --
# they only depend on step -- so we use the
# equivalent reversed values
return self.reversed[
stop if stop is None else -stop + 1:
start if start is None else -start:
step].reversed
#---------------------
# handle infinite on the left
# e.g. Range(oo, 0, -1) or Range(-oo, 0)
# --------------------
# consider combinations of
# start/stop {== None, < 0, == 0, > 0} and
# step {< 0, > 0}
if start is None:
if stop is None:
if step < 0:
return Range(self[-1], self.start, ss)
elif step > 1:
raise ValueError(ambiguous)
else: # == 1
return self
elif stop < 0:
if step < 0:
return Range(self[-1], self[stop], ss)
else: # > 0
return Range(self.start, self[stop], ss)
elif stop == 0:
if step > 0:
return Range(0)
else: # < 0
raise ValueError(ooslice)
elif stop == 1:
if step > 0:
raise ValueError(ooslice) # infinite singleton
else: # < 0
raise ValueError(ooslice)
else: # > 1
raise ValueError(ooslice)
elif start < 0:
if stop is None:
if step < 0:
return Range(self[start], self.start, ss)
else: # > 0
return Range(self[start], self.stop, ss)
elif stop < 0:
return Range(self[start], self[stop], ss)
elif stop == 0:
if step < 0:
raise ValueError(ooslice)
else: # > 0
return Range(0)
elif stop > 0:
raise ValueError(ooslice)
elif start == 0:
if stop is None:
if step < 0:
raise ValueError(ooslice) # infinite singleton
elif step > 1:
raise ValueError(ambiguous)
else: # == 1
return self
elif stop < 0:
if step > 1:
raise ValueError(ambiguous)
elif step == 1:
return Range(self.start, self[stop], ss)
else: # < 0
return Range(0)
else: # >= 0
raise ValueError(ooslice)
elif start > 0:
raise ValueError(ooslice)
else:
if not self:
raise IndexError('Range index out of range')
if i == 0:
if self.start.is_infinite:
raise ValueError(ooslice)
if self.has(Symbol):
if (self.stop > self.start) == self.step.is_positive and self.step.is_positive is not None:
pass
else:
_ = self.size # validate
return self.start
if i == -1:
if self.stop.is_infinite:
raise ValueError(ooslice)
n = self.stop - self.step
if n.is_Integer or (
n.is_integer and (
(n - self.start).is_nonnegative ==
self.step.is_positive)):
return n
_ = self.size # validate
rv = (self.stop if i < 0 else self.start) + i*self.step
if rv.is_infinite:
raise ValueError(ooslice)
if rv < self.inf or rv > self.sup:
raise IndexError("Range index out of range")
return rv
@property
def _inf(self):
if not self:
raise NotImplementedError
if self.has(Symbol):
if self.step.is_positive:
return self[0]
elif self.step.is_negative:
return self[-1]
_ = self.size # validate
if self.step > 0:
return self.start
else:
return self.stop - self.step
@property
def _sup(self):
if not self:
raise NotImplementedError
if self.has(Symbol):
if self.step.is_positive:
return self[-1]
elif self.step.is_negative:
return self[0]
_ = self.size # validate
if self.step > 0:
return self.stop - self.step
else:
return self.start
@property
def _boundary(self):
return self
def as_relational(self, x):
"""Rewrite a Range in terms of equalities and logic operators. """
from sympy.functions.elementary.integers import floor
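# e.g. (illustrative) Range(0, 10, 2).as_relational(x) gives
# And(Eq(x, floor(x)), x >= 0, x <= 8); note the step constraint
# itself is not encoded by this relational form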
return And(
Eq(x, floor(x)),
x >= self.inf if self.inf in self else x > self.inf,
x <= self.sup if self.sup in self else x < self.sup)
# Using range from compatibility above (xrange on Py2)
if PY3:
converter[range] = lambda r: Range(r.start, r.stop, r.step)
else:
converter[range] = lambda r: Range(*r.__reduce__()[1])
def normalize_theta_set(theta):
"""
Normalize a Real Set `theta` in the Interval [0, 2*pi). It returns
a normalized value of theta in the Set. For Interval, a maximum of
one cycle [0, 2*pi], is returned i.e. for theta equal to [0, 10*pi],
returned normalized value would be [0, 2*pi). As of now intervals
with end points as non-multiples of `pi` is not supported.
Raises
======
NotImplementedError
The algorithms for Normalizing theta Set are not yet
implemented.
ValueError
The input is not valid, i.e. the input is not a real set.
RuntimeError
It is a bug; please report it to the GitHub issue tracker.
Examples
========
>>> from sympy.sets.fancysets import normalize_theta_set
>>> from sympy import Interval, FiniteSet, pi
>>> normalize_theta_set(Interval(9*pi/2, 5*pi))
Interval(pi/2, pi)
>>> normalize_theta_set(Interval(-3*pi/2, pi/2))
Interval.Ropen(0, 2*pi)
>>> normalize_theta_set(Interval(-pi/2, pi/2))
Union(Interval(0, pi/2), Interval.Ropen(3*pi/2, 2*pi))
>>> normalize_theta_set(Interval(-4*pi, 3*pi))
Interval.Ropen(0, 2*pi)
>>> normalize_theta_set(Interval(-3*pi/2, -pi/2))
Interval(pi/2, 3*pi/2)
>>> normalize_theta_set(FiniteSet(0, pi, 3*pi))
FiniteSet(0, pi)
"""
from sympy.functions.elementary.trigonometric import _pi_coeff as coeff
if theta.is_Interval:
interval_len = theta.measure
# one complete circle
if interval_len >= 2*S.Pi:
if interval_len == 2*S.Pi and theta.left_open and theta.right_open:
k = coeff(theta.start)
return Union(Interval(0, k*S.Pi, False, True),
Interval(k*S.Pi, 2*S.Pi, True, True))
return Interval(0, 2*S.Pi, False, True)
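# e.g. (illustrative) Interval(pi/2, 9*pi/2) spans a full circle,
# so the branch above returns Interval.Ropen(0, 2*pi)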
k_start, k_end = coeff(theta.start), coeff(theta.end)
if k_start is None or k_end is None:
raise NotImplementedError("Normalizing theta without pi as coefficient is "
"not yet implemented")
new_start = k_start*S.Pi
new_end = k_end*S.Pi
if new_start > new_end:
return Union(Interval(S.Zero, new_end, False, theta.right_open),
Interval(new_start, 2*S.Pi, theta.left_open, True))
else:
return Interval(new_start, new_end, theta.left_open, theta.right_open)
elif theta.is_FiniteSet:
new_theta = []
for element in theta:
k = coeff(element)
if k is None:
raise NotImplementedError('Normalizing theta without pi as '
'coefficient is not implemented.')
else:
new_theta.append(k*S.Pi)
return FiniteSet(*new_theta)
elif theta.is_Union:
return Union(*[normalize_theta_set(interval) for interval in theta.args])
elif theta.is_subset(S.Reals):
raise NotImplementedError("Normalizing theta when, it is of type %s is not "
"implemented" % type(theta))
else:
raise ValueError(" %s is not a real set" % (theta))
class ComplexRegion(Set):
"""
Represents a set of complex numbers. It can represent a
region of the complex plane in either of the standard forms,
polar or rectangular coordinates.
* Polar Form
Input is in the form of the ProductSet or Union of ProductSets
of the intervals of r and theta, together with the flag polar=True.
Z = {z in C | z = r*[cos(theta) + I*sin(theta)], r in [r], theta in [theta]}
* Rectangular Form
Input is in the form of the ProductSet or Union of ProductSets
of the intervals of x and y, the real and imaginary parts of
the complex numbers in the plane. Rectangular form is the
default input type.
Z = {z in C | z = x + I*y, x in [Re(z)], y in [Im(z)]}
Examples
========
>>> from sympy.sets.fancysets import ComplexRegion
>>> from sympy.sets import Interval
>>> from sympy import S, I, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 6)
>>> c = Interval(1, 8)
>>> c1 = ComplexRegion(a*b) # Rectangular Form
>>> c1
CartesianComplexRegion(ProductSet(Interval(2, 3), Interval(4, 6)))
* c1 represents the rectangular region in the complex plane
bounded by its four vertices (2, 4), (3, 4), (3, 6) and
(2, 6).
>>> c2 = ComplexRegion(Union(a*b, b*c))
>>> c2
CartesianComplexRegion(Union(ProductSet(Interval(2, 3), Interval(4, 6)), ProductSet(Interval(4, 6), Interval(1, 8))))
* c2 represents the union of two rectangular regions in the
complex plane: one bounded by the vertices of c1 and the
other bounded by (4, 1), (6, 1), (6, 8) and (4, 8).
>>> 2.5 + 4.5*I in c1
True
>>> 2.5 + 6.5*I in c1
False
>>> r = Interval(0, 1)
>>> theta = Interval(0, 2*S.Pi)
>>> c2 = ComplexRegion(r*theta, polar=True) # Polar Form
>>> c2 # unit Disk
PolarComplexRegion(ProductSet(Interval(0, 1), Interval.Ropen(0, 2*pi)))
* c2 represents the region in complex plane inside the
Unit Disk centered at the origin.
>>> 0.5 + 0.5*I in c2
True
>>> 1 + 2*I in c2
False
>>> unit_disk = ComplexRegion(Interval(0, 1)*Interval(0, 2*S.Pi), polar=True)
>>> upper_half_unit_disk = ComplexRegion(Interval(0, 1)*Interval(0, S.Pi), polar=True)
>>> intersection = unit_disk.intersect(upper_half_unit_disk)
>>> intersection
PolarComplexRegion(ProductSet(Interval(0, 1), Interval(0, pi)))
>>> intersection == upper_half_unit_disk
True
See Also
========
CartesianComplexRegion
PolarComplexRegion
Complexes
"""
is_ComplexRegion = True
def __new__(cls, sets, polar=False):
if polar is False:
return CartesianComplexRegion(sets)
elif polar is True:
return PolarComplexRegion(sets)
else:
raise ValueError("polar should be either True or False")
@property
def sets(self):
"""
Return the raw input sets of self.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.sets
ProductSet(Interval(2, 3), Interval(4, 5))
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.sets
Union(ProductSet(Interval(2, 3), Interval(4, 5)), ProductSet(Interval(4, 5), Interval(1, 7)))
"""
return self.args[0]
@property
def psets(self):
"""
Return a tuple of the input sets (ProductSets) of self.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.psets
(ProductSet(Interval(2, 3), Interval(4, 5)),)
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.psets
(ProductSet(Interval(2, 3), Interval(4, 5)), ProductSet(Interval(4, 5), Interval(1, 7)))
"""
if self.sets.is_ProductSet:
psets = ()
psets = psets + (self.sets, )
else:
psets = self.sets.args
return psets
@property
def a_interval(self):
"""
Return the union of intervals of `x` when self is in
rectangular form, or the union of intervals of `r` when
self is in polar form.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.a_interval
Interval(2, 3)
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.a_interval
Union(Interval(2, 3), Interval(4, 5))
"""
a_interval = []
for element in self.psets:
a_interval.append(element.args[0])
a_interval = Union(*a_interval)
return a_interval
@property
def b_interval(self):
"""
Return the union of intervals of `y` when self is in
rectangular form, or the union of intervals of `theta`
when self is in polar form.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.b_interval
Interval(4, 5)
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.b_interval
Interval(1, 7)
"""
b_interval = []
for element in self.psets:
b_interval.append(element.args[1])
b_interval = Union(*b_interval)
return b_interval
@property
def _measure(self):
"""
The measure of self.sets.
Examples
========
>>> from sympy import Interval, ComplexRegion, S
>>> a, b = Interval(2, 5), Interval(4, 8)
>>> c = Interval(0, 2*S.Pi)
>>> c1 = ComplexRegion(a*b)
>>> c1.measure
12
>>> c2 = ComplexRegion(a*c, polar=True)
>>> c2.measure
6*pi
"""
return self.sets._measure
@classmethod
def from_real(cls, sets):
"""
Converts given subset of real numbers to a complex region.
Examples
========
>>> from sympy import Interval, ComplexRegion
>>> unit = Interval(0,1)
>>> ComplexRegion.from_real(unit)
CartesianComplexRegion(ProductSet(Interval(0, 1), FiniteSet(0)))
"""
if not sets.is_subset(S.Reals):
raise ValueError("sets must be a subset of the real line")
return CartesianComplexRegion(sets * FiniteSet(0))
def _contains(self, other):
from sympy.functions import arg, Abs
from sympy.core.containers import Tuple
other = sympify(other)
isTuple = isinstance(other, Tuple)
if isTuple and len(other) != 2:
raise ValueError('expecting Tuple of length 2')
# If other is neither an Expr nor a Tuple
if not isinstance(other, Expr) and not isinstance(other, Tuple):
return S.false
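# e.g. (illustrative) 2 + 3*I gives re=2, im=3 in rectangular
# form; in polar form 1 + I gives r=sqrt(2), theta=pi/4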
# self in rectangular form
if not self.polar:
re, im = other if isTuple else other.as_real_imag()
for element in self.psets:
if And(element.args[0]._contains(re),
element.args[1]._contains(im)):
return True
return False
# self in polar form
elif self.polar:
if isTuple:
r, theta = other
elif other.is_zero:
r, theta = S.Zero, S.Zero
else:
r, theta = Abs(other), arg(other)
for element in self.psets:
if And(element.args[0]._contains(r),
element.args[1]._contains(theta)):
return True
return False
class CartesianComplexRegion(ComplexRegion):
"""
Set representing a square region of the complex plane.
Z = {z in C | z = x + I*y, x in [Re(z)], y in [Im(z)]}
Examples
========
>>> from sympy.sets.fancysets import ComplexRegion
>>> from sympy.sets.sets import Interval
>>> from sympy import I
>>> region = ComplexRegion(Interval(1, 3) * Interval(4, 6))
>>> 2 + 5*I in region
True
>>> 5*I in region
False
See also
========
ComplexRegion
PolarComplexRegion
Complexes
"""
polar = False
variables = symbols('x, y', cls=Dummy)
def __new__(cls, sets):
if sets == S.Reals*S.Reals:
return S.Complexes
if all(_a.is_FiniteSet for _a in sets.args) and (len(sets.args) == 2):
# ** ProductSet of FiniteSets in the Complex Plane. **
# For Cases like ComplexRegion({2, 4}*{3}), It
# would return {2 + 3*I, 4 + 3*I}
# FIXME: This should probably be handled with something like:
# return ImageSet(Lambda((x, y), x+I*y), sets).rewrite(FiniteSet)
complex_num = []
for x in sets.args[0]:
for y in sets.args[1]:
complex_num.append(x + S.ImaginaryUnit*y)
return FiniteSet(*complex_num)
else:
return Set.__new__(cls, sets)
@property
def expr(self):
x, y = self.variables
return x + S.ImaginaryUnit*y
class PolarComplexRegion(ComplexRegion):
"""
Set representing a polar region of the complex plane.
Z = {z in C | z = r*[cos(theta) + I*sin(theta)], r in [r], theta in [theta]}
Examples
========
>>> from sympy.sets.fancysets import ComplexRegion, Interval
>>> from sympy import oo, pi, I
>>> rset = Interval(0, oo)
>>> thetaset = Interval(0, pi)
>>> upper_half_plane = ComplexRegion(rset * thetaset, polar=True)
>>> 1 + I in upper_half_plane
True
>>> 1 - I in upper_half_plane
False
See also
========
ComplexRegion
CartesianComplexRegion
Complexes
"""
polar = True
variables = symbols('r, theta', cls=Dummy)
def __new__(cls, sets):
new_sets = []
# sets is Union of ProductSets
if not sets.is_ProductSet:
for k in sets.args:
new_sets.append(k)
# sets is ProductSets
else:
new_sets.append(sets)
# Normalize input theta
for k, v in enumerate(new_sets):
new_sets[k] = ProductSet(v.args[0],
normalize_theta_set(v.args[1]))
sets = Union(*new_sets)
return Set.__new__(cls, sets)
@property
def expr(self):
from sympy.functions.elementary.trigonometric import sin, cos
r, theta = self.variables
return r*(cos(theta) + S.ImaginaryUnit*sin(theta))
class Complexes(with_metaclass(Singleton, CartesianComplexRegion)):
"""
The Set of all complex numbers
Examples
========
>>> from sympy import S, I
>>> S.Complexes
Complexes
>>> 1 + I in S.Complexes
True
See also
========
Reals
ComplexRegion
"""
is_empty = False
is_finite_set = False
# Override property from superclass since Complexes has no args
sets = ProductSet(S.Reals, S.Reals)
def __new__(cls):
return Set.__new__(cls)
def __str__(self):
return "S.Complexes"
def __repr__(self):
return "S.Complexes"
| return S.Infinity |
io.test.js | const path = require('path')
const { createIO } = require('../io')
const io = createIO()
const directoryPath = process.cwd()
const packageJsonPath = path.resolve(directoryPath, 'package.json')
const errFilePath = path.resolve(directoryPath, 'package.err')
const newFilePath = path.resolve(directoryPath, 'newfile.txt')
const filesInDirectory = [
path.resolve(directoryPath, '.gitignore'),
path.resolve(directoryPath, 'package.json'),
path.resolve(directoryPath, 'package-lock.json'),
]
test('Test readFile scenarios', () => {
expect(io.File.readFile(packageJsonPath)).not.toBe(null)
expect(() => { io.File.readFile(directoryPath) }).toThrow(io.errorMessages.pathIsNotFile)
expect(() => { io.File.readFile(errFilePath) }).toThrow(io.errorMessages.invalidPath)
expect(() => { io.File.readFile() }).toThrow(io.errorMessages.invalidPath)
expect(() => { io.File.readFile(5) }).toThrow(io.errorMessages.invalidPath)
expect(() => { io.File.readFile({}) }).toThrow(io.errorMessages.invalidPath)
})
test('Test isFile method', () => {
expect(io.File.isFile(packageJsonPath)).toBe(true)
expect(io.File.isFile(directoryPath)).toBe(false)
expect(() => { io.File.isFile(errFilePath) }).toThrow(io.errorMessages.invalidPath)
expect(() => { io.File.isFile() }).toThrow(io.errorMessages.invalidPath)
expect(() => { io.File.isFile(5) }).toThrow(io.errorMessages.invalidPath)
expect(() => { io.File.isFile({}) }).toThrow(io.errorMessages.invalidPath)
})
test('Test isDirectory method', () => {
expect(io.Directory.isDirectory(packageJsonPath)).toBe(false)
expect(io.Directory.isDirectory(directoryPath)).toBe(true)
expect(() => { io.Directory.isDirectory(errFilePath) }).toThrow(io.errorMessages.invalidPath)
expect(() => { io.Directory.isDirectory() }).toThrow(io.errorMessages.invalidPath)
expect(() => { io.Directory.isDirectory(5) }).toThrow(io.errorMessages.invalidPath)
expect(() => { io.Directory.isDirectory({}) }).toThrow(io.errorMessages.invalidPath)
})
test('Test getFilesFromDirectory method', () => {
// Sorting arrays because 'toEqual' takes the items order in consideration
expect(io.File.getFilesFromDirectory(directoryPath).sort()).toEqual(filesInDirectory.sort())
expect(() => { io.File.getFilesFromDirectory(errFilePath) }).toThrow(io.errorMessages.invalidPath)
expect(() => { io.File.getFilesFromDirectory() }).toThrow(io.errorMessages.invalidPath)
expect(() => { io.File.getFilesFromDirectory(5) }).toThrow(io.errorMessages.invalidPath)
expect(() => { io.File.getFilesFromDirectory({}) }).toThrow(io.errorMessages.invalidPath)
})
test('Test createFileMethod', () => {
expect(() => { io.File.createFile(newFilePath, null) }).toThrow(io.errorMessages.invalidBuffer)
expect(() => { io.File.createFile(newFilePath, 5) }).toThrow(io.errorMessages.invalidBuffer)
expect(() => { io.File.createFile(newFilePath, 'buffer') }).toThrow(io.errorMessages.invalidBuffer)
})
cv.js | import React from 'react';
import { animated } from 'react-spring/renderprops';
import { graphql } from 'gatsby';
import styled, { css } from 'styled-components';
import { trackCustomEvent } from 'gatsby-plugin-google-analytics';
import cv from '../assets/documents/Andrew James CV.pdf';
import { formatId } from '../utils/formatId';
import { convertPxToRem } from '../utils/unitConversion';
import { FadeThrough } from '../components/Animation';
import { LinkedInIcon } from '../components/icons/LinkedInIcon';
import { ExternalLink, DownloadLink } from '../components/Link';
import { IconButton } from '../components/Button';
import { Text } from '../components/Text';
import { SEO } from '../components/SEO';
import { MEDIA, BREAKPOINTS } from '../styles/media';
import {
EnvelopeOpenIcon,
FileIcon,
GitHubLogoIcon,
HomeIcon,
DownloadIcon,
} from '@radix-ui/react-icons';
import { Hero } from '../components/Hero';
const List = styled.ul`
margin-bottom: var(--spacing-huge);
`;
const ListItem = styled.li`
margin-bottom: var(--spacing-medium);
${Text} {
margin-left: var(--spacing-medium);
}
`;
const StyledExternalLink = styled(ExternalLink)`
display: inline-flex;
align-items: center;
line-height: 1.5rem;
`;
const Wrap = styled.div`
width: 100%;
overflow: hidden;
`;
const Main = styled.main`
position: relative;
margin-right: auto;
margin-bottom: 0;
margin-left: auto;
padding-right: var(--spacing-medium);
padding-left: var(--spacing-medium);
max-width: ${convertPxToRem(BREAKPOINTS.desktopWide)};
${MEDIA.tablet`
padding-right: var(--spacing-huge);
padding-left: var(--spacing-huge);
`}
${MEDIA.print`
margin-top: 0;
padding-right: 0;
padding-left: 0;
max-width: none;
`}
`;
const Container = styled(animated.div)(
({ theme }) => css`
flex: 1;
display: flex;
flex-direction: column;
position: relative;
background-color: ${theme.overlay10};
border-radius: 4px;
box-shadow: 0px 2px 4px rgba(0, 0, 0, 0.18);
margin-bottom: var(--spacing-large);
${MEDIA.tablet`
margin-bottom: var(--spacing-massive);
`}
${MEDIA.print`
margin-bottom: 0;
border-top: none;
`}
`,
);
const Heading = styled.div`
display: flex;
justify-content: space-between;
align-items: flex-end;
padding: var(--spacing-huge) var(--spacing-medium);
${MEDIA.tablet`
padding: var(--spacing-huge);
`}
${MEDIA.print`
padding: var(--spacing-huge);
`};
`;
const HeaderIcons = styled.div`
display: none;
${MEDIA.tablet`
display: flex;
align-items: center;
& > ${DownloadLink} {
display: inline-flex;
align-items: center;
justify-content: center;
margin-left: var(--spacing-medium);
min-width: 44px;
min-height: 44px;
}
`}
`;
const Wrapper = styled.div(
({ theme }) => css`
flex: 1;
display: flex;
flex-direction: column-reverse;
padding: 0 var(--spacing-medium);
${MEDIA.tablet`
border-top: 5px solid;
border-bottom: 5px solid;
border-color: ${theme.borderColor};
padding: var(--spacing-huge);
`}
${MEDIA.desktop`
display: inline-flex;
flex-direction: row;
`};
${MEDIA.print`
display: inline-flex;
flex-direction: row;
padding: var(--spacing-huge);
border-top: 5px solid;
border-color: var(--color-black);
`};
`,
);
const Sidebar = styled.div(
({ theme }) => css`
${MEDIA.desktop`
flex: 0 1 33%;
border-right: 2px solid;
border-color: ${theme.borderColor};
padding: 0 var(--spacing-huge) 0 0;
`};
${MEDIA.print`
flex: 0 1 33%;
border-right: 2px solid;
border-color: var(--color-black);
padding: 0 var(--spacing-huge) 0 0;
`};
`,
);
const Experience = styled.div`
padding: 0;
${MEDIA.desktop`
flex: 1;
padding-left: var(--spacing-huge);
`};
${MEDIA.print`
flex: 1;
padding-left: var(--spacing-huge);
`};
`;
const Block = styled.section`
margin-bottom: var(--spacing-huge);
${MEDIA.desktop`
margin-bottom: var(--spacing-massive);
`};
${MEDIA.print`
margin-bottom: var(--spacing-giant);
`};
`;
const BlockHeader = styled(props => <Text as="h2" size="l" {...props} />)(
({ theme }) => css`
margin-bottom: var(--spacing-large);
border-bottom: 1px solid ${theme.cvBorderColor};
color: ${theme.headerColor};
${MEDIA.print`
border-color: var(--color-black);
color: var(--color-black);
`}
`,
);
const BlockSubheader = styled(Text)(
({ theme }) => css`
color: ${theme.cvSubheaderColor};
margin-bottom: var(--spacing-tiny);
${MEDIA.print`
color: var(--color-black);
`}
`,
);
const Description = styled.div`
${Text} {
margin-top: var(--spacing-large);
padding-bottom: 0;
}
${Text}:first-child {
margin-top: var(--spacing-medium);
}
ul {
list-style-type: disc;
padding-left: var(--spacing-large);
margin-top: var(--spacing-small);
margin-bottom: 0;
}
${MEDIA.print`
${Text} {
margin-top: var(--spacing-small);
}
`}
`;
const Tag = styled(props => <Text size="xs" {...props} />)(
({ theme }) => css`
padding: var(--spacing-small);
border-radius: 4px;
text-align: center;
border: 1px solid;
border-color: ${theme.cvInterfaceColor};
color: ${theme.copyColor};
${MEDIA.print`
color: var(--color-black);
border-color: var(--color-gray-400);
`}
`,
);
const TagContainer = styled.div`
display: grid;
grid-column-gap: var(--spacing-small);
grid-row-gap: var(--spacing-small);
${MEDIA.tablet`
grid-template-columns: repeat(2, 1fr);
`}
`;
const Dates = styled(Text)(
({ theme }) => css`
display: block;
color: ${theme.auxiliaryColor};
${MEDIA.tablet`
align-self: flex-end;
margin-left: auto;
`}
${MEDIA.print`
transform: translateY(-1px);
margin-left: auto;
`}
`,
);
const EmployerLocation = styled(Text)(
({ theme }) => css`
position: relative;
color: ${theme.auxiliaryColor};
margin-top: var(--spacing-tiny);
margin-bottom: var(--spacing-tiny);
${MEDIA.tablet`
margin-top: 0;
margin-bottom: 0;
margin-left: var(--spacing-large);
&::before {
content: '/';
position: absolute;
left: calc(var(--spacing-medium) * -1);
top: 50%;
transform: translate(50%, -50%);
}
`}
${MEDIA.print`
margin-top: 0;
margin-bottom: 0;
margin-left: var(--spacing-large);
&::before {
content: '/';
position: absolute;
left: calc(var(--spacing-medium) * -1 - 2px);
top: 50%;
transform: translate(50%, -50%);
}
`}
`,
);
const ExperienceEmployer = styled.div`
display: flex;
flex-flow: column;
${MEDIA.tablet`
flex-flow: row;
align-items: baseline;
`};
${MEDIA.print`
flex-flow: row;
align-items: baseline;
`};
`;
const ExperienceInfo = styled.div`
display: flex;
flex-flow: column;
${MEDIA.tablet`
flex-flow: row;
`}
${MEDIA.print`
flex-flow: row;
align-items: baseline;
`}
`;
const AuthorInfo = styled(Text)(
({ theme }) => css`
color: ${theme.auxiliaryColor};
`,
);
const Title = styled(Text)`
color: var(--color-white);
margin-bottom: var(--spacing-large);
text-align: center;
${MEDIA.tablet`
opacity: 0;
pointer-events: none;
`}
${MEDIA.print`
display: none;
`}
`;
const EducationBlock = styled(Block)(({ variant }) => [
css`
display: flex;
flex-flow: column;
${MEDIA.desktop`
margin-bottom: var(--spacing-huge);
`};
`,
variant === 'slim' &&
css`
margin-bottom: var(--spacing-medium);
${MEDIA.desktop`
margin-bottom: var(--spacing-large);
`};
`,
]);
export default function CV({ data, location: { pathname } }) {
const { education } = data.educationJson;
const { experience } = data.experienceJson;
const { social } = data.socialJson;
const { author, siteUrl } = data.site.siteMetadata;
const currentPosition = experience[0].position;
const siteDisplayUrl = siteUrl.split('https://')[1];
const expertise = ['html', 'css/css-in-js', 'javascript', 'react'];
const interests = ['design systems', 'a11y', 'typescript', 'storybook'];
const hobbies = ['writing', 'streaming', 'cycling', 'guitar'];
function handleCvPrint() {
trackCustomEvent({
category: 'CV print button',
action: 'Click',
label: 'Print',
});
window.print();
}
function handleCvDownload() {
trackCustomEvent({
category: 'CV download link',
action: 'Click',
});
}
return (
<>
<SEO
path={pathname}
title="CV"
description="An overview of my experience and technical expertise"
/>
<Hero />
<Wrap>
<Main>
<Title as="h1" size="4xl" id="cv">
CV
</Title>
<FadeThrough>
{({ s, o }) => (
<Container
style={{
transform: s.interpolate(s => `scale(${s})`),
opacity: o && o.interpolate(o => o),
}}
>
<Heading>
<div>
<Text
as="h1"
size="4xl"
css={`
color: ${({ theme }) => theme.copyColor};
${MEDIA.print`
color: var(--color-black);
`}
`}
>
{author.name}
</Text>
<AuthorInfo size="m">
{currentPosition} / {author.location}
</AuthorInfo>
</div>
<HeaderIcons aria-label="Export CV">
<IconButton
title="Print"
aria-label="Print"
onClick={handleCvPrint}
>
<FileIcon
role="img"
title="Print"
width="2rem"
height="2rem"
/>
</IconButton>
<DownloadLink
title="Download"
aria-label="Download"
href={cv}
onClick={handleCvDownload}
>
<DownloadIcon
role="img"
title="Download"
width="2rem"
height="2rem"
/>
</DownloadLink>
</HeaderIcons>
</Heading>
<Wrapper>
<Sidebar>
<Block aria-labelledby="cv-contact">
<BlockHeader id="cv-contact">Contact</BlockHeader>
<nav aria-label="Contact">
<List>
<ListItem>
<StyledExternalLink
href={`mailto:${author.email}`}
aria-label="Email me"
>
<EnvelopeOpenIcon
role="img"
title="Email me"
width="1.5rem"
height="1.5rem"
/>
<Text size="s">{author.email}</Text>
</StyledExternalLink>
</ListItem>
<ListItem>
<StyledExternalLink
href={siteUrl}
aria-label="Return to homepage"
>
<HomeIcon
role="img"
title="Visit me"
width="1.5rem"
height="1.5rem"
/>
<Text size="s">{siteDisplayUrl}</Text>
</StyledExternalLink>
</ListItem>
<ListItem>
<StyledExternalLink
href={social.github.url}
aria-label={`${social.github.label} profile`}
>
<GitHubLogoIcon
role="img"
title="My Github profile"
width="1.5rem"
height="1.5rem"
/>
<Text size="s">{social.github.handle}</Text>
</StyledExternalLink>
</ListItem>
<ListItem>
<StyledExternalLink
href={social.linkedIn.url}
aria-label={`${social.linkedIn.label} profile`}
>
<LinkedInIcon width="1.5rem" height="1.5rem" />
<Text size="s">{social.linkedIn.handle}</Text>
</StyledExternalLink>
</ListItem>
</List>
</nav>
</Block>
<Block aria-labelledby="cv-education">
<BlockHeader id="cv-education">Education</BlockHeader>
{education.map(
(
{ qualification, course, institute, dates },
index,
) => (
<EducationBlock
key={institute}
id={`edu-${formatId(institute)}`}
aria-label={institute}
aria-labelledby={`cv-education edu-${formatId(
institute,
)}`}
variant={index === 0 && 'slim'}
>
{qualification && (
<BlockSubheader
as="h3"
size="l"
id={`edu-${formatId(qualification)}`}
>
{qualification}
</BlockSubheader>
)}
<Text
size="ps"
css="font-weight: 600; margin-bottom: var(--spacing-tiny);"
>
{course}
</Text>
<Text size="ps">{institute}</Text>
<Text
css={`
color: ${({ theme }) => theme.auxiliaryColor};
margin-top: var(--spacing-tiny);
`}
size="xs"
>
{dates}
</Text>
</EducationBlock>
),
)}
</Block>
<Block aria-labelledby="cv-expertise">
<BlockHeader id="cv-expertise">Expertise</BlockHeader>
<TagContainer>
{expertise.map((skill, index) => (
<Tag key={`Skill-${index}`}>{skill}</Tag>
))}
</TagContainer>
</Block>
<Block
aria-labelledby="cv-interests"
css={`
${MEDIA.print`padding-top: var(--spacing-huge);`}
`}
>
<BlockHeader id="cv-interests">Interests</BlockHeader>
<TagContainer>
{interests.map((interest, index) => (
<Tag key={`Interest-${index}`}>{interest}</Tag>
))}
</TagContainer>
</Block>
<Block aria-labelledby="cv-hobbies">
<BlockHeader id="cv-hobbies">Hobbies</BlockHeader>
<TagContainer>
{hobbies.map((hobby, index) => (
<Tag key={`Hobby-${index}`}>{hobby}</Tag>
))}
</TagContainer>
</Block>
<Block aria-labelledby="cv-references">
<BlockHeader id="cv-references">References</BlockHeader>
<Text>Written references available on request.</Text>
</Block>
</Sidebar>
<Experience>
<Block aria-labelledby="cv-profile">
<BlockHeader id="cv-profile">Profile</BlockHeader>
<Text as="p">
My passion for digital technology continually drives me
to advance my skill set as a software engineer. With an
analytical mindset and strong communication and frontend
development skills, I thrive in environments where I can
learn from others and inspire those around me.
</Text>
<Text as="p" css="margin-top: var(--spacing-medium);">
Over the years I've refined a set of technical
principles to strive towards, namely: complexity should
only be introduced when it’s unavoidable; code should be
easy to reason with and delete; avoid abstracting too
early, and the top priority is always the best possible
user experience.
</Text>
</Block>
<Block>
<BlockHeader id="cv-experience">Experience</BlockHeader>
{experience.map(
({
position,
company,
location,
url,
dates,
blurb,
portfolio,
}) => (
<Block
key={`${company}-${position}`}
id={`exp-${formatId(`${company}-${position}`)}`}
aria-label={`${company}: ${position}`}
>
<BlockSubheader as="h3" size="xl">
{position}
</BlockSubheader>
<ExperienceInfo>
<ExperienceEmployer>
<ExternalLink
href={url}
aria-label={`${company} website`}
css="width: fit-content;"
highlight
>
<Text>{company}</Text>
</ExternalLink>
<EmployerLocation size="ps">
{location}
</EmployerLocation>
</ExperienceEmployer>
<Dates size="xs">{dates}</Dates>
</ExperienceInfo>
<Description>
{blurb ? <Text as="p">{blurb}</Text> : null}
{portfolio ? (
<>
<Text as="h4" size="m">
Notable work
</Text>
<ul>
{portfolio.map(({ name, href }) => (
<li key={name}>
<ExternalLink href={href}>
<Text size="ps">{name}</Text>
</ExternalLink>
</li>
))}
</ul>
</>
) : null}
</Description>
</Block>
),
)}
</Block>
</Experience>
</Wrapper>
</Container>
)}
</FadeThrough>
</Main>
</Wrap>
</>
);
}
export const query = graphql`
query CvQuery {
site {
siteMetadata {
author {
name
location
email
}
siteUrl
}
}
educationJson {
education {
course
dates
institute
qualification
}
}
experienceJson {
experience {
blurb
company
dates
location
portfolio {
href
name
}
position
url
}
}
socialJson {
social {
github {
handle
label
url
}
linkedIn {
handle
label
url
}
}
}
}
`;
weird.py | if __name__ == '__main__':
n = int(input().strip())
if n % 2 != 0:
print("Weird")
elif 2 <= n <= 5:
print("Not Weird")
elif 6 <= n <= 20:
print("Weird")
else:
print("Not Weird") | print("Weird") |
ethernet_test.go | // Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ethernet
import (
"flag"
"fmt"
"github.com/ampf11111/namazu/nmz/endpoint/local"
"github.com/ampf11111/namazu/nmz/signal"
logutil "github.com/ampf11111/namazu/nmz/util/log"
"github.com/ampf11111/namazu/nmz/util/mockorchestrator"
"github.com/stretchr/testify/assert"
zmq "github.com/vaughan0/go-zmq"
"io/ioutil"
"os"
"testing"
)
func TestMain(m *testing.M) {
flag.Parse()
logutil.InitLog("", true)
signal.RegisterKnownSignals()
orcActionCh := make(chan signal.Action)
orcEventCh := local.SingletonLocalEndpoint.Start(orcActionCh)
defer local.SingletonLocalEndpoint.Shutdown()
mockOrc := mockorchestrator.NewMockOrchestrator(orcEventCh, orcActionCh)
mockOrc.Start()
defer mockOrc.Shutdown()
os.Exit(m.Run())
}
func tempZMQAddr(t *testing.T) string {
tmpfile, err := ioutil.TempFile("", "test-hookswitch-inspector")
assert.NoError(t, err)
path := tmpfile.Name()
// we don't need the file itself
os.Remove(path)
addr := fmt.Sprintf("ipc://%s", path)
return addr
}
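// hookSwitchRequest builds a two-part ZMQ message in what is assumed
// here to be the hookswitch wire format: JSON metadata followed by a
// raw Ethernet frame (6-byte destination MAC, 6-byte source MAC,
// 2-byte EtherType 0x0800, then the payload).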
func hookSwitchRequest(t *testing.T, id int) [][]byte {
meta := fmt.Sprintf("{\"id\":%d}", id)
eth := "\xff\xff\xff\xff\xff\xff" +
"\x00\x00\x00\x00\x00\x00" +
"\x08\x00"
frame := eth + "dummypayload"
return [][]byte{[]byte(meta), []byte(frame)}
}
func TestHookSwitchInspector_10(t *testing.T) {
testHookSwitchInspector(t, 10)
}
func testHookSwitchInspector(t *testing.T, n int) {
context, err := zmq.NewContext()
assert.NoError(t, err)
socket, err := context.Socket(zmq.Pair)
assert.NoError(t, err)
zmqAddr := tempZMQAddr(t)
insp, err := NewHookSwitchInspector(
"local://",
"_dummy_entity_id",
zmqAddr,
true)
assert.NoError(t, err)
socket.Connect(zmqAddr)
go func() {
insp.Serve()
}()
chans := socket.Channels()
for i := 0; i < n; i++ {
req := hookSwitchRequest(t, i)
chans.Out() <- req
t.Logf("Sent %d, %v", i, req)
select {
case rsp := <-chans.In():
t.Logf("Received %d, %v", i, rsp)
case err := <-chans.Errors():
t.Fatal(err)
}
}
insp.Shutdown()
chans.Close()
socket.Close()
context.Close()
}
jet.go | package view
import (
"fmt"
"io"
"os"
"path"
"reflect"
"strings"
"github.com/kataras/iris/v12/context"
"github.com/CloudyKit/jet"
)
const jetEngineName = "jet"
// JetEngine is the jet template parser's view engine.
type JetEngine struct {
directory string
extension string
// physical system files or app-embedded, see `Binary(..., ...)`. Defaults to file system on initialization.
loader jet.Loader
developmentMode bool
// The Set is the `*jet.Set`, exported to offer any custom capabilities that jet users may want.
// Available after `Load`.
Set *jet.Set
// Note that global vars and functions are set in a single spot on the jet parser.
// If AddFunc or AddVar called before `Load` then these will be set here to be used via `Load` and clear.
vars map[string]interface{}
jetRangerRendererContextKey string
}
var _ Engine = (*JetEngine)(nil)
// The jet library does not export these extensions or give us any option to modify them via Set
// (unless we parse the files by ourselves but this is not a smart choice).
var jetExtensions = [...]string{
".html.jet",
".jet.html",
".jet",
}
// Jet creates and returns a new jet view engine.
func Jet(directory, extension string) *JetEngine {
// if _, err := os.Stat(directory); os.IsNotExist(err) {
// panic(err)
// }
extOK := false
for _, ext := range jetExtensions {
if ext == extension {
extOK = true
break
}
}
if !extOK {
panic(fmt.Sprintf("%s extension is not a valid jet engine extension[%s]", extension, strings.Join(jetExtensions[0:], ", ")))
}
s := &JetEngine{
directory: directory,
extension: extension,
loader: jet.NewOSFileSystemLoader(directory),
}
return s
}
// DisableViewDataTypeCheck accepts a context key name to use
// to map the jet specific renderer and ranger over context's view data.
//
// If "jetDataContextKey" is not empty then `ExecuteWriter` will not check for
// types to check if an element passed through `Context.ViewData`
// contains a jet.Renderer or jet.Ranger or both.
// Instead will map those with simple key data naming (faster).
// Also it wont check if a value is already a reflect.Value (jet expects this type as values).
//
// Defaults to empty.
func (s *JetEngine) DisableViewDataTypeCheck(jetRangerRendererContextKey string) *JetEngine {
s.jetRangerRendererContextKey = jetRangerRendererContextKey
return s
}
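// Illustrative usage (app and handler names are assumed, not part of this file):
//
// tmpl := view.Jet("./views", ".jet").DisableViewDataTypeCheck("jet")
// app.RegisterView(tmpl)
// // in a handler: ctx.ViewData("jet", myRangerOrRenderer)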
// String returns the name of this view engine, the "jet".
func (s *JetEngine) String() string {
return jetEngineName
}
// Ext should return the final file extension which this view engine is responsible to render.
func (s *JetEngine) Ext() string {
return s.extension
}
// Delims sets the action delimiters to the specified strings, to be used in
// templates. An empty delimiter stands for the
// corresponding default: {{ or }}.
// Should act before `Load` or `iris.Application#RegisterView`.
func (s *JetEngine) Delims(left, right string) *JetEngine {
s.Set.Delims(left, right)
return s
}
// JetArguments is a type alias of `jet.Arguments`,
// can be used on `AddFunc$funcBody`.
type JetArguments = jet.Arguments
// AddFunc adds a global function to the jet template set.
func (s *JetEngine) AddFunc(funcName string, funcBody interface{}) {
// if something like "urlpath" is registered.
if generalFunc, ok := funcBody.(func(string, ...interface{}) string); ok {
// jet, unlike others does not accept a func(string, ...interface{}) string,
// instead it wants:
// func(JetArguments) reflect.Value.
s.AddVar(funcName, func(args JetArguments) reflect.Value {
n := args.NumOfArguments()
if n == 0 { // no input, don't execute the function, panic instead.
panic(funcName + " expects one or more input arguments")
}
firstInput := args.Get(0).String()
if n == 1 { // if only the first argument is given.
return reflect.ValueOf(generalFunc(firstInput))
}
// if has variadic.
variadicN := n - 1
variadicInputs := make([]interface{}, variadicN) // except the first one.
for i := 0; i < variadicN; i++ {
variadicInputs[i] = args.Get(i + 1).Interface()
}
return reflect.ValueOf(generalFunc(firstInput, variadicInputs...))
})
return
}
if jetFunc, ok := funcBody.(jet.Func); !ok {
alternativeJetFunc, ok := funcBody.(func(JetArguments) reflect.Value)
if !ok {
panic(fmt.Sprintf("JetEngine.AddFunc: funcBody argument is not a type of func(JetArguments) reflect.Value. Got %T instead", funcBody))
}
s.AddVar(funcName, jet.Func(alternativeJetFunc))
} else {
s.AddVar(funcName, jetFunc)
}
}
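// Illustrative usage (assumed): a func(string, ...interface{}) string such as
//
// tmpl.AddFunc("greet", func(name string, _ ...interface{}) string { return "hello " + name })
//
// becomes callable from a template as {{ greet("world") }}.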
// AddVar adds a global variable to the jet template set.
func (s *JetEngine) AddVar(key string, value interface{}) {
if s.Set != nil {
s.Set.AddGlobal(key, value)
} else {
if s.vars == nil {
s.vars = make(map[string]interface{})
}
s.vars[key] = value
}
}
// Reload, if set to true, reloads the templates on each render;
// use it when you're developing and are tired of restarting
// the whole app whenever you edit a template file.
//
// Note that if `true` is passed then only one `View -> ExecuteWriter` can render at a time;
// concurrent access across clients is not safe, so use it only in the development state.
func (s *JetEngine) Reload(developmentMode bool) *JetEngine {
s.developmentMode = developmentMode
if s.Set != nil {
s.Set.SetDevelopmentMode(developmentMode)
}
return s
}
// SetLoader can be used when the caller wants to use something like
// multi.Loader or httpfs.Loader of the jet subpackages,
// overrides any previous loader may set by `Binary` or the default.
// Should act before `Load` or `iris.Application#RegisterView`.
func (s *JetEngine) SetLoader(loader jet.Loader) *JetEngine {
s.loader = loader
return s
}
// Binary optionally, use it when template files are distributed
// inside the app executable (.go generated files).
//
// The assetFn and namesFn can come from the go-bindata library.
// Should act before `Load` or `iris.Application#RegisterView`.
func (s *JetEngine) Binary(assetFn func(name string) ([]byte, error), assetNames func() []string) *JetEngine {
// embedded.
vdir := s.directory
if vdir[0] == '.' {
vdir = vdir[1:]
}
// second check for /something, (or ./something if we had dot on 0 it will be removed)
if vdir[0] == '/' || vdir[0] == os.PathSeparator {
vdir = vdir[1:]
}
// check for trailing slashes because new users may add one by mistake;
// although all examples show the correct way, you never know --
// i.e. "./assets/" is not correct, it should be "./assets".
// remove last "/".
if trailingSlashIdx := len(vdir) - 1; vdir[trailingSlashIdx] == '/' {
vdir = vdir[0:trailingSlashIdx]
}
namesSlice := assetNames()
names := make(map[string]struct{})
for _, name := range namesSlice {
if !strings.HasPrefix(name, vdir) {
continue
}
extOK := false
fileExt := path.Ext(name)
for _, ext := range jetExtensions {
if ext == fileExt {
extOK = true
break
}
}
if !extOK {
continue
}
names[name] = struct{}{}
}
if len(names) == 0 {
panic("JetEngine.Binary: no embedded files found in directory: " + vdir)
}
s.loader = &embeddedLoader{
vdir: vdir,
asset: assetFn,
names: names,
}
return s
}
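// Illustrative usage with go-bindata generated helpers (names assumed):
//
// tmpl := view.Jet("./views", ".jet").Binary(Asset, AssetNames)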
type (
embeddedLoader struct {
vdir string
asset func(name string) ([]byte, error)
names map[string]struct{}
}
embeddedFile struct {
contents []byte // the contents are NOT consumed.
readen int64
}
)
var (
_ jet.Loader = (*embeddedLoader)(nil)
_ io.ReadCloser = (*embeddedFile)(nil)
)
func (f *embeddedFile) Close() error { return nil }
func (f *embeddedFile) Read(p []byte) (int, error) {
if f.readen >= int64(len(f.contents)) {
return 0, io.EOF
}
n := copy(p, f.contents[f.readen:])
f.readen += int64(n)
return n, nil
}
// Open opens a file from the embedded assets.
func (l *embeddedLoader) Open(name string) (io.ReadCloser, error) {
// name = path.Join(l.vdir, name)
contents, err := l.asset(name)
if err != nil {
return nil, err
}
return &embeddedFile{
contents: contents,
}, nil
}
// Exists checks if the template name exists by walking the list of template paths
// returns string with the full path of the template and bool true if the template file was found
func (l *embeddedLoader) Exists(name string) (string, bool) {
fileName := path.Join(l.vdir, name)
if _, ok := l.names[fileName]; ok {
return fileName, true
}
return "", false
}
// Load should load the templates from a physical system directory or by an embedded one (assets/go-bindata).
func (s *JetEngine) Load() error {
s.Set = jet.NewHTMLSetLoader(s.loader)
s.Set.SetDevelopmentMode(s.developmentMode)
if s.vars != nil {
for key, value := range s.vars {
s.Set.AddGlobal(key, value)
}
}
// Note that, unlike the rest of template engines implementations,
// we don't call the Set.GetTemplate to parse the templates,
// we let it to the jet template parser itself which does that at serve-time and caches each template by itself.
return nil
}
type (
// JetRuntimeVars is a type alias for `jet.VarMap`.
// Can be used at `AddJetRuntimeVars/JetEngine.AddRuntimeVars`
// to set a runtime variable ${name} to the executing template.
JetRuntimeVars = jet.VarMap
// JetRuntime is a type alias of `jet.Runtime`,
// can be used on RuntimeVariable input function.
JetRuntime = jet.Runtime
)
// JetRuntimeVarsContextKey is the Iris Context key to keep any custom jet runtime variables.
// See `AddJetRuntimeVars` package-level function and `JetEngine.AddRuntimeVars` method.
const JetRuntimeVarsContextKey = "iris.jetvarmap"
// AddJetRuntimeVars sets or inserts runtime jet variables through the Iris Context.
// This gives the ability to add runtime variables from different handlers in the request chain,
// something that the jet template parser does not offer at all.
//
// Usage: view.AddJetRuntimeVars(ctx, view.JetRuntimeVars{...}).
// See `JetEngine.AddRuntimeVars` too.
func AddJetRuntimeVars(ctx context.Context, jetVarMap JetRuntimeVars) {
if v := ctx.Values().Get(JetRuntimeVarsContextKey); v != nil {
if vars, ok := v.(JetRuntimeVars); ok {
for key, value := range jetVarMap {
vars[key] = value
}
return
}
}
ctx.Values().Set(JetRuntimeVarsContextKey, jetVarMap)
}
// AddRuntimeVars sets or inserts runtime jet variables through the Iris Context.
// This gives the ability to add runtime variables from different handlers in the request chain,
// something that the jet template parser does not offer at all.
//
// Usage: view.AddJetRuntimeVars(ctx, view.JetRuntimeVars{...}).
// See `view.AddJetRuntimeVars` if package-level access is more meaningful to the code flow.
func (s *JetEngine) AddRuntimeVars(ctx context.Context, vars JetRuntimeVars) {
AddJetRuntimeVars(ctx, vars)
}
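// Illustrative usage inside a handler (names assumed):
//
// vars := make(view.JetRuntimeVars)
// vars.Set("currentUser", "kataras")
// jetEngine.AddRuntimeVars(ctx, vars)
// ctx.View("index.jet")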
type rangerAndRenderer struct {
ranger jet.Ranger
renderer jet.Renderer
}
func (rr rangerAndRenderer) Range() (reflect.Value, reflect.Value, bool) {
return rr.ranger.Range()
}
func (rr rangerAndRenderer) Render(jetRuntime *jet.Runtime) {
rr.renderer.Render(jetRuntime)
}
func rangerRenderer(bindingData interface{}) (interface{}, bool) {
if ranger, ok := bindingData.(jet.Ranger); ok {
// Externally fixes a BUG on the jet template parser:
// eval.go#executeList(list *ListNode):NodeRange.isSet.getRanger(expression = st.evalPrimaryExpressionGroup)
// which does not act the "ranger" as element, instead is converted to a value of struct, which makes a jet.Ranger func(*myStruct) Range...
// not a compatible jet.Ranger.
// getRanger(st.context) should work but the author of the jet library is not currently available
// to review a recommendation or a PR, and I don't really want to vendor it because
// some end-users may use the jet import path to pass things like global funcs and so on.
// So to fix it (at least temporarily and only for a ref Ranger) we take a pointer to the "ranger", not the bindingData, and this may
// have its downside because the same bindingData may be compatible with other node actions like range or custom Render,
// but we have no other way at the moment. The same problem exists on the `Renderer` too!
// The solution below fixes the above issue but any fields of the struct are not available,
// this is ok because most of the times if not always, the users of jet don't use fields on Ranger and custom Renderer inside the templates.
if renderer, ok := bindingData.(jet.Renderer); ok {
// this can make a Ranger and Renderer both work together, unlike the jet parser itself.
return rangerAndRenderer{ranger, renderer}, true
}
return &ranger, true
}
if renderer, ok := bindingData.(jet.Renderer); ok {
// Here the fields are not available but usually if completes the jet.Renderer no
// fields are used in the template.
return &renderer, true // see above ^.
}
return nil, false
}
// ExecuteWriter should execute a template by its filename with an optional layout and bindingData.
// See `DisableViewDataTypeCheck` too.
func (s *JetEngine) ExecuteWriter(w io.Writer, filename string, layout string, bindingData interface{}) error {
tmpl, err := s.Set.GetTemplate(filename)
if err != nil {
return err
}
var vars JetRuntimeVars
if ctx, ok := w.(context.Context); ok {
runtimeVars := ctx.Values().Get(JetRuntimeVarsContextKey)
if runtimeVars != nil {
if jetVars, ok := runtimeVars.(JetRuntimeVars); ok {
vars = jetVars
}
}
}
if bindingData == nil {
return tmpl.Execute(w, vars, nil)
}
jetRangerRenderer, ok := rangerRenderer(bindingData)
if ok {
return tmpl.Execute(w, vars, jetRangerRenderer)
}
if m, ok := bindingData.(context.Map); ok {
for k, v := range m {
if s.jetRangerRendererContextKey == "" {
switch value := v.(type) {
case jet.Ranger, jet.Renderer:
jetRangerRenderer, _ = rangerRenderer(value)
case reflect.Value:
if vars == nil {
vars = make(JetRuntimeVars)
}
// if it's already a reflect value.
vars[k] = value
default:
if vars == nil {
vars = make(JetRuntimeVars)
}
vars.Set(k, v)
}
continue
}
if k == s.jetRangerRendererContextKey {
jetRangerRenderer = v
continue
}
if vars == nil {
vars = make(JetRuntimeVars)
}
vars.Set(k, v)
}
return tmpl.Execute(w, vars, jetRangerRenderer)
}
return tmpl.Execute(w, vars, bindingData)
}
test_update_ticket.py | import pytest
from seleniumbase import BaseCase
from qa327_test.conftest import base_url
from unittest.mock import patch
from qa327_test.common import TEST_USER, TEST_TICKET, auto_login
from qa327.models import Ticket
from datetime import datetime
from qa327_test.conftest import base_url
"""
This file defines all unit tests for the update ticket feature
"""
class FrontEndUpdateTicketTest(BaseCase):
"""
A class that contains the unit tests for the update ticket feature
"""
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_name_alphanumeric_negative(self, *_):
"""
R5.1.1: The name of the ticket has to be alphanumeric-only - Negative.
"""
# Login and user mocking is handled with the common login decorator
# Enter a string containing symbols (ex. "t!cket_1") into the element `#updateform_input_name`
self.type("#updateform_input_name", "t!cket_1")
# Enter the test_ticket's quantity in element `#updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click the element `#updateform_submit`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `.message_error` element shows an error message stating “Unable to update ticket: The name of the ticket has to be alphanumeric only”.
self.assert_text("Unable to update ticket: The name of the ticket has to be alphanumeric only", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_name_space_first_char(self, *_):
"""
R5.1.2: The name is only allowed spaces if it is not the first or the last character - Negative. Testing the first character.
"""
# Login and user mocking is handled with the common login decorator
# Enter a string that is less than 60 characters, containing only alphanumeric symbols and has a space for the first character (ex. " t1") in the element `#updateform_input_name`
self.type("#updateform_input_name", " t1")
# Enter the test_ticket's quantity in element `#updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click the element `#updateform_submit`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `.message_error` element shows an error message stating “Unable to update ticket: The name of the ticket is only allowed spaces if it is not the first or last character”.
self.assert_text("Unable to update ticket: The name of the ticket is only allowed spaces if it is not the first or last character", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_name_space_last_char(self, *_):
"""
R5.1.3: The name is only allowed spaces if it is not the first or the last character - Negative. Testing the last character.
"""
# Login and user mocking is handled with the common login decorator
# Enter a string that is less than 60 characters, containing only alphanumeric symbols and
# has a space for the last character (ex. "t1 ") in the element `#updateform_input_name`
self.type("#updateform_input_name", "t1 ")
# Enter the test_ticket's quantity in element `#updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click the element `#updateform_submit`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `.message_error` element shows an error message stating “Unable to update ticket: The name of the ticket is only allowed spaces if it is not the first or last character”.
self.assert_text("Unable to update ticket: The name of the ticket is only allowed spaces if it is not the first or last character", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_name_space_in_middle(self, *_):
"""
R5.1.4: The name is only allowed spaces if it is not the first or the last character - Positive.
"""
# Login and user mocking is handled with the common login decorator
# Enter a string that is less than 60 characters, containing only alphanumeric symbols, that
# contains spaces that are not the first or last character (ex. "ticket 1") in the element `#updateform_input_name`
self.type("#updateform_input_name", "ticket 1")
# Enter the test_ticket's quantity in element `#updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click the element `#updateform_submit`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `.message_info` element shows "Ticket was updated successfully"
self.assert_text("Ticket was updated successfully", selector = '.message_info')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_valid_name(self, *_):
"""
R5.1.5: Updating to a valid name - Positive.
"""
# Login and user mocking is handled with the common login decorator
# Enter test ticket's name into the element `#updateform_input_name`
self.type("#updateform_input_name", TEST_TICKET.name)
# Enter the test_ticket's quantity in element `#updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click the element `#updateform_submit`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `.message_info` element shows "Ticket was updated successfully"
self.assert_text("Ticket was updated successfully", selector = '.message_info')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_long_name(self, *_):
"""
| .backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_low_quantity(self, *_):
"""
R5.3.1: The quantity of the tickets has to be more than 0, and less than or equal to 100 - Negative. Testing quantity below range.
"""
# Login and user mocking is handled with the common login decorator
# Enter the test_ticket's name in element `#updateform_input_name`
self.type("#updateform_input_name", TEST_TICKET.name)
# Enter a number less than or equal to 0 into the element `#updateform_input_quantity`
self.type("#updateform_input_quantity", "0")
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click the element `#updateform_submit`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `.message_error` element shows an error message stating “Unable to update ticket:
# The quantity of the ticket must be between 1 and 100”.
self.assert_text("Unable to update ticket: The quantity of the ticket must be between 1 and 100", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_high_quantity(self, *_):
"""
R5.3.2: The quantity of the tickets has to be more than 0, and less than or equal to 100 - Negative. Testing quantity above range.
"""
# Login and user mocking is handled with the common login decorator
# Enter the test_ticket's name in element `#updateform_input_name`
self.type("#updateform_input_name", TEST_TICKET.name)
# Enter a number greater than 100 (ex. 101) into the element `#updateform_input_quantity`
self.type("#updateform_input_quantity", "101")
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click the element `#updateform_submit`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `.message_error` element shows an error message stating “Unable to update ticket:
# The quantity of the ticket must be between 1 and 100”.
self.assert_text("Unable to update ticket: The quantity of the ticket must be between 1 and 100", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_valid_quantity(self, *_):
"""
R5.3.3: The quantity of the tickets has to be more than 0, and less than or equal to 100 - Positive.
"""
# Login and user mocking is handled with the common login decorator
# Enter test ticket's name into the element `#updateform_input_name`
self.type("#updateform_input_name", TEST_TICKET.name)
# Enter the number 50 into the element `#updateform_input_quantity`
self.type("#updateform_input_quantity", "50")
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click the element `#updateform_submit`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `.message_info` element shows "Ticket was updated successfully"
self.assert_text("Ticket was updated successfully", selector = '.message_info')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_low_price(self, *_):
"""
R5.4.1: Price has to be of range [10, 100] - Negative. Testing price below the range.
"""
# Login and user mocking is handled with the common login decorator
# Enter the test_ticket's name in element `#updateform_input_name`
self.type("#updateform_input_name", TEST_TICKET.name)
# Enter the test_ticket's quantity in element `#updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter a number below 10 (ex. 9) into the element `#updateform_input_price`
self.type("#updateform_input_price", "9")
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click the element `#updateform_submit`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `.message_error` element shows an error message stating
# “Unable to update ticket: The price of the ticket must be between 10 and 100”.
self.assert_text("Unable to update ticket: The price of the ticket must be between 10 and 100", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_high_price(self, *_):
"""
R5.4.2: Price has to be of range [10, 100] - Negative. Testing price above the range.
"""
# Login and user mocking is handled with the common login decorator
# Enter the test_ticket's name in element `#updateform_input_name`
self.type("#updateform_input_name", TEST_TICKET.name)
# Enter the test_ticket's quantity in element `#updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter a number above 100 (ex. 101) into the element `#updateform_input_price`
self.type("#updateform_input_price", "101")
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click the element `#updateform_submit`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `.message_error` element shows an error message stating
# “Unable to update ticket: The price of the ticket must be between 10 and 100”.
self.assert_text("Unable to update ticket: The price of the ticket must be between 10 and 100", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_valid_price(self, *_):
"""
R5.4.3: Price has to be of range [10, 100] - Positive.
"""
# Login and user mocking is handled with the common login decorator
# Enter test ticket's name into the element `#updateform_input_name`
self.type("#updateform_input_name", TEST_TICKET.name)
# Enter the test_ticket's quantity in element `#updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter the number 50 into the element `#updateform_input_price`
self.type("#updateform_input_price", "50")
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click the element `#updateform_submit`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `.message_info` element shows "Ticket was updated successfully"
self.assert_text("Ticket was updated successfully", selector = '.message_info')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_incorrect_date_format(self, *_):
"""
R5.5.1: Date must be given in the format YYYYMMDD (e.g. 20200901) - Negative
"""
# Login and user mocking is handled with the common login decorator
# Enter the test_ticket's name in element `#updateform_input_name`
self.type("#updateform_input_name", TEST_TICKET.name)
# Enter the test_ticket's quantity in element `#updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter a date in an invalid format (ex. 20201331) into the element `#updateform_input_expiry`
self.type("#updateform_input_expiry", "20201331")
# Click the element `#updateform_submit`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `.message_error` element shows an error message stating
# “Unable to update ticket: Date must be given in the format YYYYMMDD (e.g. 20200901)”.
self.assert_text("Unable to update ticket: Date must be given in the format YYYYMMDD (e.g. 20200901)", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_valid_date(self, *_):
"""
R5.5.2: Date must be given in the format YYYYMMDD (e.g. 20200901) - Positive.
"""
# Login and user mocking is handled with the common login decorator
# Enter test ticket's name into the element `#updateform_input_name`
self.type("#updateform_input_name", TEST_TICKET.name)
# Enter the test_ticket's quantity in element `#updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Call function to get today's date and enter it into the element
# `#updateform_input_expiry`. Today's date is used so that the date is never in the past.
self.type("#updateform_input_expiry", datetime.now().strftime("%Y%m%d"))
# Click the element `#updateform_submit`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `.message_info` element shows "Ticket was updated successfully"
self.assert_text("Ticket was updated successfully", selector = '.message_info')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@auto_login(TEST_USER)
def test_update_ticket_non_existent(self, *_):
"""
R5.6.1: The ticket of the given name must exist - Negative.
"""
# Login and user mocking is handled with the common login decorator
# Enter "nonExistentTicket" in element `#updateform_input_name`
self.type("#updateform_input_name", "nonExistentTicket")
# Enter the test_ticket's quantity in element `#updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click the element `#updateform_submit`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `.message_error` element shows an error message stating
# “Unable to update ticket: The ticket of the given name must exist.”
self.assert_text("Unable to update ticket: The ticket of the given name must exist.", selector = '.message_error')
@patch('qa327.backend.get_all_tickets', return_value=[TEST_TICKET])
@patch('qa327.backend.update_ticket', return_value=None)
@auto_login(TEST_USER)
def test_update_ticket_error_redirect(self, *_):
"""
R5.7.1: For any errors, redirect back to / and show an error message.
"""
# Login and user mocking is handled with the common login decorator
# Enter " no!tATicket " in element `#updateform_input_name`
self.type("#updateform_input_name", " no!tATicket ")
# Enter the test_ticket's quantity in element `#updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click the element `#updateform_submit`
self.click('#updateform_submit')
# Validate that the page has been redirected to '/'
self.assert_equal(self.get_current_url(), base_url + '/')
# Validate that the `.message_error` element is shown.
self.assert_element(".message_error")
| R5.2: The name of the ticket is no longer than 60 characters - Negative.
"""
# Login and user mocking is handled with the common login decorator
# Enter “aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa”
# (61 chars) in the element `#updateform_input_name`
self.type("#updateform_input_name", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
# Enter the test_ticket's quantity in element `#updateform_input_quantity`
self.type("#updateform_input_quantity", str(TEST_TICKET.quantity))
# Enter the test_ticket's price in element `#updateform_input_price`
self.type("#updateform_input_price", str(TEST_TICKET.price))
# Enter the test_ticket's expiry date in element `#updateform_input_expiry`
self.type("#updateform_input_expiry", TEST_TICKET.raw_expiry)
# Click the element `#updateform_submit`
self.click('#updateform_submit')
# the welcome element is unique to the profile page
self.assert_element("#welcome")
# Validate that the `.message_error` element shows an error message stating
# “Unable to update ticket: The name of the ticket should be no longer than 60 characters”.
self.assert_text("Unable to update ticket: The name of the ticket should be no longer than 60 characters", selector = '.message_error')
@patch('qa327 |
App.test.tsx | import React from 'react'
import { render } from '@testing-library/react'
import App from './App'
test('renders learn react link', () => {
const { getByText } = render(<App />)
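// "Выберите группу" ("Select a group") is the text the rendered app is expected to show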
const linkElement = getByText(/Выберите группу/i)
expect(linkElement).toBeInTheDocument() | expect(linkElement).toBeVisible()
}) |
|
parser.rs | use log::debug;
use crate::ast::{
Ast, BinaryOperator, Block, Definition, Expression, File, Function, Statement, Type, UnaryOperator, VariableKind,
};
use crate::lexer::{Token, TokenType};
// TODO: Include the code string with this struct. That makes it self-referential, though, so we would need ouroboros (?).
#[derive(Debug, Clone, Copy)]
pub struct TokenStream<'a> {
tokens: &'a [Token<'a>],
index: usize,
}
impl<'a> TokenStream<'a> {
pub const fn new(tokens: &'a [Token<'a>]) -> Self {
Self { tokens, index: 0 }
}
pub fn strip_comments(tokens: Vec<Token<'a>>) -> Vec<Token<'a>> {
tokens.into_iter().filter(|token| token.type_ != TokenType::Comment).collect()
}
pub const fn len(&self) -> usize {
self.tokens.len()
}
pub const fn is_end(&self) -> bool {
self.index >= self.len()
}
pub fn next(&mut self, error_message: &str) -> Result<Token<'a>, String> {
if self.is_end() {
Err(error_message.to_owned())
} else {
self.index += 1;
Ok(self.tokens[self.index - 1])
}
}
pub fn lookahead(&self, amount: usize, error_message: &str) -> Result<&[Token<'a>], String> {
self.tokens.get(self.index .. self.index + amount).ok_or_else(|| error_message.to_owned())
}
#[allow(clippy::cast_possible_wrap)]
pub fn backtrack(&mut self, amount: usize, error_message: &str) -> Result<(), String> {
if (self.index as isize - amount as isize) < 0 {
Err(error_message.to_owned())
} else {
self.index -= amount;
Ok(())
}
}
pub fn make_substream(&self) -> Self {
Self { tokens: &self.tokens[self.index ..], index: 0 }
}
pub fn limit_to_first(&mut self, type_: TokenType) -> &Self {
let mut current_index = self.index;
while let Some(next_token) = self.tokens.get(current_index ..= current_index) {
if next_token[0].type_ == type_ {
break;
}
current_index += 1;
}
self.tokens = &self.tokens[.. current_index];
self
}
pub fn advance_past_other(&mut self, other_stream: &Self) -> Result<&Self, String> {
let other_token = other_stream.lookahead(1, "Expected any token")?[0];
while let Ok(next_token) = self.next("") {
if next_token == other_token {
break;
}
}
Ok(self)
}
}
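// Substream workflow sketch (illustrative; mirrors parse_statement below):
// carve out the tokens up to a delimiter, parse them in isolation, then
// re-synchronize the parent stream past what was consumed:
//
//	let mut sub = stream.make_substream();
//	sub.limit_to_first(TokenType::Semicolon);
//	let expr = parse_expression(&mut sub)?;
//	sub.backtrack(1, "empty expression")?;
//	stream.advance_past_other(&sub)?;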
pub fn parse(tokens: Vec<Token>) -> Result<Box<dyn Ast>, String> {
let mut definitions = Vec::<Definition>::new();
let stripped_tokens = TokenStream::strip_comments(tokens);
let mut token_stream = TokenStream::new(&stripped_tokens);
while !token_stream.is_end() {
definitions.push(parse_definition(&mut token_stream)?);
}
Ok(Box::new(File { definitions }))
}
fn parse_definition(token_iterator: &mut TokenStream) -> Result<Definition, String> {
let first_token = token_iterator.lookahead(1, "Expected definition")?[0];
if first_token.expect(TokenType::Function).is_ok() {
token_iterator.next("")?;
let function_name = token_iterator.next("Expected function name")?.expect(TokenType::Identifer)?;
token_iterator.next("Expected '('")?.expect(TokenType::OpenParenthesis)?;
// TODO: Parse arguments
token_iterator.next("Expected ')'")?.expect(TokenType::CloseParenthesis)?;
let function_body = parse_block(token_iterator)?;
token_iterator.next("Expected '}'")?.expect(TokenType::CloseBrace)?;
Ok(Definition::Function(Function { name: function_name.text().to_string(), body: function_body }))
} else {
Ok(Definition::Statement(parse_statement(token_iterator)?))
}
}
fn parse_block(token_iterator: &mut TokenStream) -> Result<Block, String> {
token_iterator.next("Expected '{'")?.expect(TokenType::OpenBrace)?;
let mut statements = Vec::<Statement>::new();
let mut maybe_peeked_next = token_iterator.lookahead(1, "Expected statement").map(|peeked_next| peeked_next[0]);
while maybe_peeked_next.map_or(false, |peeked_next| peeked_next.type_ != TokenType::CloseBrace) {
statements.push(parse_statement(token_iterator)?);
maybe_peeked_next = token_iterator.lookahead(1, "Expected statement").map(|peeked_next| peeked_next[0]);
}
// TODO: Parse last expression
Ok(Block { statements, value: None })
}
fn parse_statement(token_iterator: &mut TokenStream) -> Result<Statement, String> {
// TODO: Parse other statement types
let kind = token_iterator.next("Expected statement")?.expect_any(&[TokenType::Var, TokenType::Const])?;
let identifier = token_iterator.next("Expected identifier")?.expect(TokenType::Identifer)?;
token_iterator.next("Expected ':'")?.expect(TokenType::Colon)?;
// TODO: Allow any type
let _type = token_iterator.next("Expected type")?.expect(TokenType::N64)?;
// Get the initializer if it exists
let initializer =
if let Ok(_equals) = token_iterator.lookahead(1, "Expected '=' or ';'")?[0].expect(TokenType::Equals) {
token_iterator.next("")?;
let mut expression_stream = token_iterator.make_substream();
expression_stream.limit_to_first(TokenType::Semicolon);
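// The substream now ends at the terminating ';', so expression parsing cannot run past this statement.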
// debug!("{:#?}", expression_stream);
let expression = parse_expression(&mut expression_stream)?;
expression_stream.backtrack(1, "cannot backtrack past the start of the expression stream")?;
token_iterator.advance_past_other(&expression_stream)?;
token_iterator.next("Expected ';'")?.expect(TokenType::Semicolon)?;
Some(expression)
} else {
token_iterator.next("").unwrap().expect(TokenType::Semicolon)?;
None
};
Ok(Statement::VariableDeclaration {
kind: if kind.type_ == TokenType::Const { VariableKind::Immutable } else { VariableKind::Mutable },
name: identifier.text().to_string(),
type_: Type::N64,
initial_value: initializer,
})
}
fn parse_expression(token_iterator: &mut TokenStream) -> Result<Expression, String> {
parse_binary_operation(token_iterator, parse_term, &[TokenType::Plus, TokenType::Minus])
}
fn parse_binary_operation(
token_iterator: &mut TokenStream,
sub_operation_parser: fn(&mut TokenStream) -> Result<Expression, String>,
operators: &[TokenType],
) -> Result<Expression, String> {
let mut lhs = sub_operation_parser(token_iterator)?;
let mut maybe_operator =
token_iterator.lookahead(1, &format!("Expected any of {:?}", operators)).map(|tokens| tokens[0]);
while let Ok(operator) = maybe_operator {
if !operators.contains(&operator.type_) {
break;
}
token_iterator.next("")?;
let rhs = sub_operation_parser(token_iterator)?;
lhs = Expression::BinaryOperation {
operator: BinaryOperator::from_token_type(operator.type_).unwrap(),
lhs: Box::new(lhs),
rhs: Box::new(rhs),
};
maybe_operator =
token_iterator.lookahead(1, &format!("Expected any of {:?}", operators)).map(|tokens| tokens[0]);
}
Ok(lhs)
}
fn parse_term(token_iterator: &mut TokenStream) -> Result<Expression, String> {
parse_binary_operation(token_iterator, parse_factor, &[TokenType::Star, TokenType::Slash])
}
fn parse_factor(token_iterator: &mut TokenStream) -> Result<Expression, String> {
let token = token_iterator.lookahead(1, "Expected expression")?[0];
match token.type_ {
TokenType::Plus | TokenType::Minus => {
token_iterator.next("")?;
let unary_operand = parse_factor(token_iterator)?;
Ok(Expression::UnaryOperation(
UnaryOperator::from_token_type(token.type_).unwrap(),
Box::new(unary_operand),
))
},
TokenType::IntegerLiteral => {
let number_literal = token_iterator.next("")?.expect(TokenType::IntegerLiteral)?;
// TODO: Use our own integer parser
let value: i128 =
number_literal.text().parse().map_err(|err| format!("Invalid number literal '{}'", err))?;
Ok(Expression::NaturalLiteral(value))
},
_ => Err(format!("Unknown start of expression '{:?}'", token.type_)),
}
}
#[cfg(test)]
mod test {
extern crate test;
use test::Bencher;
use super::*;
use crate::lexer;
#[bench]
fn bench_parser(bencher: &mut Bencher) {
let file_contents = std::fs::read_to_string("../powder-dev/simple.pw").unwrap();
let tokens = lexer::lex(&file_contents).unwrap();
bencher.iter(move || parse(tokens.clone()));
}
fn evaluate_expression(expression: Expression) -> i128 {
match expression {
Expression::NaturalLiteral(value) => value,
Expression::UnaryOperation(UnaryOperator::Plus, value) => evaluate_expression(*value),
Expression::UnaryOperation(UnaryOperator::Minus, value) => -evaluate_expression(*value),
Expression::BinaryOperation { lhs, operator: BinaryOperator::Add, rhs } =>
evaluate_expression(*lhs) + evaluate_expression(*rhs),
Expression::BinaryOperation { lhs, operator: BinaryOperator::Subtract, rhs } =>
evaluate_expression(*lhs) - evaluate_expression(*rhs),
Expression::BinaryOperation { lhs, operator: BinaryOperator::Multiply, rhs } =>
evaluate_expression(*lhs) * evaluate_expression(*rhs),
Expression::BinaryOperation { lhs, operator: BinaryOperator::Divide, rhs } =>
evaluate_expression(*lhs) / evaluate_expression(*rhs),
_ => panic!("Unsupported expression {:?} for evaluation", expression),
}
}
static mut TOKEN_STREAMS: Vec<Vec<Token>> = Vec::<Vec<Token>>::new();
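// Test-only workaround: TokenStream borrows its tokens, so the lexed tokens are
// stashed in this static to give them a 'static lifetime instead of threading
// lifetimes through every test.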
fn create_tokens(code: &'static str) -> Result<TokenStream<'static>, String> {
unsafe {
TOKEN_STREAMS.push(lexer::lex(code)?);
Ok(TokenStream::new(&TOKEN_STREAMS[TOKEN_STREAMS.len() - 1]))
}
}
#[test]
fn test_parse_expression() -> Result<(), String> |
}
| {
assert_eq!(evaluate_expression(parse_expression(&mut create_tokens("-15")?)?), -15);
assert_eq!(evaluate_expression(parse_expression(&mut create_tokens("0 + 7 * 3 + 5")?)?), 26);
assert_eq!(evaluate_expression(parse_expression(&mut create_tokens("1 - 5")?)?), -4);
assert_eq!(evaluate_expression(parse_expression(&mut create_tokens("8 * 0 + 4")?)?), 4);
assert_eq!(evaluate_expression(parse_expression(&mut create_tokens("22 - 6 *2+1")?)?), 11);
assert_eq!(evaluate_expression(parse_expression(&mut create_tokens("2 - 2 - 1")?)?), -1);
Ok(())
} |
test_utils.rs | use crate::{
chunk::{self, ChunkMetrics, ParquetChunk},
metadata::{IoxMetadata, IoxParquetMetaData},
storage::Storage,
};
use arrow::{
array::{
Array, ArrayRef, BooleanArray, DictionaryArray, Float64Array, Int64Array, StringArray,
TimestampNanosecondArray, UInt64Array,
},
datatypes::{Int32Type, SchemaRef},
record_batch::RecordBatch,
};
use data_types::{
chunk_metadata::{ChunkAddr, ChunkId, ChunkOrder},
partition_metadata::{ColumnSummary, InfluxDbType, StatValues, Statistics, TableSummary},
server_id::ServerId,
DatabaseName,
};
use datafusion::physical_plan::SendableRecordBatchStream;
use datafusion_util::MemoryStream;
use futures::TryStreamExt;
use iox_object_store::{IoxObjectStore, ParquetFilePath};
use object_store::ObjectStore;
use parquet::{
arrow::{ArrowReader, ParquetFileArrowReader},
file::serialized_reader::{SerializedFileReader, SliceableCursor},
};
use persistence_windows::{
checkpoint::{DatabaseCheckpoint, PartitionCheckpoint, PersistCheckpointBuilder},
min_max_sequence::OptionalMinMaxSequence,
};
use schema::selection::Selection;
use schema::{builder::SchemaBuilder, Schema, TIME_COLUMN_NAME};
use snafu::{ResultExt, Snafu};
use std::{collections::BTreeMap, num::NonZeroU32, sync::Arc};
use time::Time;
#[derive(Debug, Snafu)]
pub enum Error {
#[snafu(display("Error getting data from object store: {}", source))]
GettingDataFromObjectStore { source: object_store::Error },
#[snafu(display("Error reading chunk dato from object store: {}", source))]
ReadingChunk { source: chunk::Error },
#[snafu(display("Error loading data from object store"))]
LoadingFromObjectStore {},
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
#[derive(Debug, Clone, Copy)]
pub enum TestSize {
Minimal,
Full,
}
impl TestSize {
pub fn is_minimal(&self) -> bool {
matches!(self, Self::Minimal)
}
pub fn is_full(&self) -> bool {
matches!(self, Self::Full)
}
}
/// Load parquet from store and return parquet bytes.
// This function is for test only
pub async fn load_parquet_from_store(
chunk: &ParquetChunk,
store: Arc<IoxObjectStore>,
) -> Result<Vec<u8>> {
load_parquet_from_store_for_chunk(chunk, store).await
}
pub async fn load_parquet_from_store_for_chunk(
chunk: &ParquetChunk,
store: Arc<IoxObjectStore>,
) -> Result<Vec<u8>> {
let path = chunk.path();
Ok(load_parquet_from_store_for_path(path, store).await?)
}
pub async fn load_parquet_from_store_for_path(
path: &ParquetFilePath,
store: Arc<IoxObjectStore>,
) -> Result<Vec<u8>> {
let parquet_data = store
.get_parquet_file(path)
.await
.context(GettingDataFromObjectStore)?
.map_ok(|bytes| bytes.to_vec())
.try_concat()
.await
.context(GettingDataFromObjectStore)?;
Ok(parquet_data)
}
/// The db name to use for testing
pub fn db_name() -> &'static str {
"db1"
}
/// Creates a test chunk address for a given chunk id
pub fn chunk_addr(id: u128) -> ChunkAddr {
ChunkAddr {
db_name: Arc::from(db_name()),
table_name: Arc::from("table1"),
partition_key: Arc::from("part1"),
chunk_id: ChunkId::new_test(id),
}
}
/// Create a test chunk by writing generated data to object store; see [`make_chunk_given_record_batch`].
pub async fn make_chunk(
iox_object_store: Arc<IoxObjectStore>,
column_prefix: &str,
addr: ChunkAddr,
test_size: TestSize,
) -> ParquetChunk {
let (record_batches, schema, column_summaries, _num_rows) =
make_record_batch(column_prefix, test_size);
make_chunk_given_record_batch(
iox_object_store,
record_batches,
schema,
addr,
column_summaries,
)
.await
}
/// Same as [`make_chunk`] but parquet file does not contain any row group.
pub async fn make_chunk_no_row_group(
store: Arc<IoxObjectStore>,
column_prefix: &str,
addr: ChunkAddr,
test_size: TestSize,
) -> ParquetChunk {
let (_, schema, column_summaries, _num_rows) = make_record_batch(column_prefix, test_size);
make_chunk_given_record_batch(store, vec![], schema, addr, column_summaries).await
}
/// Create a test chunk by writing data to object store.
///
/// TODO: This code creates a chunk that isn't hooked up with metrics
pub async fn make_chunk_given_record_batch(
iox_object_store: Arc<IoxObjectStore>,
record_batches: Vec<RecordBatch>,
schema: Schema,
addr: ChunkAddr,
column_summaries: Vec<ColumnSummary>,
) -> ParquetChunk {
let storage = Storage::new(Arc::clone(&iox_object_store));
let table_summary = TableSummary {
name: addr.table_name.to_string(),
columns: column_summaries,
};
let stream: SendableRecordBatchStream = if record_batches.is_empty() {
Box::pin(MemoryStream::new_with_schema(
record_batches,
Arc::clone(schema.inner()),
))
} else {
Box::pin(MemoryStream::new(record_batches))
};
let (partition_checkpoint, database_checkpoint) = create_partition_and_database_checkpoint(
Arc::clone(&addr.table_name),
Arc::clone(&addr.partition_key),
);
let metadata = IoxMetadata {
creation_timestamp: Time::from_timestamp(10, 20),
table_name: Arc::clone(&addr.table_name),
partition_key: Arc::clone(&addr.partition_key),
chunk_id: addr.chunk_id,
partition_checkpoint,
database_checkpoint,
time_of_first_write: Time::from_timestamp(30, 40),
time_of_last_write: Time::from_timestamp(50, 60),
chunk_order: ChunkOrder::new(5).unwrap(),
};
let (path, file_size_bytes, parquet_metadata) = storage
.write_to_object_store(addr.clone(), stream, metadata)
.await |
ParquetChunk::new_from_parts(
addr.partition_key,
Arc::new(table_summary),
Arc::new(schema),
&path,
Arc::clone(&iox_object_store),
file_size_bytes,
Arc::new(parquet_metadata),
rows,
ChunkMetrics::new_unregistered(),
)
}
fn create_column_tag(
name: &str,
data: Vec<Vec<Option<&str>>>,
arrow_cols: &mut Vec<Vec<(String, ArrayRef, bool)>>,
summaries: &mut Vec<ColumnSummary>,
schema_builder: &mut SchemaBuilder,
) {
assert_eq!(data.len(), arrow_cols.len());
for (arrow_cols_sub, data_sub) in arrow_cols.iter_mut().zip(data.iter()) {
let array: DictionaryArray<Int32Type> = data_sub.iter().cloned().collect();
let array: Arc<dyn Array> = Arc::new(array);
arrow_cols_sub.push((name.to_string(), Arc::clone(&array), true));
}
let total_count = data.iter().flatten().filter_map(|x| x.as_ref()).count() as u64;
let null_count = data.iter().flatten().filter(|x| x.is_none()).count() as u64;
summaries.push(ColumnSummary {
name: name.to_string(),
influxdb_type: Some(InfluxDbType::Tag),
stats: Statistics::String(StatValues {
min: data
.iter()
.flatten()
.filter_map(|x| x.as_ref())
.min()
.map(|x| x.to_string()),
max: data
.iter()
.flatten()
.filter_map(|x| x.as_ref())
.max()
.map(|x| x.to_string()),
total_count,
null_count,
distinct_count: None,
}),
});
schema_builder.tag(name);
}
fn create_columns_tag(
column_prefix: &str,
test_size: TestSize,
arrow_cols: &mut Vec<Vec<(String, ArrayRef, bool)>>,
summaries: &mut Vec<ColumnSummary>,
schema_builder: &mut SchemaBuilder,
) {
create_column_tag(
&format!("{}_tag_normal", column_prefix),
vec![
vec![Some("foo")],
vec![Some("bar")],
vec![Some("baz"), Some("foo")],
],
arrow_cols,
summaries,
schema_builder,
);
if test_size.is_full() {
create_column_tag(
&format!("{}_tag_empty", column_prefix),
vec![vec![Some("")], vec![Some("")], vec![Some(""), Some("")]],
arrow_cols,
summaries,
schema_builder,
);
create_column_tag(
&format!("{}_tag_null_some", column_prefix),
vec![vec![None], vec![Some("bar")], vec![Some("baz"), None]],
arrow_cols,
summaries,
schema_builder,
);
create_column_tag(
&format!("{}_tag_null_all", column_prefix),
vec![vec![None], vec![None], vec![None, None]],
arrow_cols,
summaries,
schema_builder,
);
}
}
fn create_column_field_string(
name: &str,
data: Vec<Vec<Option<&str>>>,
arrow_cols: &mut Vec<Vec<(String, ArrayRef, bool)>>,
summaries: &mut Vec<ColumnSummary>,
schema_builder: &mut SchemaBuilder,
) {
create_column_field_generic::<StringArray, _, _>(
name,
data,
arrow_cols,
summaries,
schema_builder,
|StatValues {
min,
max,
total_count,
null_count,
distinct_count,
}| {
Statistics::String(StatValues {
min: min.map(|x| x.to_string()),
max: max.map(|x| x.to_string()),
total_count,
null_count,
distinct_count,
})
},
)
}
fn create_columns_field_string(
column_prefix: &str,
test_size: TestSize,
arrow_cols: &mut Vec<Vec<(String, ArrayRef, bool)>>,
summaries: &mut Vec<ColumnSummary>,
schema_builder: &mut SchemaBuilder,
) {
if test_size.is_full() {
create_column_field_string(
&format!("{}_field_string_normal", column_prefix),
vec![
vec![Some("foo")],
vec![Some("bar")],
vec![Some("baz"), Some("foo")],
],
arrow_cols,
summaries,
schema_builder,
);
create_column_field_string(
&format!("{}_field_string_empty", column_prefix),
vec![vec![Some("")], vec![Some("")], vec![Some(""), Some("")]],
arrow_cols,
summaries,
schema_builder,
);
create_column_field_string(
&format!("{}_field_string_null_some", column_prefix),
vec![vec![None], vec![Some("bar")], vec![Some("baz"), None]],
arrow_cols,
summaries,
schema_builder,
);
create_column_field_string(
&format!("{}_field_string_null_all", column_prefix),
vec![vec![None], vec![None], vec![None, None]],
arrow_cols,
summaries,
schema_builder,
);
}
}
fn create_column_field_i64(
name: &str,
data: Vec<Vec<Option<i64>>>,
arrow_cols: &mut Vec<Vec<(String, ArrayRef, bool)>>,
summaries: &mut Vec<ColumnSummary>,
schema_builder: &mut SchemaBuilder,
) {
create_column_field_generic::<Int64Array, _, _>(
name,
data,
arrow_cols,
summaries,
schema_builder,
Statistics::I64,
)
}
fn create_columns_field_i64(
column_prefix: &str,
test_size: TestSize,
arrow_cols: &mut Vec<Vec<(String, ArrayRef, bool)>>,
summaries: &mut Vec<ColumnSummary>,
schema_builder: &mut SchemaBuilder,
) {
create_column_field_i64(
&format!("{}_field_i64_normal", column_prefix),
vec![vec![Some(-1)], vec![Some(2)], vec![Some(3), Some(4)]],
arrow_cols,
summaries,
schema_builder,
);
if test_size.is_full() {
create_column_field_i64(
&format!("{}_field_i64_range", column_prefix),
vec![
vec![Some(i64::MIN)],
vec![Some(i64::MAX)],
vec![Some(i64::MIN), Some(i64::MAX)],
],
arrow_cols,
summaries,
schema_builder,
);
create_column_field_i64(
&format!("{}_field_i64_null_some", column_prefix),
vec![vec![None], vec![Some(2)], vec![Some(3), None]],
arrow_cols,
summaries,
schema_builder,
);
create_column_field_i64(
&format!("{}_field_i64_null_all", column_prefix),
vec![vec![None], vec![None], vec![None, None]],
arrow_cols,
summaries,
schema_builder,
);
}
}
fn create_column_field_u64(
name: &str,
data: Vec<Vec<Option<u64>>>,
arrow_cols: &mut Vec<Vec<(String, ArrayRef, bool)>>,
summaries: &mut Vec<ColumnSummary>,
schema_builder: &mut SchemaBuilder,
) {
create_column_field_generic::<UInt64Array, _, _>(
name,
data,
arrow_cols,
summaries,
schema_builder,
Statistics::U64,
)
}
fn create_columns_field_u64(
column_prefix: &str,
test_size: TestSize,
arrow_cols: &mut Vec<Vec<(String, ArrayRef, bool)>>,
summaries: &mut Vec<ColumnSummary>,
schema_builder: &mut SchemaBuilder,
) {
if test_size.is_full() {
create_column_field_u64(
&format!("{}_field_u64_normal", column_prefix),
vec![vec![Some(1u64)], vec![Some(2)], vec![Some(3), Some(4)]],
arrow_cols,
summaries,
schema_builder,
);
create_column_field_u64(
&format!("{}_field_u64_range", column_prefix),
vec![
vec![Some(u64::MIN)],
vec![Some(u64::MAX)],
vec![Some(u64::MIN), Some(u64::MAX)],
],
arrow_cols,
summaries,
schema_builder,
);
create_column_field_u64(
&format!("{}_field_u64_null_some", column_prefix),
vec![vec![None], vec![Some(2)], vec![Some(3), None]],
arrow_cols,
summaries,
schema_builder,
);
create_column_field_u64(
&format!("{}_field_u64_null_all", column_prefix),
vec![vec![None], vec![None], vec![None, None]],
arrow_cols,
summaries,
schema_builder,
);
}
}
fn create_column_field_f64(
name: &str,
data: Vec<Vec<Option<f64>>>,
arrow_cols: &mut Vec<Vec<(String, ArrayRef, bool)>>,
summaries: &mut Vec<ColumnSummary>,
schema_builder: &mut SchemaBuilder,
) {
assert_eq!(data.len(), arrow_cols.len());
let mut array_data_type = None;
for (arrow_cols_sub, data_sub) in arrow_cols.iter_mut().zip(data.iter()) {
let array: Arc<dyn Array> = Arc::new(Float64Array::from(data_sub.clone()));
arrow_cols_sub.push((name.to_string(), Arc::clone(&array), true));
array_data_type = Some(array.data_type().clone());
}
let total_count = data.iter().flatten().filter_map(|x| x.as_ref()).count() as u64;
let null_count = data.iter().flatten().filter(|x| x.is_none()).count() as u64;
summaries.push(ColumnSummary {
name: name.to_string(),
influxdb_type: Some(InfluxDbType::Field),
stats: Statistics::F64(StatValues {
min: data
.iter()
.flatten()
.filter_map(|x| x.as_ref())
.filter(|x| !x.is_nan())
.min_by(|a, b| a.partial_cmp(b).unwrap())
.cloned(),
max: data
.iter()
.flatten()
.filter_map(|x| x.as_ref())
.filter(|x| !x.is_nan())
.max_by(|a, b| a.partial_cmp(b).unwrap())
.cloned(),
total_count,
null_count,
distinct_count: None,
}),
});
schema_builder.field(name, array_data_type.unwrap());
}
fn create_columns_field_f64(
column_prefix: &str,
test_size: TestSize,
arrow_cols: &mut Vec<Vec<(String, ArrayRef, bool)>>,
summaries: &mut Vec<ColumnSummary>,
schema_builder: &mut SchemaBuilder,
) {
if test_size.is_full() {
create_column_field_f64(
&format!("{}_field_f64_normal", column_prefix),
vec![
vec![Some(10.1)],
vec![Some(20.1)],
vec![Some(30.1), Some(40.1)],
],
arrow_cols,
summaries,
schema_builder,
);
create_column_field_f64(
&format!("{}_field_f64_inf", column_prefix),
vec![
vec![Some(0.0)],
vec![Some(f64::INFINITY)],
vec![Some(f64::NEG_INFINITY), Some(1.0)],
],
arrow_cols,
summaries,
schema_builder,
);
create_column_field_f64(
&format!("{}_field_f64_zero", column_prefix),
vec![
vec![Some(0.0)],
vec![Some(-0.0)],
vec![Some(0.0), Some(-0.0)],
],
arrow_cols,
summaries,
schema_builder,
);
let nan1 = f64::from_bits(0x7ff8000000000001);
let nan2 = f64::from_bits(0x7ff8000000000002);
assert!(nan1.is_nan());
assert!(nan2.is_nan());
create_column_field_f64(
&format!("{}_field_f64_nan_some", column_prefix),
vec![
vec![Some(nan1)],
vec![Some(2.0)],
vec![Some(1.0), Some(nan2)],
],
arrow_cols,
summaries,
schema_builder,
);
create_column_field_f64(
&format!("{}_field_f64_nan_all", column_prefix),
vec![
vec![Some(nan1)],
vec![Some(nan2)],
vec![Some(nan1), Some(nan2)],
],
arrow_cols,
summaries,
schema_builder,
);
create_column_field_f64(
&format!("{}_field_f64_null_some", column_prefix),
vec![vec![None], vec![Some(20.1)], vec![Some(30.1), None]],
arrow_cols,
summaries,
schema_builder,
);
create_column_field_f64(
&format!("{}_field_f64_null_all", column_prefix),
vec![vec![None], vec![None], vec![None, None]],
arrow_cols,
summaries,
schema_builder,
);
}
}
fn create_column_field_bool(
name: &str,
data: Vec<Vec<Option<bool>>>,
arrow_cols: &mut Vec<Vec<(String, ArrayRef, bool)>>,
summaries: &mut Vec<ColumnSummary>,
schema_builder: &mut SchemaBuilder,
) {
create_column_field_generic::<BooleanArray, _, _>(
name,
data,
arrow_cols,
summaries,
schema_builder,
Statistics::Bool,
)
}
fn create_columns_field_bool(
column_prefix: &str,
test_size: TestSize,
arrow_cols: &mut Vec<Vec<(String, ArrayRef, bool)>>,
summaries: &mut Vec<ColumnSummary>,
schema_builder: &mut SchemaBuilder,
) {
if test_size.is_full() {
create_column_field_bool(
&format!("{}_field_bool_normal", column_prefix),
vec![
vec![Some(true)],
vec![Some(false)],
vec![Some(true), Some(false)],
],
arrow_cols,
summaries,
schema_builder,
);
create_column_field_bool(
&format!("{}_field_bool_null_some", column_prefix),
vec![vec![None], vec![Some(false)], vec![Some(true), None]],
arrow_cols,
summaries,
schema_builder,
);
create_column_field_bool(
&format!("{}_field_bool_null_all", column_prefix),
vec![vec![None], vec![None], vec![None, None]],
arrow_cols,
summaries,
schema_builder,
);
}
}
fn create_column_field_generic<A, T, F>(
name: &str,
data: Vec<Vec<Option<T>>>,
arrow_cols: &mut Vec<Vec<(String, ArrayRef, bool)>>,
summaries: &mut Vec<ColumnSummary>,
schema_builder: &mut SchemaBuilder,
f: F,
) where
A: 'static + Array,
A: From<Vec<Option<T>>>,
T: Clone + Ord,
F: Fn(StatValues<T>) -> Statistics,
{
assert_eq!(data.len(), arrow_cols.len());
let mut array_data_type = None;
for (arrow_cols_sub, data_sub) in arrow_cols.iter_mut().zip(data.iter()) {
let array: Arc<dyn Array> = Arc::new(A::from(data_sub.clone()));
arrow_cols_sub.push((name.to_string(), Arc::clone(&array), true));
array_data_type = Some(array.data_type().clone());
}
let total_count = data.iter().flatten().filter_map(|x| x.as_ref()).count() as u64;
let null_count = data.iter().flatten().filter(|x| x.is_none()).count() as u64;
summaries.push(ColumnSummary {
name: name.to_string(),
influxdb_type: Some(InfluxDbType::Field),
stats: f(StatValues {
min: data
.iter()
.flatten()
.filter_map(|x| x.as_ref())
.min()
.cloned(),
max: data
.iter()
.flatten()
.filter_map(|x| x.as_ref())
.max()
.cloned(),
total_count,
null_count,
distinct_count: None,
}),
});
schema_builder.field(name, array_data_type.unwrap());
}
fn create_column_timestamp(
data: Vec<Vec<i64>>,
arrow_cols: &mut Vec<Vec<(String, ArrayRef, bool)>>,
summaries: &mut Vec<ColumnSummary>,
schema_builder: &mut SchemaBuilder,
) {
assert_eq!(data.len(), arrow_cols.len());
for (arrow_cols_sub, data_sub) in arrow_cols.iter_mut().zip(data.iter()) {
let array: Arc<dyn Array> =
Arc::new(TimestampNanosecondArray::from_vec(data_sub.clone(), None));
arrow_cols_sub.push((TIME_COLUMN_NAME.to_string(), Arc::clone(&array), true));
}
let min = data.iter().flatten().min().cloned();
let max = data.iter().flatten().max().cloned();
let total_count = data.iter().map(Vec::len).sum::<usize>() as u64;
let null_count = 0; // no nulls in timestamp
summaries.push(ColumnSummary {
name: TIME_COLUMN_NAME.to_string(),
influxdb_type: Some(InfluxDbType::Timestamp),
stats: Statistics::I64(StatValues {
min,
max,
total_count,
null_count,
distinct_count: None,
}),
});
schema_builder.timestamp();
}
/// Creates an Arrow RecordBatches with schema and IOx statistics.
///
/// Generated columns are prefixed with `column_prefix`.
///
/// RecordBatches, schema and IOx statistics will be generated in separate ways to emulate what the normal data
/// ingestion would do. This also ensures that the Parquet data that will later be created out of the RecordBatch is
/// indeed self-contained and can act as a source to recover schema and statistics.
pub fn make_record_batch(
column_prefix: &str,
test_size: TestSize,
) -> (Vec<RecordBatch>, Schema, Vec<ColumnSummary>, usize) {
// (name, array, nullable)
let mut arrow_cols: Vec<Vec<(String, ArrayRef, bool)>> = vec![vec![], vec![], vec![]];
let mut summaries = vec![];
let mut schema_builder = SchemaBuilder::new();
// tag
create_columns_tag(
column_prefix,
test_size,
&mut arrow_cols,
&mut summaries,
&mut schema_builder,
);
// field: string
create_columns_field_string(
column_prefix,
test_size,
&mut arrow_cols,
&mut summaries,
&mut schema_builder,
);
// field: i64
create_columns_field_i64(
column_prefix,
test_size,
&mut arrow_cols,
&mut summaries,
&mut schema_builder,
);
// field: u64
create_columns_field_u64(
column_prefix,
test_size,
&mut arrow_cols,
&mut summaries,
&mut schema_builder,
);
// field: f64
create_columns_field_f64(
column_prefix,
test_size,
&mut arrow_cols,
&mut summaries,
&mut schema_builder,
);
// field: bool
create_columns_field_bool(
column_prefix,
test_size,
&mut arrow_cols,
&mut summaries,
&mut schema_builder,
);
// time
create_column_timestamp(
vec![vec![1000], vec![2000], vec![3000, 4000]],
&mut arrow_cols,
&mut summaries,
&mut schema_builder,
);
// build record batches
let mut num_rows = 0;
let schema = schema_builder.build().expect("schema building");
let mut record_batches = vec![];
for arrow_cols_sub in arrow_cols {
let record_batch = RecordBatch::try_from_iter_with_nullable(arrow_cols_sub)
.expect("created new record batch");
// The builder-generated schema contains some extra metadata that we need in our recordbatch
let record_batch =
RecordBatch::try_new(Arc::clone(schema.inner()), record_batch.columns().to_vec())
.expect("record-batch re-creation");
num_rows += record_batch.num_rows();
record_batches.push(record_batch);
}
(record_batches, schema, summaries, num_rows)
}
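// Usage sketch (illustrative): the minimal variant seeds three row groups with
// 1 + 1 + 2 rows, so:
//
//	let (batches, _schema, _summaries, num_rows) = make_record_batch("foo", TestSize::Minimal);
//	assert_eq!(batches.len(), 3);
//	assert_eq!(num_rows, 4);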
/// Creates new test server ID
pub fn make_server_id() -> ServerId {
ServerId::new(NonZeroU32::new(1).unwrap())
}
/// Creates new in-memory database iox_object_store for testing.
pub async fn make_iox_object_store() -> Arc<IoxObjectStore> {
let server_id = make_server_id();
let database_name = DatabaseName::new("db1").unwrap();
Arc::new(
IoxObjectStore::new(
Arc::new(ObjectStore::new_in_memory()),
server_id,
&database_name,
)
.await
.unwrap(),
)
}
pub fn read_data_from_parquet_data(schema: SchemaRef, parquet_data: Vec<u8>) -> Vec<RecordBatch> {
let mut record_batches = vec![];
let cursor = SliceableCursor::new(parquet_data);
let reader = SerializedFileReader::new(cursor).unwrap();
let mut arrow_reader = ParquetFileArrowReader::new(Arc::new(reader));
// Indices of columns in the schema needed to read
let projection: Vec<usize> = Storage::column_indices(Selection::All, Arc::clone(&schema));
let mut batch_reader = arrow_reader
.get_record_reader_by_columns(projection, 1024)
.unwrap();
loop {
match batch_reader.next() {
Some(Ok(batch)) => {
// TODO: remove this when arrow-rs ticket https://github.com/apache/arrow-rs/issues/252 is done
let columns = batch.columns().to_vec();
let new_batch = RecordBatch::try_new(Arc::clone(&schema), columns).unwrap();
record_batches.push(new_batch);
}
None => {
break;
}
Some(Err(e)) => {
println!("Error reading batch: {}", e.to_string());
}
}
}
record_batches
}
/// Create test metadata by creating a parquet file and reading it back into memory.
///
/// See [`make_chunk`] for details.
pub async fn make_metadata(
iox_object_store: &Arc<IoxObjectStore>,
column_prefix: &str,
addr: ChunkAddr,
test_size: TestSize,
) -> (ParquetFilePath, IoxParquetMetaData) {
let chunk = make_chunk(Arc::clone(iox_object_store), column_prefix, addr, test_size).await;
let parquet_data = load_parquet_from_store(&chunk, Arc::clone(iox_object_store))
.await
.unwrap();
(
chunk.path().clone(),
IoxParquetMetaData::from_file_bytes(parquet_data).unwrap(),
)
}
/// Create [`PartitionCheckpoint`] and [`DatabaseCheckpoint`] for testing.
pub fn create_partition_and_database_checkpoint(
table_name: Arc<str>,
partition_key: Arc<str>,
) -> (PartitionCheckpoint, DatabaseCheckpoint) {
// create first partition checkpoint
let mut sequencer_numbers_1 = BTreeMap::new();
sequencer_numbers_1.insert(1, OptionalMinMaxSequence::new(None, 18));
sequencer_numbers_1.insert(2, OptionalMinMaxSequence::new(Some(25), 28));
let flush_timestamp = Time::from_timestamp(10, 20);
let partition_checkpoint_1 = PartitionCheckpoint::new(
Arc::clone(&table_name),
Arc::clone(&partition_key),
sequencer_numbers_1,
flush_timestamp,
);
// create second partition
let mut sequencer_numbers_2 = BTreeMap::new();
sequencer_numbers_2.insert(2, OptionalMinMaxSequence::new(Some(24), 29));
sequencer_numbers_2.insert(3, OptionalMinMaxSequence::new(Some(35), 38));
// build database checkpoint
let mut builder = PersistCheckpointBuilder::new(partition_checkpoint_1);
builder.register_other_partition(&sequencer_numbers_2);
builder.build()
} | .unwrap();
let rows = parquet_metadata.decode().unwrap().row_count(); |
app.py | #!/usr/bin/env python3
from flask import Flask, request, redirect
from hashlib import sha256
import hmac
import base64
import time
import urllib
# allow for relative importing if run directly
if __name__ == "__main__":
from config import secrets, reports, listen_port
else:
from .config import secrets, reports, listen_port
app = Flask(__name__)
@app.route('/report/<report>')
def sign_report_url(report):
# check for a valid token
provided_token = request.args.get('token') or 'missing'
if provided_token != secrets.get('access_token'):
return "Missing or incorrect token provided"
# lookup report and generate URL from values
if report in reports:
this_report = reports.get(report)
# Generating the embed URL
mode_report_id = this_report.get('mode_report')
param_name = this_report.get('param_name')
param_value = request.args.get(
'account_id') or this_report.get('param_default_value')
do_iframe = request.args.get('iframe') or False
timestamp = str(int(time.time())) # current time in unix time
url = make_url('https://app.mode.com', secrets.get('mode_team'), 'reports',
mode_report_id, 'embed', access_key=secrets.get('mode_access_key'),
max_age=3600, **{param_name: param_value}, run='now', timestamp=timestamp)
else:
return f"Missing report {report}"
request_type = 'GET'
content_type = ''
# the MD5 digest of an empty content body, which is always the same
content_digest = '1B2M2Y8AsgTpgAmY7PhCfg=='
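# (that constant is base64(md5(b'')); GET requests have no body, but the
# signing scheme still includes the empty-body digest in the string to sign)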
# signature fodder
request_string = ','.join(
[request_type, content_type, str(content_digest), url, timestamp])
signature = hmac.new(bytes(secrets.get('mode_access_secret'), 'utf-8'),
bytes(request_string, 'utf-8'), digestmod=sha256).hexdigest()
signed_url = '%s&signature=%s' % (url, signature)
if do_iframe is not False:
# return the signed URL as an iframe
return f"""
<iframe src='{signed_url}' width='100%' height='100%' frameborder='0'></iframe>
"""
else:
# return the signed URL as a redirect
return redirect(signed_url, code=302)
def make_url(base_url, *res, **params):
url = base_url
for r in res:
url = '{}/{}'.format(url, r)
if params:
url = '{}?{}'.format(url, urllib.parse.urlencode(params))
return url
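# Usage sketch of make_url with illustrative values:
#   make_url('https://app.mode.com', 'myteam', 'reports', 'abc123', 'embed',
#            access_key='KEY', max_age=3600, account_id=42, run='now', timestamp='1700000000')
# returns:
#   https://app.mode.com/myteam/reports/abc123/embed?access_key=KEY&max_age=3600&account_id=42&run=now&timestamp=1700000000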
@app.route('/status')
def status():
return 'Success'
if __name__ == "__main__": | app.run(host='0.0.0.0', port=listen_port) |
|
flatten.rs | use nu_protocol::ast::{
Block, Expr, Expression, ImportPatternMember, PathMember, Pipeline, Statement,
};
use nu_protocol::{engine::StateWorkingSet, Span};
use std::fmt::{Display, Formatter, Result};
#[derive(Debug, Eq, PartialEq, Ord, PartialOrd)]
pub enum FlatShape {
Garbage,
Nothing,
Bool,
Int,
Float,
Range,
InternalCall,
External,
ExternalArg,
Literal,
Operator,
Signature,
String,
StringInterpolation,
List,
Table,
Record,
Block,
Filepath,
GlobPattern,
Variable,
Flag,
Custom(String),
}
impl Display for FlatShape {
fn fmt(&self, f: &mut Formatter) -> Result {
match self {
FlatShape::Garbage => write!(f, "flatshape_garbage"),
FlatShape::Nothing => write!(f, "flatshape_nothing"),
FlatShape::Bool => write!(f, "flatshape_bool"),
FlatShape::Int => write!(f, "flatshape_int"),
FlatShape::Float => write!(f, "flatshape_float"),
FlatShape::Range => write!(f, "flatshape_range"),
FlatShape::InternalCall => write!(f, "flatshape_internalcall"),
FlatShape::External => write!(f, "flatshape_external"),
FlatShape::ExternalArg => write!(f, "flatshape_externalarg"),
FlatShape::Literal => write!(f, "flatshape_literal"),
FlatShape::Operator => write!(f, "flatshape_operator"),
FlatShape::Signature => write!(f, "flatshape_signature"),
FlatShape::String => write!(f, "flatshape_string"),
FlatShape::StringInterpolation => write!(f, "flatshape_string_interpolation"),
            FlatShape::List => write!(f, "flatshape_list"),
FlatShape::Table => write!(f, "flatshape_table"),
FlatShape::Record => write!(f, "flatshape_record"),
FlatShape::Block => write!(f, "flatshape_block"),
FlatShape::Filepath => write!(f, "flatshape_filepath"),
FlatShape::GlobPattern => write!(f, "flatshape_globpattern"),
FlatShape::Variable => write!(f, "flatshape_variable"),
FlatShape::Flag => write!(f, "flatshape_flag"),
FlatShape::Custom(_) => write!(f, "flatshape_custom"),
}
}
}
pub fn flatten_block(working_set: &StateWorkingSet, block: &Block) -> Vec<(Span, FlatShape)> {
let mut output = vec![];
for stmt in &block.stmts {
output.extend(flatten_statement(working_set, stmt));
}
output
}
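// A rough sketch of the output (spans are hypothetical byte offsets): flattening
// a pipeline such as `ls | get name` could yield
//   [(Span { start: 0, end: 2 }, FlatShape::InternalCall),
//    (Span { start: 5, end: 8 }, FlatShape::InternalCall),
//    (Span { start: 9, end: 13 }, FlatShape::String)]
// i.e. one (Span, FlatShape) pair per highlightable token.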
pub fn flatten_statement(
working_set: &StateWorkingSet,
stmt: &Statement,
) -> Vec<(Span, FlatShape)> {
match stmt {
Statement::Pipeline(pipeline) => flatten_pipeline(working_set, pipeline),
_ => vec![],
}
}
pub fn flatten_expression(
working_set: &StateWorkingSet,
expr: &Expression,
) -> Vec<(Span, FlatShape)> {
if let Some(custom_completion) = &expr.custom_completion {
return vec![(expr.span, FlatShape::Custom(custom_completion.clone()))];
}
match &expr.expr {
Expr::BinaryOp(lhs, op, rhs) => {
let mut output = vec![];
output.extend(flatten_expression(working_set, lhs));
output.extend(flatten_expression(working_set, op));
output.extend(flatten_expression(working_set, rhs));
output
}
Expr::Block(block_id) | Expr::RowCondition(block_id) | Expr::Subexpression(block_id) => {
let outer_span = expr.span;
let mut output = vec![];
let flattened = flatten_block(working_set, working_set.get_block(*block_id));
if let Some(first) = flattened.first() {
if first.0.start > outer_span.start {
output.push((
Span {
start: outer_span.start,
end: first.0.start,
},
FlatShape::Block,
));
}
}
let last = if let Some(last) = flattened.last() {
if last.0.end < outer_span.end {
Some((
Span {
start: last.0.end,
end: outer_span.end,
},
                        FlatShape::Block,
))
} else {
None
}
} else {
None
};
output.extend(flattened);
if let Some(last) = last {
output.push(last)
}
output
}
Expr::Call(call) => { | let mut args = vec![];
for positional in &call.positional {
args.extend(flatten_expression(working_set, positional));
}
for named in &call.named {
args.push((named.0.span, FlatShape::Flag));
if let Some(expr) = &named.1 {
args.extend(flatten_expression(working_set, expr));
}
}
// sort these since flags and positional args can be intermixed
args.sort();
output.extend(args);
output
}
Expr::ExternalCall(head, args) => {
let mut output = vec![];
match **head {
Expression {
expr: Expr::String(..),
span,
..
} => {
output.push((span, FlatShape::External));
}
_ => {
output.extend(flatten_expression(working_set, head));
}
}
for arg in args {
//output.push((*arg, FlatShape::ExternalArg));
match arg {
Expression {
expr: Expr::String(..),
span,
..
} => {
output.push((*span, FlatShape::ExternalArg));
}
_ => {
output.extend(flatten_expression(working_set, arg));
}
}
}
output
}
Expr::Garbage => {
vec![(expr.span, FlatShape::Garbage)]
}
Expr::Nothing => {
vec![(expr.span, FlatShape::Nothing)]
}
Expr::Int(_) => {
vec![(expr.span, FlatShape::Int)]
}
Expr::Float(_) => {
vec![(expr.span, FlatShape::Float)]
}
Expr::ValueWithUnit(x, unit) => {
let mut output = flatten_expression(working_set, x);
output.push((unit.span, FlatShape::String));
output
}
Expr::CellPath(cell_path) => {
let mut output = vec![];
for path_element in &cell_path.members {
match path_element {
PathMember::String { span, .. } => output.push((*span, FlatShape::String)),
PathMember::Int { span, .. } => output.push((*span, FlatShape::Int)),
}
}
output
}
Expr::FullCellPath(cell_path) => {
let mut output = vec![];
output.extend(flatten_expression(working_set, &cell_path.head));
for path_element in &cell_path.tail {
match path_element {
PathMember::String { span, .. } => output.push((*span, FlatShape::String)),
PathMember::Int { span, .. } => output.push((*span, FlatShape::Int)),
}
}
output
}
Expr::ImportPattern(import_pattern) => {
let mut output = vec![(import_pattern.head.span, FlatShape::String)];
for member in &import_pattern.members {
match member {
ImportPatternMember::Glob { span } => output.push((*span, FlatShape::String)),
ImportPatternMember::Name { span, .. } => {
output.push((*span, FlatShape::String))
}
ImportPatternMember::List { names } => {
for (_, span) in names {
output.push((*span, FlatShape::String));
}
}
}
}
output
}
Expr::Range(from, next, to, op) => {
let mut output = vec![];
if let Some(f) = from {
output.extend(flatten_expression(working_set, f));
}
if let Some(s) = next {
                output.push((op.next_op_span, FlatShape::Operator));
output.extend(flatten_expression(working_set, s));
}
            output.push((op.span, FlatShape::Operator));
if let Some(t) = to {
output.extend(flatten_expression(working_set, t));
}
output
}
Expr::Bool(_) => {
vec![(expr.span, FlatShape::Bool)]
}
Expr::Filepath(_) => {
vec![(expr.span, FlatShape::Filepath)]
}
Expr::GlobPattern(_) => {
vec![(expr.span, FlatShape::GlobPattern)]
}
Expr::List(list) => {
let outer_span = expr.span;
let mut last_end = outer_span.start;
let mut output = vec![];
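            // Gaps between flattened items (commas, brackets, whitespace) are
            // attributed to the list itself via FlatShape::List below.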
for l in list {
let flattened = flatten_expression(working_set, l);
if let Some(first) = flattened.first() {
if first.0.start > last_end {
output.push((
Span {
start: last_end,
end: first.0.start,
},
FlatShape::List,
));
}
}
if let Some(last) = flattened.last() {
last_end = last.0.end;
}
output.extend(flattened);
}
if last_end < outer_span.end {
output.push((
Span {
start: last_end,
end: outer_span.end,
},
FlatShape::List,
));
}
output
}
Expr::StringInterpolation(exprs) => {
let mut output = vec![(
Span {
start: expr.span.start,
end: expr.span.start + 2,
},
FlatShape::StringInterpolation,
)];
for expr in exprs {
output.extend(flatten_expression(working_set, expr));
}
output.push((
Span {
start: expr.span.end - 1,
end: expr.span.end,
},
FlatShape::StringInterpolation,
));
output
}
Expr::Record(list) => {
let outer_span = expr.span;
let mut last_end = outer_span.start;
let mut output = vec![];
for l in list {
let flattened_lhs = flatten_expression(working_set, &l.0);
let flattened_rhs = flatten_expression(working_set, &l.1);
if let Some(first) = flattened_lhs.first() {
if first.0.start > last_end {
output.push((
Span {
start: last_end,
end: first.0.start,
},
FlatShape::Record,
));
}
}
if let Some(last) = flattened_lhs.last() {
last_end = last.0.end;
}
output.extend(flattened_lhs);
if let Some(first) = flattened_rhs.first() {
if first.0.start > last_end {
output.push((
Span {
start: last_end,
end: first.0.start,
},
FlatShape::Record,
));
}
}
if let Some(last) = flattened_rhs.last() {
last_end = last.0.end;
}
output.extend(flattened_rhs);
}
if last_end < outer_span.end {
output.push((
Span {
start: last_end,
end: outer_span.end,
},
FlatShape::Record,
));
}
output
}
Expr::Keyword(_, span, expr) => {
let mut output = vec![(*span, FlatShape::InternalCall)];
output.extend(flatten_expression(working_set, expr));
output
}
Expr::Operator(_) => {
vec![(expr.span, FlatShape::Operator)]
}
Expr::Signature(_) => {
vec![(expr.span, FlatShape::Signature)]
}
Expr::String(_) => {
vec![(expr.span, FlatShape::String)]
}
Expr::Table(headers, cells) => {
let outer_span = expr.span;
let mut last_end = outer_span.start;
let mut output = vec![];
for e in headers {
let flattened = flatten_expression(working_set, e);
if let Some(first) = flattened.first() {
if first.0.start > last_end {
output.push((
Span {
start: last_end,
end: first.0.start,
},
FlatShape::Table,
));
}
}
if let Some(last) = flattened.last() {
last_end = last.0.end;
}
output.extend(flattened);
}
for row in cells {
for expr in row {
let flattened = flatten_expression(working_set, expr);
if let Some(first) = flattened.first() {
if first.0.start > last_end {
output.push((
Span {
start: last_end,
end: first.0.start,
},
FlatShape::Table,
));
}
}
if let Some(last) = flattened.last() {
last_end = last.0.end;
}
output.extend(flattened);
}
}
if last_end < outer_span.end {
output.push((
Span {
start: last_end,
end: outer_span.end,
},
FlatShape::Table,
));
}
output
}
Expr::Var(_) | Expr::VarDecl(_) => {
vec![(expr.span, FlatShape::Variable)]
}
}
}
pub fn flatten_pipeline(
working_set: &StateWorkingSet,
pipeline: &Pipeline,
) -> Vec<(Span, FlatShape)> {
let mut output = vec![];
for expr in &pipeline.expressions {
output.extend(flatten_expression(working_set, expr))
}
output
} | let mut output = vec![(call.head, FlatShape::InternalCall)];
|
_utils.py | from typing import Callable
from typing import List
from typing import Optional
from typing import Sequence
from typing import Union
import warnings
from optuna.distributions import CategoricalDistribution
from optuna.distributions import LogUniformDistribution
from optuna.study import Study
from optuna.trial import FrozenTrial
from optuna.visualization import _plotly_imports
__all__ = ["is_available"]
def is_available() -> bool:
|
def _check_plot_args(
study: Union[Study, Sequence[Study]],
target: Optional[Callable[[FrozenTrial], float]],
target_name: str,
) -> None:
studies: Sequence[Study]
if isinstance(study, Study):
studies = [study]
else:
studies = study
if target is None and any(study._is_multi_objective() for study in studies):
raise ValueError(
"If the `study` is being used for multi-objective optimization, "
"please specify the `target`."
)
if target is not None and target_name == "Objective Value":
warnings.warn(
"`target` is specified, but `target_name` is the default value, 'Objective Value'."
)
def _is_log_scale(trials: List[FrozenTrial], param: str) -> bool:
return any(
isinstance(t.distributions[param], LogUniformDistribution)
for t in trials
if param in t.params
)
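# Sketch: if a trial sampled "lr" from LogUniformDistribution(1e-5, 1e-1),
# _is_log_scale(trials, "lr") returns True, signalling that the plot axis for
# "lr" should use a logarithmic scale.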
def _is_categorical(trials: List[FrozenTrial], param: str) -> bool:
return any(
isinstance(t.distributions[param], CategoricalDistribution)
for t in trials
if param in t.params
)
| """Returns whether visualization with plotly is available or not.
.. note::
:mod:`~optuna.visualization` module depends on plotly version 4.0.0 or higher. If a
supported version of plotly isn't installed in your environment, this function will return
    :obj:`False`. In such a case, please execute ``$ pip install -U plotly>=4.0.0`` to install
plotly.
Returns:
:obj:`True` if visualization with plotly is available, :obj:`False` otherwise.
"""
return _plotly_imports._imports.is_successful() |
views_mixins.py | class FilterBackendViewMixin:
filter_backends = None
def get_queryset(self):
| result = super(FilterBackendViewMixin, self).get_queryset()
for filter_backend in self.filter_backends:
filter_instance = filter_backend(request=self.request, view=self)
result = filter_instance.filter_queryset(result)
return result |
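# Hypothetical usage sketch (class and backend names are illustrative only):
#
#     class ArticleListView(FilterBackendViewMixin, ListView):
#         filter_backends = (SearchFilterBackend, OrderingFilterBackend)
#
# Each backend is instantiated with the current request/view and narrows the
# queryset in turn before it reaches the view.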
|
endpointConfiguration.go | // *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package sagemaker
import (
"context"
"reflect"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// Provides a SageMaker endpoint configuration resource.
//
// ## Example Usage
//
// Basic usage:
//
// ```go
// package main
//
// import (
// "github.com/pulumi/pulumi-aws/sdk/v4/go/aws/sagemaker"
// "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
// )
//
// func main() {
// pulumi.Run(func(ctx *pulumi.Context) error {
// _, err := sagemaker.NewEndpointConfiguration(ctx, "ec", &sagemaker.EndpointConfigurationArgs{
// ProductionVariants: sagemaker.EndpointConfigurationProductionVariantArray{
// &sagemaker.EndpointConfigurationProductionVariantArgs{
// VariantName: pulumi.String("variant-1"),
// ModelName: pulumi.Any(aws_sagemaker_model.M.Name),
// InitialInstanceCount: pulumi.Int(1),
// InstanceType: pulumi.String("ml.t2.medium"),
// },
// },
// Tags: pulumi.StringMap{
// "Name": pulumi.String("foo"),
// },
// })
// if err != nil {
// return err
// }
// return nil
// })
// }
// ```
//
// ## Import
//
// Endpoint configurations can be imported using the `name`, e.g.
//
// ```sh
// $ pulumi import aws:sagemaker/endpointConfiguration:EndpointConfiguration test_endpoint_config endpoint-config-foo
// ```
type EndpointConfiguration struct {
pulumi.CustomResourceState
// The Amazon Resource Name (ARN) assigned by AWS to this endpoint configuration.
Arn pulumi.StringOutput `pulumi:"arn"`
// Specifies configuration for how an endpoint performs asynchronous inference.
AsyncInferenceConfig EndpointConfigurationAsyncInferenceConfigPtrOutput `pulumi:"asyncInferenceConfig"`
	// Specifies the parameters to capture input/output of SageMaker model endpoints. Fields are documented below.
	DataCaptureConfig EndpointConfigurationDataCaptureConfigPtrOutput `pulumi:"dataCaptureConfig"`
	// Amazon Resource Name (ARN) of an AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint.
KmsKeyArn pulumi.StringPtrOutput `pulumi:"kmsKeyArn"`
// The name of the endpoint configuration. If omitted, this provider will assign a random, unique name.
Name pulumi.StringOutput `pulumi:"name"`
// Fields are documented below.
ProductionVariants EndpointConfigurationProductionVariantArrayOutput `pulumi:"productionVariants"`
// A mapping of tags to assign to the resource.
Tags pulumi.StringMapOutput `pulumi:"tags"`
	// A map of tags assigned to the resource, including those inherited from the provider.
TagsAll pulumi.StringMapOutput `pulumi:"tagsAll"`
}
// NewEndpointConfiguration registers a new resource with the given unique name, arguments, and options.
func NewEndpointConfiguration(ctx *pulumi.Context,
name string, args *EndpointConfigurationArgs, opts ...pulumi.ResourceOption) (*EndpointConfiguration, error) {
if args == nil {
return nil, errors.New("missing one or more required arguments")
}
if args.ProductionVariants == nil {
return nil, errors.New("invalid value for required argument 'ProductionVariants'")
}
var resource EndpointConfiguration
err := ctx.RegisterResource("aws:sagemaker/endpointConfiguration:EndpointConfiguration", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
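// A minimal lookup sketch from consumer code (resource name and ID are
// hypothetical):
//
//	cfg, err := sagemaker.GetEndpointConfiguration(ctx, "existing",
//		pulumi.ID("endpoint-config-foo"), nil)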
// GetEndpointConfiguration gets an existing EndpointConfiguration resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetEndpointConfiguration(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *EndpointConfigurationState, opts ...pulumi.ResourceOption) (*EndpointConfiguration, error) |
// Input properties used for looking up and filtering EndpointConfiguration resources.
type endpointConfigurationState struct {
// The Amazon Resource Name (ARN) assigned by AWS to this endpoint configuration.
Arn *string `pulumi:"arn"`
// Specifies configuration for how an endpoint performs asynchronous inference.
AsyncInferenceConfig *EndpointConfigurationAsyncInferenceConfig `pulumi:"asyncInferenceConfig"`
	// Specifies the parameters to capture input/output of SageMaker model endpoints. Fields are documented below.
	DataCaptureConfig *EndpointConfigurationDataCaptureConfig `pulumi:"dataCaptureConfig"`
	// Amazon Resource Name (ARN) of an AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint.
KmsKeyArn *string `pulumi:"kmsKeyArn"`
// The name of the endpoint configuration. If omitted, this provider will assign a random, unique name.
Name *string `pulumi:"name"`
// Fields are documented below.
ProductionVariants []EndpointConfigurationProductionVariant `pulumi:"productionVariants"`
// A mapping of tags to assign to the resource.
Tags map[string]string `pulumi:"tags"`
// A map of tags assigned to the resource, including those inherited from the provider .
TagsAll map[string]string `pulumi:"tagsAll"`
}
type EndpointConfigurationState struct {
// The Amazon Resource Name (ARN) assigned by AWS to this endpoint configuration.
Arn pulumi.StringPtrInput
// Specifies configuration for how an endpoint performs asynchronous inference.
AsyncInferenceConfig EndpointConfigurationAsyncInferenceConfigPtrInput
	// Specifies the parameters to capture input/output of SageMaker model endpoints. Fields are documented below.
	DataCaptureConfig EndpointConfigurationDataCaptureConfigPtrInput
	// Amazon Resource Name (ARN) of an AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint.
KmsKeyArn pulumi.StringPtrInput
// The name of the endpoint configuration. If omitted, this provider will assign a random, unique name.
Name pulumi.StringPtrInput
// Fields are documented below.
ProductionVariants EndpointConfigurationProductionVariantArrayInput
// A mapping of tags to assign to the resource.
Tags pulumi.StringMapInput
	// A map of tags assigned to the resource, including those inherited from the provider.
TagsAll pulumi.StringMapInput
}
func (EndpointConfigurationState) ElementType() reflect.Type {
return reflect.TypeOf((*endpointConfigurationState)(nil)).Elem()
}
type endpointConfigurationArgs struct {
// Specifies configuration for how an endpoint performs asynchronous inference.
AsyncInferenceConfig *EndpointConfigurationAsyncInferenceConfig `pulumi:"asyncInferenceConfig"`
	// Specifies the parameters to capture input/output of SageMaker model endpoints. Fields are documented below.
	DataCaptureConfig *EndpointConfigurationDataCaptureConfig `pulumi:"dataCaptureConfig"`
	// Amazon Resource Name (ARN) of an AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint.
KmsKeyArn *string `pulumi:"kmsKeyArn"`
// The name of the endpoint configuration. If omitted, this provider will assign a random, unique name.
Name *string `pulumi:"name"`
// Fields are documented below.
ProductionVariants []EndpointConfigurationProductionVariant `pulumi:"productionVariants"`
// A mapping of tags to assign to the resource.
Tags map[string]string `pulumi:"tags"`
}
// The set of arguments for constructing a EndpointConfiguration resource.
type EndpointConfigurationArgs struct {
// Specifies configuration for how an endpoint performs asynchronous inference.
AsyncInferenceConfig EndpointConfigurationAsyncInferenceConfigPtrInput
	// Specifies the parameters to capture input/output of SageMaker model endpoints. Fields are documented below.
	DataCaptureConfig EndpointConfigurationDataCaptureConfigPtrInput
	// Amazon Resource Name (ARN) of an AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint.
KmsKeyArn pulumi.StringPtrInput
// The name of the endpoint configuration. If omitted, this provider will assign a random, unique name.
Name pulumi.StringPtrInput
// Fields are documented below.
ProductionVariants EndpointConfigurationProductionVariantArrayInput
// A mapping of tags to assign to the resource.
Tags pulumi.StringMapInput
}
func (EndpointConfigurationArgs) ElementType() reflect.Type {
return reflect.TypeOf((*endpointConfigurationArgs)(nil)).Elem()
}
type EndpointConfigurationInput interface {
pulumi.Input
ToEndpointConfigurationOutput() EndpointConfigurationOutput
ToEndpointConfigurationOutputWithContext(ctx context.Context) EndpointConfigurationOutput
}
func (*EndpointConfiguration) ElementType() reflect.Type {
return reflect.TypeOf((*EndpointConfiguration)(nil))
}
func (i *EndpointConfiguration) ToEndpointConfigurationOutput() EndpointConfigurationOutput {
return i.ToEndpointConfigurationOutputWithContext(context.Background())
}
func (i *EndpointConfiguration) ToEndpointConfigurationOutputWithContext(ctx context.Context) EndpointConfigurationOutput {
return pulumi.ToOutputWithContext(ctx, i).(EndpointConfigurationOutput)
}
func (i *EndpointConfiguration) ToEndpointConfigurationPtrOutput() EndpointConfigurationPtrOutput {
return i.ToEndpointConfigurationPtrOutputWithContext(context.Background())
}
func (i *EndpointConfiguration) ToEndpointConfigurationPtrOutputWithContext(ctx context.Context) EndpointConfigurationPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(EndpointConfigurationPtrOutput)
}
type EndpointConfigurationPtrInput interface {
pulumi.Input
ToEndpointConfigurationPtrOutput() EndpointConfigurationPtrOutput
ToEndpointConfigurationPtrOutputWithContext(ctx context.Context) EndpointConfigurationPtrOutput
}
type endpointConfigurationPtrType EndpointConfigurationArgs
func (*endpointConfigurationPtrType) ElementType() reflect.Type {
return reflect.TypeOf((**EndpointConfiguration)(nil))
}
func (i *endpointConfigurationPtrType) ToEndpointConfigurationPtrOutput() EndpointConfigurationPtrOutput {
return i.ToEndpointConfigurationPtrOutputWithContext(context.Background())
}
func (i *endpointConfigurationPtrType) ToEndpointConfigurationPtrOutputWithContext(ctx context.Context) EndpointConfigurationPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(EndpointConfigurationPtrOutput)
}
// EndpointConfigurationArrayInput is an input type that accepts EndpointConfigurationArray and EndpointConfigurationArrayOutput values.
// You can construct a concrete instance of `EndpointConfigurationArrayInput` via:
//
// EndpointConfigurationArray{ EndpointConfigurationArgs{...} }
type EndpointConfigurationArrayInput interface {
pulumi.Input
ToEndpointConfigurationArrayOutput() EndpointConfigurationArrayOutput
ToEndpointConfigurationArrayOutputWithContext(context.Context) EndpointConfigurationArrayOutput
}
type EndpointConfigurationArray []EndpointConfigurationInput
func (EndpointConfigurationArray) ElementType() reflect.Type {
return reflect.TypeOf((*[]*EndpointConfiguration)(nil)).Elem()
}
func (i EndpointConfigurationArray) ToEndpointConfigurationArrayOutput() EndpointConfigurationArrayOutput {
return i.ToEndpointConfigurationArrayOutputWithContext(context.Background())
}
func (i EndpointConfigurationArray) ToEndpointConfigurationArrayOutputWithContext(ctx context.Context) EndpointConfigurationArrayOutput {
return pulumi.ToOutputWithContext(ctx, i).(EndpointConfigurationArrayOutput)
}
// EndpointConfigurationMapInput is an input type that accepts EndpointConfigurationMap and EndpointConfigurationMapOutput values.
// You can construct a concrete instance of `EndpointConfigurationMapInput` via:
//
// EndpointConfigurationMap{ "key": EndpointConfigurationArgs{...} }
type EndpointConfigurationMapInput interface {
pulumi.Input
ToEndpointConfigurationMapOutput() EndpointConfigurationMapOutput
ToEndpointConfigurationMapOutputWithContext(context.Context) EndpointConfigurationMapOutput
}
type EndpointConfigurationMap map[string]EndpointConfigurationInput
func (EndpointConfigurationMap) ElementType() reflect.Type {
return reflect.TypeOf((*map[string]*EndpointConfiguration)(nil)).Elem()
}
func (i EndpointConfigurationMap) ToEndpointConfigurationMapOutput() EndpointConfigurationMapOutput {
return i.ToEndpointConfigurationMapOutputWithContext(context.Background())
}
func (i EndpointConfigurationMap) ToEndpointConfigurationMapOutputWithContext(ctx context.Context) EndpointConfigurationMapOutput {
return pulumi.ToOutputWithContext(ctx, i).(EndpointConfigurationMapOutput)
}
type EndpointConfigurationOutput struct{ *pulumi.OutputState }
func (EndpointConfigurationOutput) ElementType() reflect.Type {
return reflect.TypeOf((*EndpointConfiguration)(nil))
}
func (o EndpointConfigurationOutput) ToEndpointConfigurationOutput() EndpointConfigurationOutput {
return o
}
func (o EndpointConfigurationOutput) ToEndpointConfigurationOutputWithContext(ctx context.Context) EndpointConfigurationOutput {
return o
}
func (o EndpointConfigurationOutput) ToEndpointConfigurationPtrOutput() EndpointConfigurationPtrOutput {
return o.ToEndpointConfigurationPtrOutputWithContext(context.Background())
}
func (o EndpointConfigurationOutput) ToEndpointConfigurationPtrOutputWithContext(ctx context.Context) EndpointConfigurationPtrOutput {
return o.ApplyTWithContext(ctx, func(_ context.Context, v EndpointConfiguration) *EndpointConfiguration {
return &v
}).(EndpointConfigurationPtrOutput)
}
type EndpointConfigurationPtrOutput struct{ *pulumi.OutputState }
func (EndpointConfigurationPtrOutput) ElementType() reflect.Type {
return reflect.TypeOf((**EndpointConfiguration)(nil))
}
func (o EndpointConfigurationPtrOutput) ToEndpointConfigurationPtrOutput() EndpointConfigurationPtrOutput {
return o
}
func (o EndpointConfigurationPtrOutput) ToEndpointConfigurationPtrOutputWithContext(ctx context.Context) EndpointConfigurationPtrOutput {
return o
}
func (o EndpointConfigurationPtrOutput) Elem() EndpointConfigurationOutput {
return o.ApplyT(func(v *EndpointConfiguration) EndpointConfiguration {
if v != nil {
return *v
}
var ret EndpointConfiguration
return ret
}).(EndpointConfigurationOutput)
}
type EndpointConfigurationArrayOutput struct{ *pulumi.OutputState }
func (EndpointConfigurationArrayOutput) ElementType() reflect.Type {
return reflect.TypeOf((*[]EndpointConfiguration)(nil))
}
func (o EndpointConfigurationArrayOutput) ToEndpointConfigurationArrayOutput() EndpointConfigurationArrayOutput {
return o
}
func (o EndpointConfigurationArrayOutput) ToEndpointConfigurationArrayOutputWithContext(ctx context.Context) EndpointConfigurationArrayOutput {
return o
}
func (o EndpointConfigurationArrayOutput) Index(i pulumi.IntInput) EndpointConfigurationOutput {
return pulumi.All(o, i).ApplyT(func(vs []interface{}) EndpointConfiguration {
return vs[0].([]EndpointConfiguration)[vs[1].(int)]
}).(EndpointConfigurationOutput)
}
type EndpointConfigurationMapOutput struct{ *pulumi.OutputState }
func (EndpointConfigurationMapOutput) ElementType() reflect.Type {
return reflect.TypeOf((*map[string]EndpointConfiguration)(nil))
}
func (o EndpointConfigurationMapOutput) ToEndpointConfigurationMapOutput() EndpointConfigurationMapOutput {
return o
}
func (o EndpointConfigurationMapOutput) ToEndpointConfigurationMapOutputWithContext(ctx context.Context) EndpointConfigurationMapOutput {
return o
}
func (o EndpointConfigurationMapOutput) MapIndex(k pulumi.StringInput) EndpointConfigurationOutput {
return pulumi.All(o, k).ApplyT(func(vs []interface{}) EndpointConfiguration {
return vs[0].(map[string]EndpointConfiguration)[vs[1].(string)]
}).(EndpointConfigurationOutput)
}
func init() {
pulumi.RegisterInputType(reflect.TypeOf((*EndpointConfigurationInput)(nil)).Elem(), &EndpointConfiguration{})
pulumi.RegisterInputType(reflect.TypeOf((*EndpointConfigurationPtrInput)(nil)).Elem(), &EndpointConfiguration{})
pulumi.RegisterInputType(reflect.TypeOf((*EndpointConfigurationArrayInput)(nil)).Elem(), EndpointConfigurationArray{})
pulumi.RegisterInputType(reflect.TypeOf((*EndpointConfigurationMapInput)(nil)).Elem(), EndpointConfigurationMap{})
pulumi.RegisterOutputType(EndpointConfigurationOutput{})
pulumi.RegisterOutputType(EndpointConfigurationPtrOutput{})
pulumi.RegisterOutputType(EndpointConfigurationArrayOutput{})
pulumi.RegisterOutputType(EndpointConfigurationMapOutput{})
}
| {
var resource EndpointConfiguration
err := ctx.ReadResource("aws:sagemaker/endpointConfiguration:EndpointConfiguration", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
} |
fire-alt-icon.js | import { createIcon } from '../createIcon';
export const FireAltIconConfig = {
name: 'FireAltIcon',
height: 512, | xOffset: 0,
};
export const FireAltIcon = createIcon(FireAltIconConfig);
export default FireAltIcon; | width: 448,
svgPath: 'M323.56 51.2c-20.8 19.3-39.58 39.59-56.22 59.97C240.08 73.62 206.28 35.53 168 0 69.74 91.17 0 209.96 0 281.6 0 408.85 100.29 512 224 512s224-103.15 224-230.4c0-53.27-51.98-163.14-124.44-230.4zm-19.47 340.65C282.43 407.01 255.72 416 226.86 416 154.71 416 96 368.26 96 290.75c0-38.61 24.31-72.63 72.79-130.75 6.93 7.98 98.83 125.34 98.83 125.34l58.63-66.88c4.14 6.85 7.91 13.55 11.27 19.97 27.35 52.19 15.81 118.97-33.43 153.42z',
yOffset: 0, |
ControlCameraRounded.js | "use strict";
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
var _interopRequireWildcard = require("@babel/runtime/helpers/interopRequireWildcard");
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = void 0;
var React = _interopRequireWildcard(require("react"));
var _createSvgIcon = _interopRequireDefault(require("./utils/createSvgIcon"));
| var _default = (0, _createSvgIcon.default)( /*#__PURE__*/(0, _jsxRuntime.jsxs)(React.Fragment, {
children: [/*#__PURE__*/(0, _jsxRuntime.jsx)("path", {
d: "M4.65 9.35 2.7 11.3c-.39.39-.39 1.02 0 1.41l1.95 1.95c.49.49 1.28.49 1.77 0 .48-.49.48-1.27 0-1.76l-.88-.9.88-.89c.48-.49.48-1.27 0-1.76s-1.28-.49-1.77 0zm12.93 0c-.48.49-.48 1.27 0 1.76l.88.89-.88.89c-.48.49-.48 1.27 0 1.76.49.49 1.28.49 1.77 0l1.95-1.95c.39-.39.39-1.02 0-1.41l-1.95-1.95c-.49-.48-1.29-.48-1.77.01zM12 18.46l-.89-.88c-.49-.48-1.27-.48-1.76 0-.49.49-.49 1.28 0 1.77l1.95 1.95c.39.39 1.02.39 1.41 0l1.95-1.95c.49-.49.49-1.28 0-1.77-.49-.48-1.27-.48-1.76 0l-.9.88zM9.35 6.42c.49.48 1.27.48 1.76 0l.89-.88.89.88c.49.48 1.27.48 1.76 0 .49-.49.49-1.28 0-1.77L12.7 2.7a.9959.9959 0 0 0-1.41 0L9.35 4.65c-.49.49-.49 1.29 0 1.77z"
}), /*#__PURE__*/(0, _jsxRuntime.jsx)("circle", {
cx: "12",
cy: "12",
r: "3"
})]
}), 'ControlCameraRounded');
exports.default = _default; | var _jsxRuntime = require("react/jsx-runtime");
|
base16-kimber.py | from pygments.style import Style
from pygments.token import (
Comment, Error, Keyword, Literal, Name, Number, Operator, String, Text
)
class | (Style):
base00 = '#222222'
base01 = '#313131'
base02 = '#555D55'
base03 = '#644646'
base04 = '#5A5A5A'
base05 = '#DEDEE7'
base06 = '#C3C3B4'
base07 = '#FFFFE6'
base08 = '#C88C8C'
base09 = '#476C88'
base0a = '#D8B56D'
base0b = '#99C899'
base0c = '#78B4B4'
base0d = '#537C9C'
base0e = '#86CACD'
base0f = '#704F4F'
default_style = ''
background_color = base00
highlight_color = base02
styles = {
Text: base05,
Error: base08, # .err
Comment: f'italic {base03}', # .c
Comment.Preproc: base0f, # .cp
Comment.PreprocFile: base0b, # .cpf
Keyword: base0e, # .k
Keyword.Type: base08, # .kt
Name.Attribute: base0d, # .na
Name.Builtin: base0d, # .nb
Name.Builtin.Pseudo: base08, # .bp
Name.Class: base0d, # .nc
Name.Constant: base09, # .no
Name.Decorator: base09, # .nd
Name.Function: base0d, # .nf
Name.Namespace: base0d, # .nn
Name.Tag: base0e, # .nt
Name.Variable: base0d, # .nv
Name.Variable.Instance: base08, # .vi
Number: base09, # .m
Operator: base0c, # .o
Operator.Word: base0e, # .ow
Literal: base0b, # .l
String: base0b, # .s
String.Interpol: base0f, # .si
String.Regex: base0c, # .sr
String.Symbol: base09, # .ss
}
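# The lines below rename the class after definition: capwords('kimber', '-')
# CamelCases the scheme slug, so the style is exported as
# BaseSixteenKimberStyle and the generic BaseSixteenStyle alias is removed
# from the module namespace.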
from string import capwords # noqa: E402
BaseSixteenStyle.__name__ = 'BaseSixteen{}Style'.format(
capwords('kimber', '-').replace('-', '')
)
globals()[BaseSixteenStyle.__name__] = globals()['BaseSixteenStyle']
del globals()['BaseSixteenStyle']
del capwords
| BaseSixteenStyle |
api.go | // Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package amplify
import (
"fmt"
"time"
"github.com/mateuszwojcikcc/aws-sdk-go/aws"
"github.com/mateuszwojcikcc/aws-sdk-go/aws/awsutil"
"github.com/mateuszwojcikcc/aws-sdk-go/aws/request"
"github.com/mateuszwojcikcc/aws-sdk-go/private/protocol"
"github.com/mateuszwojcikcc/aws-sdk-go/private/protocol/restjson"
)
const opCreateApp = "CreateApp"
// CreateAppRequest generates a "aws/request.Request" representing the
// client's request for the CreateApp operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See CreateApp for more information on using the CreateApp
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the CreateAppRequest method.
// req, resp := client.CreateAppRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateApp
func (c *Amplify) CreateAppRequest(input *CreateAppInput) (req *request.Request, output *CreateAppOutput) {
op := &request.Operation{
Name: opCreateApp,
HTTPMethod: "POST",
HTTPPath: "/apps",
}
if input == nil {
input = &CreateAppInput{}
}
output = &CreateAppOutput{}
req = c.newRequest(op, input, output)
return
}
// CreateApp API operation for AWS Amplify.
//
// Creates a new Amplify app.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation CreateApp for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * LimitExceededException
// A resource could not be created because service quotas were exceeded.
//
// * DependentServiceFailureException
// An operation failed because a dependent service threw an exception.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateApp
func (c *Amplify) CreateApp(input *CreateAppInput) (*CreateAppOutput, error) {
req, out := c.CreateAppRequest(input)
return out, req.Send()
}
// CreateAppWithContext is the same as CreateApp with the addition of
// the ability to pass a context and additional request options.
//
// See CreateApp for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) CreateAppWithContext(ctx aws.Context, input *CreateAppInput, opts ...request.Option) (*CreateAppOutput, error) {
req, out := c.CreateAppRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
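// For example (sketch; assumes an *Amplify client `svc` and the standard
// context package):
//
//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//	defer cancel()
//	out, err := svc.CreateAppWithContext(ctx, &CreateAppInput{Name: aws.String("my-app")})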
const opCreateBackendEnvironment = "CreateBackendEnvironment"
// CreateBackendEnvironmentRequest generates a "aws/request.Request" representing the
// client's request for the CreateBackendEnvironment operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See CreateBackendEnvironment for more information on using the CreateBackendEnvironment
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the CreateBackendEnvironmentRequest method.
// req, resp := client.CreateBackendEnvironmentRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateBackendEnvironment
func (c *Amplify) CreateBackendEnvironmentRequest(input *CreateBackendEnvironmentInput) (req *request.Request, output *CreateBackendEnvironmentOutput) {
op := &request.Operation{
Name: opCreateBackendEnvironment,
HTTPMethod: "POST",
HTTPPath: "/apps/{appId}/backendenvironments",
}
if input == nil {
input = &CreateBackendEnvironmentInput{}
}
output = &CreateBackendEnvironmentOutput{}
req = c.newRequest(op, input, output)
return
}
// CreateBackendEnvironment API operation for AWS Amplify.
//
// Creates a new backend environment for an Amplify app.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation CreateBackendEnvironment for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * NotFoundException
// An entity was not found during an operation.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * LimitExceededException
// A resource could not be created because service quotas were exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateBackendEnvironment
func (c *Amplify) CreateBackendEnvironment(input *CreateBackendEnvironmentInput) (*CreateBackendEnvironmentOutput, error) {
req, out := c.CreateBackendEnvironmentRequest(input)
return out, req.Send()
}
// CreateBackendEnvironmentWithContext is the same as CreateBackendEnvironment with the addition of
// the ability to pass a context and additional request options.
//
// See CreateBackendEnvironment for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) CreateBackendEnvironmentWithContext(ctx aws.Context, input *CreateBackendEnvironmentInput, opts ...request.Option) (*CreateBackendEnvironmentOutput, error) {
req, out := c.CreateBackendEnvironmentRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opCreateBranch = "CreateBranch"
// CreateBranchRequest generates a "aws/request.Request" representing the
// client's request for the CreateBranch operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See CreateBranch for more information on using the CreateBranch
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the CreateBranchRequest method.
// req, resp := client.CreateBranchRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateBranch
func (c *Amplify) CreateBranchRequest(input *CreateBranchInput) (req *request.Request, output *CreateBranchOutput) {
op := &request.Operation{
Name: opCreateBranch,
HTTPMethod: "POST",
HTTPPath: "/apps/{appId}/branches",
}
if input == nil {
input = &CreateBranchInput{}
}
output = &CreateBranchOutput{}
req = c.newRequest(op, input, output)
return
}
// CreateBranch API operation for AWS Amplify.
//
// Creates a new branch for an Amplify app.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation CreateBranch for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * NotFoundException
// An entity was not found during an operation.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * LimitExceededException
// A resource could not be created because service quotas were exceeded.
//
// * DependentServiceFailureException
// An operation failed because a dependent service threw an exception.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateBranch
func (c *Amplify) CreateBranch(input *CreateBranchInput) (*CreateBranchOutput, error) {
req, out := c.CreateBranchRequest(input)
return out, req.Send()
}
// CreateBranchWithContext is the same as CreateBranch with the addition of
// the ability to pass a context and additional request options.
//
// See CreateBranch for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) CreateBranchWithContext(ctx aws.Context, input *CreateBranchInput, opts ...request.Option) (*CreateBranchOutput, error) {
req, out := c.CreateBranchRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opCreateDeployment = "CreateDeployment"
// CreateDeploymentRequest generates a "aws/request.Request" representing the
// client's request for the CreateDeployment operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See CreateDeployment for more information on using the CreateDeployment
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the CreateDeploymentRequest method.
// req, resp := client.CreateDeploymentRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateDeployment
func (c *Amplify) CreateDeploymentRequest(input *CreateDeploymentInput) (req *request.Request, output *CreateDeploymentOutput) {
op := &request.Operation{
Name: opCreateDeployment,
HTTPMethod: "POST",
HTTPPath: "/apps/{appId}/branches/{branchName}/deployments",
}
if input == nil {
input = &CreateDeploymentInput{}
}
output = &CreateDeploymentOutput{}
req = c.newRequest(op, input, output)
return
}
// CreateDeployment API operation for AWS Amplify.
//
// Creates a deployment for a manually deployed Amplify app. Manually deployed
// apps are not connected to a repository.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation CreateDeployment for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * LimitExceededException
// A resource could not be created because service quotas were exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateDeployment
func (c *Amplify) CreateDeployment(input *CreateDeploymentInput) (*CreateDeploymentOutput, error) {
req, out := c.CreateDeploymentRequest(input)
return out, req.Send()
}
// CreateDeploymentWithContext is the same as CreateDeployment with the addition of
// the ability to pass a context and additional request options.
//
// See CreateDeployment for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) CreateDeploymentWithContext(ctx aws.Context, input *CreateDeploymentInput, opts ...request.Option) (*CreateDeploymentOutput, error) {
req, out := c.CreateDeploymentRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opCreateDomainAssociation = "CreateDomainAssociation"
// CreateDomainAssociationRequest generates a "aws/request.Request" representing the
// client's request for the CreateDomainAssociation operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See CreateDomainAssociation for more information on using the CreateDomainAssociation
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the CreateDomainAssociationRequest method.
// req, resp := client.CreateDomainAssociationRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateDomainAssociation
func (c *Amplify) CreateDomainAssociationRequest(input *CreateDomainAssociationInput) (req *request.Request, output *CreateDomainAssociationOutput) {
op := &request.Operation{
Name: opCreateDomainAssociation,
HTTPMethod: "POST",
HTTPPath: "/apps/{appId}/domains",
}
if input == nil {
input = &CreateDomainAssociationInput{}
}
output = &CreateDomainAssociationOutput{}
req = c.newRequest(op, input, output)
return
}
// CreateDomainAssociation API operation for AWS Amplify.
//
// Creates a new domain association for an Amplify app. This action associates
// a custom domain with the Amplify app.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation CreateDomainAssociation for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * NotFoundException
// An entity was not found during an operation.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * LimitExceededException
// A resource could not be created because service quotas were exceeded.
//
// * DependentServiceFailureException
// An operation failed because a dependent service threw an exception.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateDomainAssociation
func (c *Amplify) CreateDomainAssociation(input *CreateDomainAssociationInput) (*CreateDomainAssociationOutput, error) {
req, out := c.CreateDomainAssociationRequest(input)
return out, req.Send()
}
// CreateDomainAssociationWithContext is the same as CreateDomainAssociation with the addition of
// the ability to pass a context and additional request options.
//
// See CreateDomainAssociation for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) CreateDomainAssociationWithContext(ctx aws.Context, input *CreateDomainAssociationInput, opts ...request.Option) (*CreateDomainAssociationOutput, error) {
req, out := c.CreateDomainAssociationRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opCreateWebhook = "CreateWebhook"
// CreateWebhookRequest generates a "aws/request.Request" representing the
// client's request for the CreateWebhook operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See CreateWebhook for more information on using the CreateWebhook
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the CreateWebhookRequest method.
// req, resp := client.CreateWebhookRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateWebhook
func (c *Amplify) CreateWebhookRequest(input *CreateWebhookInput) (req *request.Request, output *CreateWebhookOutput) {
op := &request.Operation{
Name: opCreateWebhook,
HTTPMethod: "POST",
HTTPPath: "/apps/{appId}/webhooks",
}
if input == nil {
input = &CreateWebhookInput{}
}
output = &CreateWebhookOutput{}
req = c.newRequest(op, input, output)
return
}
// CreateWebhook API operation for AWS Amplify.
//
// Creates a new webhook on an Amplify app.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation CreateWebhook for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * NotFoundException
// An entity was not found during an operation.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * LimitExceededException
// A resource could not be created because service quotas were exceeded.
//
// * DependentServiceFailureException
// An operation failed because a dependent service threw an exception.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateWebhook
func (c *Amplify) CreateWebhook(input *CreateWebhookInput) (*CreateWebhookOutput, error) {
req, out := c.CreateWebhookRequest(input)
return out, req.Send()
}
// CreateWebhookWithContext is the same as CreateWebhook with the addition of
// the ability to pass a context and additional request options.
//
// See CreateWebhook for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) CreateWebhookWithContext(ctx aws.Context, input *CreateWebhookInput, opts ...request.Option) (*CreateWebhookOutput, error) {
req, out := c.CreateWebhookRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opDeleteApp = "DeleteApp"
// DeleteAppRequest generates a "aws/request.Request" representing the
// client's request for the DeleteApp operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DeleteApp for more information on using the DeleteApp
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the DeleteAppRequest method.
// req, resp := client.DeleteAppRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteApp
func (c *Amplify) DeleteAppRequest(input *DeleteAppInput) (req *request.Request, output *DeleteAppOutput) {
op := &request.Operation{
Name: opDeleteApp,
HTTPMethod: "DELETE",
HTTPPath: "/apps/{appId}",
}
if input == nil {
input = &DeleteAppInput{}
}
output = &DeleteAppOutput{}
req = c.newRequest(op, input, output)
return
}
// DeleteApp API operation for AWS Amplify.
//
// Deletes an existing Amplify app specified by an app ID.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation DeleteApp for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * NotFoundException
// An entity was not found during an operation.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * DependentServiceFailureException
// An operation failed because a dependent service threw an exception.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteApp
func (c *Amplify) DeleteApp(input *DeleteAppInput) (*DeleteAppOutput, error) {
req, out := c.DeleteAppRequest(input)
return out, req.Send()
}
// DeleteAppWithContext is the same as DeleteApp with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteApp for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) DeleteAppWithContext(ctx aws.Context, input *DeleteAppInput, opts ...request.Option) (*DeleteAppOutput, error) {
req, out := c.DeleteAppRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opDeleteBackendEnvironment = "DeleteBackendEnvironment"
// DeleteBackendEnvironmentRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBackendEnvironment operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DeleteBackendEnvironment for more information on using the DeleteBackendEnvironment
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the DeleteBackendEnvironmentRequest method.
// req, resp := client.DeleteBackendEnvironmentRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteBackendEnvironment
func (c *Amplify) DeleteBackendEnvironmentRequest(input *DeleteBackendEnvironmentInput) (req *request.Request, output *DeleteBackendEnvironmentOutput) {
op := &request.Operation{
Name: opDeleteBackendEnvironment,
HTTPMethod: "DELETE",
HTTPPath: "/apps/{appId}/backendenvironments/{environmentName}",
}
if input == nil {
input = &DeleteBackendEnvironmentInput{}
}
output = &DeleteBackendEnvironmentOutput{}
req = c.newRequest(op, input, output)
return
}

// DeleteBackendEnvironment API operation for AWS Amplify.
//
// Deletes a backend environment for an Amplify app.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation DeleteBackendEnvironment for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * NotFoundException
// An entity was not found during an operation.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * DependentServiceFailureException
// An operation failed because a dependent service threw an exception.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteBackendEnvironment
func (c *Amplify) DeleteBackendEnvironment(input *DeleteBackendEnvironmentInput) (*DeleteBackendEnvironmentOutput, error) {
req, out := c.DeleteBackendEnvironmentRequest(input)
return out, req.Send()
}

// DeleteBackendEnvironmentWithContext is the same as DeleteBackendEnvironment with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteBackendEnvironment for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) DeleteBackendEnvironmentWithContext(ctx aws.Context, input *DeleteBackendEnvironmentInput, opts ...request.Option) (*DeleteBackendEnvironmentOutput, error) {
req, out := c.DeleteBackendEnvironmentRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}

const opDeleteBranch = "DeleteBranch"

// DeleteBranchRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBranch operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DeleteBranch for more information on using the DeleteBranch
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the DeleteBranchRequest method.
// req, resp := client.DeleteBranchRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteBranch
func (c *Amplify) DeleteBranchRequest(input *DeleteBranchInput) (req *request.Request, output *DeleteBranchOutput) {
op := &request.Operation{
Name: opDeleteBranch,
HTTPMethod: "DELETE",
HTTPPath: "/apps/{appId}/branches/{branchName}",
}
if input == nil {
input = &DeleteBranchInput{}
}
output = &DeleteBranchOutput{}
req = c.newRequest(op, input, output)
return
}

// DeleteBranch API operation for AWS Amplify.
//
// Deletes a branch for an Amplify app.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation DeleteBranch for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * NotFoundException
// An entity was not found during an operation.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * DependentServiceFailureException
// An operation failed because a dependent service threw an exception.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteBranch
func (c *Amplify) DeleteBranch(input *DeleteBranchInput) (*DeleteBranchOutput, error) {
req, out := c.DeleteBranchRequest(input)
return out, req.Send()
}

// DeleteBranchWithContext is the same as DeleteBranch with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteBranch for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) DeleteBranchWithContext(ctx aws.Context, input *DeleteBranchInput, opts ...request.Option) (*DeleteBranchOutput, error) {
req, out := c.DeleteBranchRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}

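// A hedged sketch, not generated code: distinguish the documented error types
// by code when deleting a branch. It assumes the ErrCode* constants from this
// package's errors file; the IDs are placeholders.
//
//    _, err := client.DeleteBranch(&DeleteBranchInput{
//        AppId:      aws.String("d123456789"),
//        BranchName: aws.String("main"),
//    })
//    if aerr, ok := err.(awserr.Error); ok {
//        switch aerr.Code() {
//        case ErrCodeNotFoundException:
//            // Already gone; safe to treat as success.
//        case ErrCodeUnauthorizedException:
//            // Surface permission problems to the caller.
//        default:
//            // Report or retry other failures.
//        }
//    }
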
const opDeleteDomainAssociation = "DeleteDomainAssociation"

// DeleteDomainAssociationRequest generates a "aws/request.Request" representing the
// client's request for the DeleteDomainAssociation operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DeleteDomainAssociation for more information on using the DeleteDomainAssociation
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the DeleteDomainAssociationRequest method.
// req, resp := client.DeleteDomainAssociationRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteDomainAssociation
func (c *Amplify) DeleteDomainAssociationRequest(input *DeleteDomainAssociationInput) (req *request.Request, output *DeleteDomainAssociationOutput) {
op := &request.Operation{
Name: opDeleteDomainAssociation,
HTTPMethod: "DELETE",
HTTPPath: "/apps/{appId}/domains/{domainName}",
}
if input == nil {
input = &DeleteDomainAssociationInput{}
}
output = &DeleteDomainAssociationOutput{}
req = c.newRequest(op, input, output)
return
}

// DeleteDomainAssociation API operation for AWS Amplify.
//
// Deletes a domain association for an Amplify app.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation DeleteDomainAssociation for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * NotFoundException
// An entity was not found during an operation.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * DependentServiceFailureException
// An operation failed because a dependent service threw an exception.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteDomainAssociation
func (c *Amplify) DeleteDomainAssociation(input *DeleteDomainAssociationInput) (*DeleteDomainAssociationOutput, error) {
req, out := c.DeleteDomainAssociationRequest(input)
return out, req.Send()
}

// DeleteDomainAssociationWithContext is the same as DeleteDomainAssociation with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteDomainAssociation for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) DeleteDomainAssociationWithContext(ctx aws.Context, input *DeleteDomainAssociationInput, opts ...request.Option) (*DeleteDomainAssociationOutput, error) {
req, out := c.DeleteDomainAssociationRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}

const opDeleteJob = "DeleteJob"

// DeleteJobRequest generates a "aws/request.Request" representing the
// client's request for the DeleteJob operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DeleteJob for more information on using the DeleteJob
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the DeleteJobRequest method.
// req, resp := client.DeleteJobRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteJob
func (c *Amplify) DeleteJobRequest(input *DeleteJobInput) (req *request.Request, output *DeleteJobOutput) {
op := &request.Operation{
Name: opDeleteJob,
HTTPMethod: "DELETE",
HTTPPath: "/apps/{appId}/branches/{branchName}/jobs/{jobId}",
}
if input == nil {
input = &DeleteJobInput{}
}
output = &DeleteJobOutput{}
req = c.newRequest(op, input, output)
return
}

// DeleteJob API operation for AWS Amplify.
//
// Deletes a job for a branch of an Amplify app.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation DeleteJob for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * NotFoundException
// An entity was not found during an operation.
//
// * LimitExceededException
// A resource could not be created because service quotas were exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteJob
func (c *Amplify) DeleteJob(input *DeleteJobInput) (*DeleteJobOutput, error) {
req, out := c.DeleteJobRequest(input)
return out, req.Send()
}

// DeleteJobWithContext is the same as DeleteJob with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteJob for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) DeleteJobWithContext(ctx aws.Context, input *DeleteJobInput, opts ...request.Option) (*DeleteJobOutput, error) {
req, out := c.DeleteJobRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}

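// A hedged sketch, not generated code: a job is addressed by all three path
// parameters shown in the HTTPPath above. The IDs are placeholders.
//
//    out, err := client.DeleteJobWithContext(aws.BackgroundContext(), &DeleteJobInput{
//        AppId:      aws.String("d123456789"),
//        BranchName: aws.String("main"),
//        JobId:      aws.String("42"),
//    })
//    if err == nil {
//        fmt.Println(out)
//    }
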
const opDeleteWebhook = "DeleteWebhook"

// DeleteWebhookRequest generates a "aws/request.Request" representing the
// client's request for the DeleteWebhook operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DeleteWebhook for more information on using the DeleteWebhook
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the DeleteWebhookRequest method.
// req, resp := client.DeleteWebhookRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteWebhook
func (c *Amplify) DeleteWebhookRequest(input *DeleteWebhookInput) (req *request.Request, output *DeleteWebhookOutput) {
op := &request.Operation{
Name: opDeleteWebhook,
HTTPMethod: "DELETE",
HTTPPath: "/webhooks/{webhookId}",
}
if input == nil {
input = &DeleteWebhookInput{}
}
output = &DeleteWebhookOutput{}
req = c.newRequest(op, input, output)
return
}

// DeleteWebhook API operation for AWS Amplify.
//
// Deletes a webhook.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation DeleteWebhook for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * NotFoundException
// An entity was not found during an operation.
//
// * LimitExceededException
// A resource could not be created because service quotas were exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteWebhook
func (c *Amplify) DeleteWebhook(input *DeleteWebhookInput) (*DeleteWebhookOutput, error) {
req, out := c.DeleteWebhookRequest(input)
return out, req.Send()
}

// DeleteWebhookWithContext is the same as DeleteWebhook with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteWebhook for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) DeleteWebhookWithContext(ctx aws.Context, input *DeleteWebhookInput, opts ...request.Option) (*DeleteWebhookOutput, error) {
req, out := c.DeleteWebhookRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}

const opGenerateAccessLogs = "GenerateAccessLogs"

// GenerateAccessLogsRequest generates a "aws/request.Request" representing the
// client's request for the GenerateAccessLogs operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GenerateAccessLogs for more information on using the GenerateAccessLogs
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GenerateAccessLogsRequest method.
// req, resp := client.GenerateAccessLogsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GenerateAccessLogs
func (c *Amplify) GenerateAccessLogsRequest(input *GenerateAccessLogsInput) (req *request.Request, output *GenerateAccessLogsOutput) {
op := &request.Operation{
Name: opGenerateAccessLogs,
HTTPMethod: "POST",
HTTPPath: "/apps/{appId}/accesslogs",
}
if input == nil {
input = &GenerateAccessLogsInput{}
}
output = &GenerateAccessLogsOutput{}
req = c.newRequest(op, input, output)
return
}

// GenerateAccessLogs API operation for AWS Amplify.
//
// Returns the website access logs for a specific time range using a presigned
// URL.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation GenerateAccessLogs for usage and error information.
//
// Returned Error Types:
// * NotFoundException
// An entity was not found during an operation.
//
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GenerateAccessLogs
func (c *Amplify) GenerateAccessLogs(input *GenerateAccessLogsInput) (*GenerateAccessLogsOutput, error) {
req, out := c.GenerateAccessLogsRequest(input)
return out, req.Send()
}

// GenerateAccessLogsWithContext is the same as GenerateAccessLogs with the addition of
// the ability to pass a context and additional request options.
//
// See GenerateAccessLogs for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) GenerateAccessLogsWithContext(ctx aws.Context, input *GenerateAccessLogsInput, opts ...request.Option) (*GenerateAccessLogsOutput, error) {
req, out := c.GenerateAccessLogsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}

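// A hedged sketch, not generated code: request a presigned log URL for a
// one-day window. The field names follow the GenerateAccessLogsInput and
// GenerateAccessLogsOutput shapes defined later in this package; the domain
// and app ID are placeholders.
//
//    end := time.Now()
//    out, err := client.GenerateAccessLogs(&GenerateAccessLogsInput{
//        AppId:      aws.String("d123456789"),
//        DomainName: aws.String("example.com"),
//        StartTime:  aws.Time(end.Add(-24 * time.Hour)),
//        EndTime:    aws.Time(end),
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.LogUrl)) // presigned URL
//    }
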
const opGetApp = "GetApp"

// GetAppRequest generates a "aws/request.Request" representing the
// client's request for the GetApp operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetApp for more information on using the GetApp
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GetAppRequest method.
// req, resp := client.GetAppRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetApp
func (c *Amplify) GetAppRequest(input *GetAppInput) (req *request.Request, output *GetAppOutput) {
op := &request.Operation{
Name: opGetApp,
HTTPMethod: "GET",
HTTPPath: "/apps/{appId}",
}
if input == nil {
input = &GetAppInput{}
}
output = &GetAppOutput{}
req = c.newRequest(op, input, output)
return
}

// GetApp API operation for AWS Amplify.
//
// Returns an existing Amplify app specified by an app ID.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation GetApp for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * NotFoundException
// An entity was not found during an operation.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetApp
func (c *Amplify) GetApp(input *GetAppInput) (*GetAppOutput, error) {
req, out := c.GetAppRequest(input)
return out, req.Send()
}

// GetAppWithContext is the same as GetApp with the addition of
// the ability to pass a context and additional request options.
//
// See GetApp for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) GetAppWithContext(ctx aws.Context, input *GetAppInput, opts ...request.Option) (*GetAppOutput, error) {
req, out := c.GetAppRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}

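// A hedged sketch, not generated code, of the custom-header injection the
// Request method enables; the header name and app ID are placeholders.
//
//    req, out := client.GetAppRequest(&GetAppInput{
//        AppId: aws.String("d123456789"),
//    })
//    req.HTTPRequest.Header.Set("X-Example-Trace", "demo") // hypothetical header
//    if err := req.Send(); err == nil {
//        fmt.Println(out.App)
//    }
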
const opGetArtifactUrl = "GetArtifactUrl"

// GetArtifactUrlRequest generates a "aws/request.Request" representing the
// client's request for the GetArtifactUrl operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetArtifactUrl for more information on using the GetArtifactUrl
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GetArtifactUrlRequest method.
// req, resp := client.GetArtifactUrlRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetArtifactUrl
func (c *Amplify) GetArtifactUrlRequest(input *GetArtifactUrlInput) (req *request.Request, output *GetArtifactUrlOutput) {
op := &request.Operation{
Name: opGetArtifactUrl,
HTTPMethod: "GET",
HTTPPath: "/artifacts/{artifactId}",
}
if input == nil {
input = &GetArtifactUrlInput{}
}
output = &GetArtifactUrlOutput{}
req = c.newRequest(op, input, output)
return
}

// GetArtifactUrl API operation for AWS Amplify.
//
// Returns the artifact information that corresponds to an artifact ID.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation GetArtifactUrl for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * NotFoundException
// An entity was not found during an operation.
//
// * LimitExceededException
// A resource could not be created because service quotas were exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetArtifactUrl
func (c *Amplify) GetArtifactUrl(input *GetArtifactUrlInput) (*GetArtifactUrlOutput, error) {
req, out := c.GetArtifactUrlRequest(input)
return out, req.Send()
}

// GetArtifactUrlWithContext is the same as GetArtifactUrl with the addition of
// the ability to pass a context and additional request options.
//
// See GetArtifactUrl for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) GetArtifactUrlWithContext(ctx aws.Context, input *GetArtifactUrlInput, opts ...request.Option) (*GetArtifactUrlOutput, error) {
req, out := c.GetArtifactUrlRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}

const opGetBackendEnvironment = "GetBackendEnvironment"

// GetBackendEnvironmentRequest generates a "aws/request.Request" representing the
// client's request for the GetBackendEnvironment operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetBackendEnvironment for more information on using the GetBackendEnvironment
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GetBackendEnvironmentRequest method.
// req, resp := client.GetBackendEnvironmentRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetBackendEnvironment
func (c *Amplify) GetBackendEnvironmentRequest(input *GetBackendEnvironmentInput) (req *request.Request, output *GetBackendEnvironmentOutput) {
op := &request.Operation{
Name: opGetBackendEnvironment,
HTTPMethod: "GET",
HTTPPath: "/apps/{appId}/backendenvironments/{environmentName}",
}
if input == nil {
input = &GetBackendEnvironmentInput{}
}
output = &GetBackendEnvironmentOutput{}
req = c.newRequest(op, input, output)
return
}

// GetBackendEnvironment API operation for AWS Amplify.
//
// Returns a backend environment for an Amplify app.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation GetBackendEnvironment for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * NotFoundException
// An entity was not found during an operation.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetBackendEnvironment
func (c *Amplify) GetBackendEnvironment(input *GetBackendEnvironmentInput) (*GetBackendEnvironmentOutput, error) {
req, out := c.GetBackendEnvironmentRequest(input)
return out, req.Send()
}

// GetBackendEnvironmentWithContext is the same as GetBackendEnvironment with the addition of
// the ability to pass a context and additional request options.
//
// See GetBackendEnvironment for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) GetBackendEnvironmentWithContext(ctx aws.Context, input *GetBackendEnvironmentInput, opts ...request.Option) (*GetBackendEnvironmentOutput, error) {
req, out := c.GetBackendEnvironmentRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}

const opGetBranch = "GetBranch"

// GetBranchRequest generates a "aws/request.Request" representing the
// client's request for the GetBranch operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetBranch for more information on using the GetBranch
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GetBranchRequest method.
// req, resp := client.GetBranchRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetBranch
func (c *Amplify) GetBranchRequest(input *GetBranchInput) (req *request.Request, output *GetBranchOutput) {
op := &request.Operation{
Name: opGetBranch,
HTTPMethod: "GET",
HTTPPath: "/apps/{appId}/branches/{branchName}",
}
if input == nil {
input = &GetBranchInput{}
}
output = &GetBranchOutput{}
req = c.newRequest(op, input, output)
return
}

// GetBranch API operation for AWS Amplify.
//
// Returns a branch for an Amplify app.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation GetBranch for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * NotFoundException
// An entity was not found during an operation.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetBranch
func (c *Amplify) GetBranch(input *GetBranchInput) (*GetBranchOutput, error) {
req, out := c.GetBranchRequest(input)
return out, req.Send()
}

// GetBranchWithContext is the same as GetBranch with the addition of
// the ability to pass a context and additional request options.
//
// See GetBranch for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) GetBranchWithContext(ctx aws.Context, input *GetBranchInput, opts ...request.Option) (*GetBranchOutput, error) {
req, out := c.GetBranchRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}

const opGetDomainAssociation = "GetDomainAssociation"

// GetDomainAssociationRequest generates a "aws/request.Request" representing the
// client's request for the GetDomainAssociation operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetDomainAssociation for more information on using the GetDomainAssociation
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GetDomainAssociationRequest method.
// req, resp := client.GetDomainAssociationRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetDomainAssociation
func (c *Amplify) GetDomainAssociationRequest(input *GetDomainAssociationInput) (req *request.Request, output *GetDomainAssociationOutput) {
op := &request.Operation{
Name: opGetDomainAssociation,
HTTPMethod: "GET",
HTTPPath: "/apps/{appId}/domains/{domainName}",
}
if input == nil {
input = &GetDomainAssociationInput{}
}
output = &GetDomainAssociationOutput{}
req = c.newRequest(op, input, output)
return
}

// GetDomainAssociation API operation for AWS Amplify.
//
// Returns the domain information for an Amplify app.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation GetDomainAssociation for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * NotFoundException
// An entity was not found during an operation.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetDomainAssociation
func (c *Amplify) GetDomainAssociation(input *GetDomainAssociationInput) (*GetDomainAssociationOutput, error) {
req, out := c.GetDomainAssociationRequest(input)
return out, req.Send()
}

// GetDomainAssociationWithContext is the same as GetDomainAssociation with the addition of
// the ability to pass a context and additional request options.
//
// See GetDomainAssociation for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) GetDomainAssociationWithContext(ctx aws.Context, input *GetDomainAssociationInput, opts ...request.Option) (*GetDomainAssociationOutput, error) {
req, out := c.GetDomainAssociationRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}

const opGetJob = "GetJob"

// GetJobRequest generates a "aws/request.Request" representing the
// client's request for the GetJob operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetJob for more information on using the GetJob
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GetJobRequest method.
// req, resp := client.GetJobRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetJob
func (c *Amplify) GetJobRequest(input *GetJobInput) (req *request.Request, output *GetJobOutput) {
op := &request.Operation{
Name: opGetJob,
HTTPMethod: "GET",
HTTPPath: "/apps/{appId}/branches/{branchName}/jobs/{jobId}",
}
if input == nil {
input = &GetJobInput{}
}
output = &GetJobOutput{}
req = c.newRequest(op, input, output)
return
}

// GetJob API operation for AWS Amplify.
//
// Returns a job for a branch of an Amplify app.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation GetJob for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * NotFoundException
// An entity was not found during an operation.
//
// * LimitExceededException
// A resource could not be created because service quotas were exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetJob
func (c *Amplify) GetJob(input *GetJobInput) (*GetJobOutput, error) {
req, out := c.GetJobRequest(input)
return out, req.Send()
}

// GetJobWithContext is the same as GetJob with the addition of
// the ability to pass a context and additional request options.
//
// See GetJob for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) GetJobWithContext(ctx aws.Context, input *GetJobInput, opts ...request.Option) (*GetJobOutput, error) {
req, out := c.GetJobRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}

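// A hedged polling sketch, not generated code: it assumes the Job and
// JobSummary shapes defined later in this package and the "SUCCEED" terminal
// status; the IDs and sleep interval are placeholders.
//
//    in := &GetJobInput{
//        AppId:      aws.String("d123456789"),
//        BranchName: aws.String("main"),
//        JobId:      aws.String("42"),
//    }
//    for {
//        out, err := client.GetJob(in)
//        if err != nil {
//            break
//        }
//        if aws.StringValue(out.Job.Summary.Status) == "SUCCEED" {
//            break
//        }
//        time.Sleep(10 * time.Second)
//    }
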
const opGetWebhook = "GetWebhook"

// GetWebhookRequest generates a "aws/request.Request" representing the
// client's request for the GetWebhook operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetWebhook for more information on using the GetWebhook
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GetWebhookRequest method.
// req, resp := client.GetWebhookRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetWebhook
func (c *Amplify) GetWebhookRequest(input *GetWebhookInput) (req *request.Request, output *GetWebhookOutput) {
op := &request.Operation{
Name: opGetWebhook,
HTTPMethod: "GET",
HTTPPath: "/webhooks/{webhookId}",
}
if input == nil {
input = &GetWebhookInput{}
}
output = &GetWebhookOutput{}
req = c.newRequest(op, input, output)
return
}

// GetWebhook API operation for AWS Amplify.
//
// Returns the webhook information that corresponds to a specified webhook ID.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation GetWebhook for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * NotFoundException
// An entity was not found during an operation.
//
// * LimitExceededException
// A resource could not be created because service quotas were exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetWebhook
func (c *Amplify) GetWebhook(input *GetWebhookInput) (*GetWebhookOutput, error) {
req, out := c.GetWebhookRequest(input)
return out, req.Send()
}

// GetWebhookWithContext is the same as GetWebhook with the addition of
// the ability to pass a context and additional request options.
//
// See GetWebhook for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) GetWebhookWithContext(ctx aws.Context, input *GetWebhookInput, opts ...request.Option) (*GetWebhookOutput, error) {
req, out := c.GetWebhookRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}

const opListApps = "ListApps"

// ListAppsRequest generates a "aws/request.Request" representing the
// client's request for the ListApps operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListApps for more information on using the ListApps
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the ListAppsRequest method.
// req, resp := client.ListAppsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListApps
func (c *Amplify) ListAppsRequest(input *ListAppsInput) (req *request.Request, output *ListAppsOutput) {
op := &request.Operation{
Name: opListApps,
HTTPMethod: "GET",
HTTPPath: "/apps",
}
if input == nil {
input = &ListAppsInput{}
}
output = &ListAppsOutput{}
req = c.newRequest(op, input, output)
return
}

// ListApps API operation for AWS Amplify.
//
// Returns a list of the existing Amplify apps.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation ListApps for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListApps
func (c *Amplify) ListApps(input *ListAppsInput) (*ListAppsOutput, error) {
req, out := c.ListAppsRequest(input)
return out, req.Send()
}

// ListAppsWithContext is the same as ListApps with the addition of
// the ability to pass a context and additional request options.
//
// See ListApps for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) ListAppsWithContext(ctx aws.Context, input *ListAppsInput, opts ...request.Option) (*ListAppsOutput, error) {
req, out := c.ListAppsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}

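// A hedged pagination sketch, not generated code: ListApps pages with
// NextToken, so loop until the token comes back nil. The page size is a
// placeholder.
//
//    in := &ListAppsInput{MaxResults: aws.Int64(50)}
//    for {
//        out, err := client.ListApps(in)
//        if err != nil {
//            break
//        }
//        for _, app := range out.Apps {
//            fmt.Println(aws.StringValue(app.AppId))
//        }
//        if out.NextToken == nil {
//            break
//        }
//        in.NextToken = out.NextToken
//    }
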
const opListArtifacts = "ListArtifacts"

// ListArtifactsRequest generates a "aws/request.Request" representing the
// client's request for the ListArtifacts operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListArtifacts for more information on using the ListArtifacts
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the ListArtifactsRequest method.
// req, resp := client.ListArtifactsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListArtifacts
func (c *Amplify) ListArtifactsRequest(input *ListArtifactsInput) (req *request.Request, output *ListArtifactsOutput) {
op := &request.Operation{
Name: opListArtifacts,
HTTPMethod: "GET",
HTTPPath: "/apps/{appId}/branches/{branchName}/jobs/{jobId}/artifacts",
}
if input == nil {
input = &ListArtifactsInput{}
}
output = &ListArtifactsOutput{}
req = c.newRequest(op, input, output)
return
}

// ListArtifacts API operation for AWS Amplify.
//
// Returns a list of artifacts for a specified app, branch, and job.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation ListArtifacts for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * LimitExceededException
// A resource could not be created because service quotas were exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListArtifacts
func (c *Amplify) ListArtifacts(input *ListArtifactsInput) (*ListArtifactsOutput, error) {
req, out := c.ListArtifactsRequest(input)
return out, req.Send()
}

// ListArtifactsWithContext is the same as ListArtifacts with the addition of
// the ability to pass a context and additional request options.
//
// See ListArtifacts for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) ListArtifactsWithContext(ctx aws.Context, input *ListArtifactsInput, opts ...request.Option) (*ListArtifactsOutput, error) {
req, out := c.ListArtifactsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}

const opListBackendEnvironments = "ListBackendEnvironments"

// ListBackendEnvironmentsRequest generates a "aws/request.Request" representing the
// client's request for the ListBackendEnvironments operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListBackendEnvironments for more information on using the ListBackendEnvironments
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the ListBackendEnvironmentsRequest method.
// req, resp := client.ListBackendEnvironmentsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListBackendEnvironments
func (c *Amplify) ListBackendEnvironmentsRequest(input *ListBackendEnvironmentsInput) (req *request.Request, output *ListBackendEnvironmentsOutput) {
op := &request.Operation{
Name: opListBackendEnvironments,
HTTPMethod: "GET",
HTTPPath: "/apps/{appId}/backendenvironments",
}
if input == nil {
input = &ListBackendEnvironmentsInput{}
}
output = &ListBackendEnvironmentsOutput{}
req = c.newRequest(op, input, output)
return
}

// ListBackendEnvironments API operation for AWS Amplify.
//
// Lists the backend environments for an Amplify app.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation ListBackendEnvironments for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListBackendEnvironments
func (c *Amplify) ListBackendEnvironments(input *ListBackendEnvironmentsInput) (*ListBackendEnvironmentsOutput, error) {
req, out := c.ListBackendEnvironmentsRequest(input)
return out, req.Send()
}

// ListBackendEnvironmentsWithContext is the same as ListBackendEnvironments with the addition of
// the ability to pass a context and additional request options.
//
// See ListBackendEnvironments for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) ListBackendEnvironmentsWithContext(ctx aws.Context, input *ListBackendEnvironmentsInput, opts ...request.Option) (*ListBackendEnvironmentsOutput, error) {
req, out := c.ListBackendEnvironmentsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}

const opListBranches = "ListBranches"

// ListBranchesRequest generates a "aws/request.Request" representing the
// client's request for the ListBranches operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListBranches for more information on using the ListBranches
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the ListBranchesRequest method.
// req, resp := client.ListBranchesRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListBranches
func (c *Amplify) ListBranchesRequest(input *ListBranchesInput) (req *request.Request, output *ListBranchesOutput) {
op := &request.Operation{
Name: opListBranches,
HTTPMethod: "GET",
HTTPPath: "/apps/{appId}/branches",
}
if input == nil {
input = &ListBranchesInput{}
}
output = &ListBranchesOutput{}
req = c.newRequest(op, input, output)
return
}

// ListBranches API operation for AWS Amplify.
//
// Lists the branches of an Amplify app.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation ListBranches for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListBranches
func (c *Amplify) ListBranches(input *ListBranchesInput) (*ListBranchesOutput, error) {
req, out := c.ListBranchesRequest(input)
return out, req.Send()
}

// ListBranchesWithContext is the same as ListBranches with the addition of
// the ability to pass a context and additional request options.
//
// See ListBranches for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) ListBranchesWithContext(ctx aws.Context, input *ListBranchesInput, opts ...request.Option) (*ListBranchesOutput, error) {
req, out := c.ListBranchesRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}

const opListDomainAssociations = "ListDomainAssociations"

// ListDomainAssociationsRequest generates a "aws/request.Request" representing the
// client's request for the ListDomainAssociations operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListDomainAssociations for more information on using the ListDomainAssociations
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the ListDomainAssociationsRequest method.
// req, resp := client.ListDomainAssociationsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListDomainAssociations
func (c *Amplify) ListDomainAssociationsRequest(input *ListDomainAssociationsInput) (req *request.Request, output *ListDomainAssociationsOutput) {
op := &request.Operation{
Name: opListDomainAssociations,
HTTPMethod: "GET",
HTTPPath: "/apps/{appId}/domains",
}
if input == nil {
input = &ListDomainAssociationsInput{}
}
output = &ListDomainAssociationsOutput{}
req = c.newRequest(op, input, output)
return
}

// ListDomainAssociations API operation for AWS Amplify.
//
// Returns the domain associations for an Amplify app.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation ListDomainAssociations for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListDomainAssociations
func (c *Amplify) ListDomainAssociations(input *ListDomainAssociationsInput) (*ListDomainAssociationsOutput, error) {
req, out := c.ListDomainAssociationsRequest(input)
return out, req.Send()
}

// ListDomainAssociationsWithContext is the same as ListDomainAssociations with the addition of
// the ability to pass a context and additional request options.
//
// See ListDomainAssociations for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) ListDomainAssociationsWithContext(ctx aws.Context, input *ListDomainAssociationsInput, opts ...request.Option) (*ListDomainAssociationsOutput, error) {
req, out := c.ListDomainAssociationsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}

const opListJobs = "ListJobs"

// ListJobsRequest generates a "aws/request.Request" representing the
// client's request for the ListJobs operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListJobs for more information on using the ListJobs
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the ListJobsRequest method.
// req, resp := client.ListJobsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListJobs
func (c *Amplify) ListJobsRequest(input *ListJobsInput) (req *request.Request, output *ListJobsOutput) {
op := &request.Operation{
Name: opListJobs,
HTTPMethod: "GET",
HTTPPath: "/apps/{appId}/branches/{branchName}/jobs",
}
if input == nil {
input = &ListJobsInput{}
}
output = &ListJobsOutput{}
req = c.newRequest(op, input, output)
return
}
// ListJobs API operation for AWS Amplify.
//
// Lists the jobs for a branch of an Amplify app.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation ListJobs for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * LimitExceededException
// A resource could not be created because service quotas were exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListJobs
func (c *Amplify) ListJobs(input *ListJobsInput) (*ListJobsOutput, error) {
req, out := c.ListJobsRequest(input)
return out, req.Send()
}
// ListJobsWithContext is the same as ListJobs with the addition of
// the ability to pass a context and additional request options.
//
// See ListJobs for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) ListJobsWithContext(ctx aws.Context, input *ListJobsInput, opts ...request.Option) (*ListJobsOutput, error) {
req, out := c.ListJobsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opListTagsForResource = "ListTagsForResource"
// ListTagsForResourceRequest generates a "aws/request.Request" representing the
// client's request for the ListTagsForResource operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListTagsForResource for more information on using the ListTagsForResource
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the ListTagsForResourceRequest method.
// req, resp := client.ListTagsForResourceRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListTagsForResource
func (c *Amplify) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) {
op := &request.Operation{
Name: opListTagsForResource,
HTTPMethod: "GET",
HTTPPath: "/tags/{resourceArn}",
}
if input == nil {
input = &ListTagsForResourceInput{}
}
output = &ListTagsForResourceOutput{}
req = c.newRequest(op, input, output)
return
}
// ListTagsForResource API operation for AWS Amplify.
//
// Returns a list of tags for a specified Amazon Resource Name (ARN).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation ListTagsForResource for usage and error information.
//
// Returned Error Types:
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * BadRequestException
// A request contains unexpected data.
//
// * ResourceNotFoundException
// An operation failed due to a non-existent resource.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListTagsForResource
func (c *Amplify) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) {
req, out := c.ListTagsForResourceRequest(input)
return out, req.Send()
}
// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of
// the ability to pass a context and additional request options.
//
// See ListTagsForResource for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) {
req, out := c.ListTagsForResourceRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
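// A usage sketch (editorial addition): listing tags by ARN and reading the
// returned map. "svc" and the ARN are placeholders.
//
//    out, err := svc.ListTagsForResource(&ListTagsForResourceInput{
//        ResourceArn: aws.String("arn:aws:amplify:us-east-1:111122223333:apps/d1a2b3c4example"),
//    })
//    if err == nil {
//        for k, v := range out.Tags {
//            fmt.Printf("%s=%s\n", k, aws.StringValue(v))
//        }
//    }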
const opListWebhooks = "ListWebhooks"
// ListWebhooksRequest generates a "aws/request.Request" representing the
// client's request for the ListWebhooks operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListWebhooks for more information on using the ListWebhooks
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the ListWebhooksRequest method.
// req, resp := client.ListWebhooksRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListWebhooks
func (c *Amplify) ListWebhooksRequest(input *ListWebhooksInput) (req *request.Request, output *ListWebhooksOutput) {
op := &request.Operation{
Name: opListWebhooks,
HTTPMethod: "GET",
HTTPPath: "/apps/{appId}/webhooks",
}
if input == nil {
input = &ListWebhooksInput{}
}
output = &ListWebhooksOutput{}
req = c.newRequest(op, input, output)
return
}
// ListWebhooks API operation for AWS Amplify.
//
// Returns a list of webhooks for an Amplify app.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation ListWebhooks for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * LimitExceededException
// A resource could not be created because service quotas were exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListWebhooks
func (c *Amplify) ListWebhooks(input *ListWebhooksInput) (*ListWebhooksOutput, error) {
req, out := c.ListWebhooksRequest(input)
return out, req.Send()
}
// ListWebhooksWithContext is the same as ListWebhooks with the addition of
// the ability to pass a context and additional request options.
//
// See ListWebhooks for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) ListWebhooksWithContext(ctx aws.Context, input *ListWebhooksInput, opts ...request.Option) (*ListWebhooksOutput, error) {
req, out := c.ListWebhooksRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
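// A usage sketch (editorial addition): paging through webhooks by hand with
// NextToken. Field names (Webhooks, NextToken, WebhookUrl) are assumed from
// the corresponding input and output types; "svc" and the app ID are
// placeholders.
//
//    input := &ListWebhooksInput{
//        AppId:      aws.String("d1a2b3c4example"),
//        MaxResults: aws.Int64(50),
//    }
//    for {
//        out, err := svc.ListWebhooks(input)
//        if err != nil {
//            break // handle the error
//        }
//        for _, wh := range out.Webhooks {
//            fmt.Println(aws.StringValue(wh.WebhookUrl))
//        }
//        if aws.StringValue(out.NextToken) == "" {
//            break
//        }
//        input.NextToken = out.NextToken
//    }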
const opStartDeployment = "StartDeployment"
// StartDeploymentRequest generates a "aws/request.Request" representing the
// client's request for the StartDeployment operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See StartDeployment for more information on using the StartDeployment
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the StartDeploymentRequest method.
// req, resp := client.StartDeploymentRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/StartDeployment
func (c *Amplify) StartDeploymentRequest(input *StartDeploymentInput) (req *request.Request, output *StartDeploymentOutput) {
op := &request.Operation{
Name: opStartDeployment,
HTTPMethod: "POST",
HTTPPath: "/apps/{appId}/branches/{branchName}/deployments/start",
}
if input == nil {
input = &StartDeploymentInput{}
}
output = &StartDeploymentOutput{}
req = c.newRequest(op, input, output)
return
}
// StartDeployment API operation for AWS Amplify.
//
// Starts a deployment for a manually deployed app. Manually deployed apps are
// not connected to a repository.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation StartDeployment for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * NotFoundException
// An entity was not found during an operation.
//
// * LimitExceededException
// A resource could not be created because service quotas were exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/StartDeployment
func (c *Amplify) StartDeployment(input *StartDeploymentInput) (*StartDeploymentOutput, error) {
req, out := c.StartDeploymentRequest(input)
return out, req.Send()
}
// StartDeploymentWithContext is the same as StartDeployment with the addition of
// the ability to pass a context and additional request options.
//
// See StartDeployment for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) StartDeploymentWithContext(ctx aws.Context, input *StartDeploymentInput, opts ...request.Option) (*StartDeploymentOutput, error) {
req, out := c.StartDeploymentRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opStartJob = "StartJob"
// StartJobRequest generates a "aws/request.Request" representing the
// client's request for the StartJob operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See StartJob for more information on using the StartJob
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the StartJobRequest method.
// req, resp := client.StartJobRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/StartJob
func (c *Amplify) StartJobRequest(input *StartJobInput) (req *request.Request, output *StartJobOutput) {
op := &request.Operation{
Name: opStartJob,
HTTPMethod: "POST",
HTTPPath: "/apps/{appId}/branches/{branchName}/jobs",
}
if input == nil {
input = &StartJobInput{}
}
output = &StartJobOutput{}
req = c.newRequest(op, input, output)
return
}
// StartJob API operation for AWS Amplify.
//
// Starts a new job for a branch of an Amplify app.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation StartJob for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * NotFoundException
// An entity was not found during an operation.
//
// * LimitExceededException
// A resource could not be created because service quotas were exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/StartJob
func (c *Amplify) StartJob(input *StartJobInput) (*StartJobOutput, error) {
req, out := c.StartJobRequest(input)
return out, req.Send()
}
// StartJobWithContext is the same as StartJob with the addition of
// the ability to pass a context and additional request options.
//
// See StartJob for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) StartJobWithContext(ctx aws.Context, input *StartJobInput, opts ...request.Option) (*StartJobOutput, error) {
req, out := c.StartJobRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
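// A usage sketch (editorial addition): starting a release build for a branch.
// The "RELEASE" job type is assumed from the JobType enum; "svc" and the IDs
// are placeholders.
//
//    out, err := svc.StartJob(&StartJobInput{
//        AppId:      aws.String("d1a2b3c4example"),
//        BranchName: aws.String("main"),
//        JobType:    aws.String("RELEASE"),
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.JobSummary.JobId))
//    }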
const opStopJob = "StopJob"
// StopJobRequest generates a "aws/request.Request" representing the
// client's request for the StopJob operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See StopJob for more information on using the StopJob
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the StopJobRequest method.
// req, resp := client.StopJobRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/StopJob
func (c *Amplify) StopJobRequest(input *StopJobInput) (req *request.Request, output *StopJobOutput) {
op := &request.Operation{
Name: opStopJob,
HTTPMethod: "DELETE",
HTTPPath: "/apps/{appId}/branches/{branchName}/jobs/{jobId}/stop",
}
if input == nil {
input = &StopJobInput{}
}
output = &StopJobOutput{}
req = c.newRequest(op, input, output)
return
}
// StopJob API operation for AWS Amplify.
//
// Stops a job that is in progress for a branch of an Amplify app.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation StopJob for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * NotFoundException
// An entity was not found during an operation.
//
// * LimitExceededException
// A resource could not be created because service quotas were exceeded.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/StopJob
func (c *Amplify) StopJob(input *StopJobInput) (*StopJobOutput, error) {
req, out := c.StopJobRequest(input)
return out, req.Send()
}
// StopJobWithContext is the same as StopJob with the addition of
// the ability to pass a context and additional request options.
//
// See StopJob for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) StopJobWithContext(ctx aws.Context, input *StopJobInput, opts ...request.Option) (*StopJobOutput, error) {
req, out := c.StopJobRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opTagResource = "TagResource"
// TagResourceRequest generates a "aws/request.Request" representing the
// client's request for the TagResource operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See TagResource for more information on using the TagResource
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the TagResourceRequest method.
// req, resp := client.TagResourceRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/TagResource
func (c *Amplify) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) {
op := &request.Operation{
Name: opTagResource,
HTTPMethod: "POST",
HTTPPath: "/tags/{resourceArn}",
}
if input == nil {
input = &TagResourceInput{}
}
output = &TagResourceOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// TagResource API operation for AWS Amplify.
//
// Tags the resource with a tag key and value.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation TagResource for usage and error information.
//
// Returned Error Types:
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * BadRequestException
// A request contains unexpected data.
//
// * ResourceNotFoundException
// An operation failed due to a non-existent resource.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/TagResource
func (c *Amplify) TagResource(input *TagResourceInput) (*TagResourceOutput, error) {
req, out := c.TagResourceRequest(input)
return out, req.Send()
}
// TagResourceWithContext is the same as TagResource with the addition of
// the ability to pass a context and additional request options.
//
// See TagResource for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) {
req, out := c.TagResourceRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
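// A usage sketch (editorial addition): tagging an app by ARN. The ARN and the
// tag keys are placeholders; "svc" is a configured client.
//
//    _, err := svc.TagResource(&TagResourceInput{
//        ResourceArn: aws.String("arn:aws:amplify:us-east-1:111122223333:apps/d1a2b3c4example"),
//        Tags: map[string]*string{
//            "team": aws.String("web"),
//            "env":  aws.String("prod"),
//        },
//    })
//    if err != nil {
//        // handle the error
//    }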
const opUntagResource = "UntagResource"
// UntagResourceRequest generates a "aws/request.Request" representing the
// client's request for the UntagResource operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See UntagResource for more information on using the UntagResource
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the UntagResourceRequest method.
// req, resp := client.UntagResourceRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UntagResource
func (c *Amplify) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) {
op := &request.Operation{
Name: opUntagResource,
HTTPMethod: "DELETE",
HTTPPath: "/tags/{resourceArn}",
}
if input == nil {
input = &UntagResourceInput{}
}
output = &UntagResourceOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// UntagResource API operation for AWS Amplify.
//
// Untags a resource with a specified Amazon Resource Name (ARN).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation UntagResource for usage and error information.
//
// Returned Error Types:
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * BadRequestException
// A request contains unexpected data.
//
// * ResourceNotFoundException
// An operation failed due to a non-existent resource.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UntagResource
func (c *Amplify) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) {
req, out := c.UntagResourceRequest(input)
return out, req.Send()
}
// UntagResourceWithContext is the same as UntagResource with the addition of
// the ability to pass a context and additional request options.
//
// See UntagResource for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) {
req, out := c.UntagResourceRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opUpdateApp = "UpdateApp"
// UpdateAppRequest generates a "aws/request.Request" representing the
// client's request for the UpdateApp operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See UpdateApp for more information on using the UpdateApp
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the UpdateAppRequest method.
// req, resp := client.UpdateAppRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UpdateApp
func (c *Amplify) UpdateAppRequest(input *UpdateAppInput) (req *request.Request, output *UpdateAppOutput) {
op := &request.Operation{
Name: opUpdateApp,
HTTPMethod: "POST",
HTTPPath: "/apps/{appId}",
}
if input == nil {
input = &UpdateAppInput{}
}
output = &UpdateAppOutput{}
req = c.newRequest(op, input, output)
return
}
// UpdateApp API operation for AWS Amplify.
//
// Updates an existing Amplify app.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation UpdateApp for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * NotFoundException
// An entity was not found during an operation.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UpdateApp
func (c *Amplify) UpdateApp(input *UpdateAppInput) (*UpdateAppOutput, error) {
req, out := c.UpdateAppRequest(input)
return out, req.Send()
}
// UpdateAppWithContext is the same as UpdateApp with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateApp for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) UpdateAppWithContext(ctx aws.Context, input *UpdateAppInput, opts ...request.Option) (*UpdateAppOutput, error) {
req, out := c.UpdateAppRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
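// A usage sketch (editorial addition): renaming an app and enabling branch
// auto-build. Field names are assumed from UpdateAppInput and UpdateAppOutput;
// "svc" and the app ID are placeholders.
//
//    out, err := svc.UpdateApp(&UpdateAppInput{
//        AppId:                 aws.String("d1a2b3c4example"),
//        Name:                  aws.String("my-renamed-app"),
//        EnableBranchAutoBuild: aws.Bool(true),
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.App.Name))
//    }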
const opUpdateBranch = "UpdateBranch"
// UpdateBranchRequest generates a "aws/request.Request" representing the
// client's request for the UpdateBranch operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See UpdateBranch for more information on using the UpdateBranch
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the UpdateBranchRequest method.
// req, resp := client.UpdateBranchRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UpdateBranch
func (c *Amplify) UpdateBranchRequest(input *UpdateBranchInput) (req *request.Request, output *UpdateBranchOutput) {
op := &request.Operation{
Name: opUpdateBranch,
HTTPMethod: "POST",
HTTPPath: "/apps/{appId}/branches/{branchName}",
}
if input == nil {
input = &UpdateBranchInput{}
}
output = &UpdateBranchOutput{}
req = c.newRequest(op, input, output)
return
}
// UpdateBranch API operation for AWS Amplify.
//
// Updates a branch for an Amplify app.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation UpdateBranch for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * NotFoundException
// An entity was not found during an operation.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * DependentServiceFailureException
// An operation failed because a dependent service threw an exception.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UpdateBranch
func (c *Amplify) UpdateBranch(input *UpdateBranchInput) (*UpdateBranchOutput, error) {
req, out := c.UpdateBranchRequest(input)
return out, req.Send()
}
// UpdateBranchWithContext is the same as UpdateBranch with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateBranch for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) UpdateBranchWithContext(ctx aws.Context, input *UpdateBranchInput, opts ...request.Option) (*UpdateBranchOutput, error) {
req, out := c.UpdateBranchRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opUpdateDomainAssociation = "UpdateDomainAssociation"
// UpdateDomainAssociationRequest generates a "aws/request.Request" representing the
// client's request for the UpdateDomainAssociation operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See UpdateDomainAssociation for more information on using the UpdateDomainAssociation
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the UpdateDomainAssociationRequest method.
// req, resp := client.UpdateDomainAssociationRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UpdateDomainAssociation
func (c *Amplify) UpdateDomainAssociationRequest(input *UpdateDomainAssociationInput) (req *request.Request, output *UpdateDomainAssociationOutput) {
op := &request.Operation{
Name: opUpdateDomainAssociation,
HTTPMethod: "POST",
HTTPPath: "/apps/{appId}/domains/{domainName}",
}
if input == nil {
input = &UpdateDomainAssociationInput{}
}
output = &UpdateDomainAssociationOutput{}
req = c.newRequest(op, input, output)
return
}
// UpdateDomainAssociation API operation for AWS Amplify.
//
// Updates a domain association for an Amplify app.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation UpdateDomainAssociation for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * NotFoundException
// An entity was not found during an operation.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * DependentServiceFailureException
// An operation failed because a dependent service threw an exception.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UpdateDomainAssociation
func (c *Amplify) UpdateDomainAssociation(input *UpdateDomainAssociationInput) (*UpdateDomainAssociationOutput, error) {
req, out := c.UpdateDomainAssociationRequest(input)
return out, req.Send()
}
// UpdateDomainAssociationWithContext is the same as UpdateDomainAssociation with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateDomainAssociation for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) UpdateDomainAssociationWithContext(ctx aws.Context, input *UpdateDomainAssociationInput, opts ...request.Option) (*UpdateDomainAssociationOutput, error) {
req, out := c.UpdateDomainAssociationRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opUpdateWebhook = "UpdateWebhook"
// UpdateWebhookRequest generates a "aws/request.Request" representing the
// client's request for the UpdateWebhook operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See UpdateWebhook for more information on using the UpdateWebhook
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the UpdateWebhookRequest method.
// req, resp := client.UpdateWebhookRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UpdateWebhook
func (c *Amplify) UpdateWebhookRequest(input *UpdateWebhookInput) (req *request.Request, output *UpdateWebhookOutput) {
op := &request.Operation{
Name: opUpdateWebhook,
HTTPMethod: "POST",
HTTPPath: "/webhooks/{webhookId}",
}
if input == nil {
input = &UpdateWebhookInput{}
}
output = &UpdateWebhookOutput{}
req = c.newRequest(op, input, output)
return
}
// UpdateWebhook API operation for AWS Amplify.
//
// Updates a webhook.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Amplify's
// API operation UpdateWebhook for usage and error information.
//
// Returned Error Types:
// * BadRequestException
// A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access.
//
// * NotFoundException
// An entity was not found during an operation.
//
// * InternalFailureException
// The service failed to perform an operation due to an internal issue.
//
// * DependentServiceFailureException
// An operation failed because a dependent service threw an exception.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UpdateWebhook
func (c *Amplify) UpdateWebhook(input *UpdateWebhookInput) (*UpdateWebhookOutput, error) {
req, out := c.UpdateWebhookRequest(input)
return out, req.Send()
}
// UpdateWebhookWithContext is the same as UpdateWebhook with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateWebhook for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Amplify) UpdateWebhookWithContext(ctx aws.Context, input *UpdateWebhookInput, opts ...request.Option) (*UpdateWebhookOutput, error) {
req, out := c.UpdateWebhookRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// Represents the different branches of a repository for building, deploying,
// and hosting an Amplify app.
type App struct {
_ struct{} `type:"structure"`
// The Amazon Resource Name (ARN) of the Amplify app.
//
// AppArn is a required field
AppArn *string `locationName:"appArn" type:"string" required:"true"`
// The unique ID of the Amplify app.
//
// AppId is a required field
AppId *string `locationName:"appId" min:"1" type:"string" required:"true"`
// Describes the automated branch creation configuration for the Amplify app.
AutoBranchCreationConfig *AutoBranchCreationConfig `locationName:"autoBranchCreationConfig" type:"structure"`
// Describes the automated branch creation glob patterns for the Amplify app.
AutoBranchCreationPatterns []*string `locationName:"autoBranchCreationPatterns" type:"list"`
// The basic authorization credentials for branches for the Amplify app.
BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string" sensitive:"true"`
// Describes the content of the build specification (build spec) for the Amplify
// app.
BuildSpec *string `locationName:"buildSpec" min:"1" type:"string"`
// The creation date and time for the Amplify app.
//
// CreateTime is a required field
CreateTime *time.Time `locationName:"createTime" type:"timestamp" required:"true"`
// Describes the custom HTTP headers for the Amplify app.
CustomHeaders *string `locationName:"customHeaders" min:"1" type:"string"`
// Describes the custom redirect and rewrite rules for the Amplify app.
CustomRules []*CustomRule `locationName:"customRules" type:"list"`
// The default domain for the Amplify app.
//
// DefaultDomain is a required field
DefaultDomain *string `locationName:"defaultDomain" min:"1" type:"string" required:"true"`
// The description for the Amplify app.
//
// Description is a required field
Description *string `locationName:"description" type:"string" required:"true"`
// Enables automated branch creation for the Amplify app.
EnableAutoBranchCreation *bool `locationName:"enableAutoBranchCreation" type:"boolean"`
// Enables basic authorization for the Amplify app's branches.
//
// EnableBasicAuth is a required field
EnableBasicAuth *bool `locationName:"enableBasicAuth" type:"boolean" required:"true"`
// Enables the auto-building of branches for the Amplify app.
//
// EnableBranchAutoBuild is a required field
EnableBranchAutoBuild *bool `locationName:"enableBranchAutoBuild" type:"boolean" required:"true"`
// Automatically disconnects a branch in the Amplify Console when you delete
// a branch from your Git repository.
EnableBranchAutoDeletion *bool `locationName:"enableBranchAutoDeletion" type:"boolean"`
// The environment variables for the Amplify app.
//
// EnvironmentVariables is a required field
EnvironmentVariables map[string]*string `locationName:"environmentVariables" type:"map" required:"true"`
// The Amazon Resource Name (ARN) of the AWS Identity and Access Management
// (IAM) service role for the Amplify app.
IamServiceRoleArn *string `locationName:"iamServiceRoleArn" min:"1" type:"string"`
// The name for the Amplify app.
//
// Name is a required field
Name *string `locationName:"name" min:"1" type:"string" required:"true"`
// The platform for the Amplify app.
//
// Platform is a required field
Platform *string `locationName:"platform" type:"string" required:"true" enum:"Platform"`
// Describes the information about a production branch of the Amplify app.
ProductionBranch *ProductionBranch `locationName:"productionBranch" type:"structure"`
// The repository for the Amplify app.
//
// Repository is a required field
Repository *string `locationName:"repository" type:"string" required:"true"`
// The tags for the Amplify app.
Tags map[string]*string `locationName:"tags" min:"1" type:"map"`
// The last updated date and time for the Amplify app.
//
// UpdateTime is a required field
UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" required:"true"`
}
// String returns the string representation
func (s App) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s App) GoString() string {
return s.String()
}
// SetAppArn sets the AppArn field's value.
func (s *App) SetAppArn(v string) *App {
s.AppArn = &v
return s
}
// SetAppId sets the AppId field's value.
func (s *App) SetAppId(v string) *App {
s.AppId = &v
return s
}
// SetAutoBranchCreationConfig sets the AutoBranchCreationConfig field's value.
func (s *App) SetAutoBranchCreationConfig(v *AutoBranchCreationConfig) *App {
s.AutoBranchCreationConfig = v
return s
}
// SetAutoBranchCreationPatterns sets the AutoBranchCreationPatterns field's value.
func (s *App) SetAutoBranchCreationPatterns(v []*string) *App {
s.AutoBranchCreationPatterns = v
return s
}
// SetBasicAuthCredentials sets the BasicAuthCredentials field's value.
func (s *App) SetBasicAuthCredentials(v string) *App {
s.BasicAuthCredentials = &v
return s
}
// SetBuildSpec sets the BuildSpec field's value.
func (s *App) SetBuildSpec(v string) *App {
s.BuildSpec = &v
return s
}
// SetCreateTime sets the CreateTime field's value.
func (s *App) SetCreateTime(v time.Time) *App {
s.CreateTime = &v
return s
}
// SetCustomHeaders sets the CustomHeaders field's value.
func (s *App) SetCustomHeaders(v string) *App {
s.CustomHeaders = &v
return s
}
// SetCustomRules sets the CustomRules field's value.
func (s *App) SetCustomRules(v []*CustomRule) *App {
s.CustomRules = v
return s
}
// SetDefaultDomain sets the DefaultDomain field's value.
func (s *App) SetDefaultDomain(v string) *App {
s.DefaultDomain = &v
return s
}
// SetDescription sets the Description field's value.
func (s *App) SetDescription(v string) *App {
s.Description = &v
return s
}
// SetEnableAutoBranchCreation sets the EnableAutoBranchCreation field's value.
func (s *App) SetEnableAutoBranchCreation(v bool) *App {
s.EnableAutoBranchCreation = &v
return s
}
// SetEnableBasicAuth sets the EnableBasicAuth field's value.
func (s *App) SetEnableBasicAuth(v bool) *App {
s.EnableBasicAuth = &v
return s
}
// SetEnableBranchAutoBuild sets the EnableBranchAutoBuild field's value.
func (s *App) SetEnableBranchAutoBuild(v bool) *App {
s.EnableBranchAutoBuild = &v
return s
}
// SetEnableBranchAutoDeletion sets the EnableBranchAutoDeletion field's value.
func (s *App) SetEnableBranchAutoDeletion(v bool) *App {
s.EnableBranchAutoDeletion = &v
return s
}
// SetEnvironmentVariables sets the EnvironmentVariables field's value.
func (s *App) SetEnvironmentVariables(v map[string]*string) *App {
s.EnvironmentVariables = v
return s
}
// SetIamServiceRoleArn sets the IamServiceRoleArn field's value.
func (s *App) SetIamServiceRoleArn(v string) *App {
s.IamServiceRoleArn = &v
return s
}
// SetName sets the Name field's value.
func (s *App) SetName(v string) *App {
s.Name = &v
return s
}
// SetPlatform sets the Platform field's value.
func (s *App) SetPlatform(v string) *App {
s.Platform = &v
return s
}
// SetProductionBranch sets the ProductionBranch field's value.
func (s *App) SetProductionBranch(v *ProductionBranch) *App {
s.ProductionBranch = v
return s
}
// SetRepository sets the Repository field's value.
func (s *App) SetRepository(v string) *App {
s.Repository = &v
return s
}
// SetTags sets the Tags field's value.
func (s *App) SetTags(v map[string]*string) *App {
s.Tags = v
return s
}
// SetUpdateTime sets the UpdateTime field's value.
func (s *App) SetUpdateTime(v time.Time) *App {
s.UpdateTime = &v
return s
}
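// Editorial note: each Set* method above returns its receiver, so fields can
// be chained fluently. App is normally populated from API responses, so this
// sketch is purely illustrative:
//
//    app := (&App{}).
//        SetName("demo-app").
//        SetPlatform("WEB").
//        SetDescription("an example app")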
// Describes an artifact.
type Artifact struct {
_ struct{} `type:"structure"`
// The file name for the artifact.
//
// ArtifactFileName is a required field
ArtifactFileName *string `locationName:"artifactFileName" type:"string" required:"true"`
// The unique ID for the artifact.
//
// ArtifactId is a required field
ArtifactId *string `locationName:"artifactId" type:"string" required:"true"`
}
// String returns the string representation
func (s Artifact) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Artifact) GoString() string {
return s.String()
}
// SetArtifactFileName sets the ArtifactFileName field's value.
func (s *Artifact) SetArtifactFileName(v string) *Artifact {
s.ArtifactFileName = &v
return s
}
// SetArtifactId sets the ArtifactId field's value.
func (s *Artifact) SetArtifactId(v string) *Artifact {
s.ArtifactId = &v
return s
}
// Describes the automated branch creation configuration.
type AutoBranchCreationConfig struct {
_ struct{} `type:"structure"`
// The basic authorization credentials for the autocreated branch.
BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string" sensitive:"true"`
// The build specification (build spec) for the autocreated branch.
BuildSpec *string `locationName:"buildSpec" min:"1" type:"string"`
// Enables auto building for the autocreated branch.
EnableAutoBuild *bool `locationName:"enableAutoBuild" type:"boolean"`
// Enables basic authorization for the autocreated branch.
EnableBasicAuth *bool `locationName:"enableBasicAuth" type:"boolean"`
// Enables performance mode for the branch.
//
// Performance mode optimizes for faster hosting performance by keeping content
// cached at the edge for a longer interval. When performance mode is enabled,
// hosting configuration or code changes can take up to 10 minutes to roll out.
EnablePerformanceMode *bool `locationName:"enablePerformanceMode" type:"boolean"`
// Enables pull request previews for the autocreated branch.
EnablePullRequestPreview *bool `locationName:"enablePullRequestPreview" type:"boolean"`
// The environment variables for the autocreated branch.
EnvironmentVariables map[string]*string `locationName:"environmentVariables" type:"map"`
// The framework for the autocreated branch.
Framework *string `locationName:"framework" type:"string"`
// The Amplify environment name for the pull request.
PullRequestEnvironmentName *string `locationName:"pullRequestEnvironmentName" type:"string"`
// Describes the current stage for the autocreated branch.
Stage *string `locationName:"stage" type:"string" enum:"Stage"`
}
// String returns the string representation
func (s AutoBranchCreationConfig) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AutoBranchCreationConfig) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *AutoBranchCreationConfig) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "AutoBranchCreationConfig"}
if s.BuildSpec != nil && len(*s.BuildSpec) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BuildSpec", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
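// Editorial note: Validate runs locally before a request is sent, and it can
// also be called directly. For example, an explicitly empty build spec trips
// the minimum-length check above:
//
//    cfg := (&AutoBranchCreationConfig{}).SetBuildSpec("")
//    if err := cfg.Validate(); err != nil {
//        fmt.Println(err) // reports that BuildSpec is below its minimum length of 1
//    }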
// SetBasicAuthCredentials sets the BasicAuthCredentials field's value.
func (s *AutoBranchCreationConfig) SetBasicAuthCredentials(v string) *AutoBranchCreationConfig {
s.BasicAuthCredentials = &v
return s
}
// SetBuildSpec sets the BuildSpec field's value.
func (s *AutoBranchCreationConfig) SetBuildSpec(v string) *AutoBranchCreationConfig {
s.BuildSpec = &v
return s
}
// SetEnableAutoBuild sets the EnableAutoBuild field's value.
func (s *AutoBranchCreationConfig) SetEnableAutoBuild(v bool) *AutoBranchCreationConfig {
s.EnableAutoBuild = &v
return s
}
// SetEnableBasicAuth sets the EnableBasicAuth field's value.
func (s *AutoBranchCreationConfig) SetEnableBasicAuth(v bool) *AutoBranchCreationConfig {
s.EnableBasicAuth = &v
return s
}
// SetEnablePerformanceMode sets the EnablePerformanceMode field's value.
func (s *AutoBranchCreationConfig) SetEnablePerformanceMode(v bool) *AutoBranchCreationConfig {
s.EnablePerformanceMode = &v
return s
}
// SetEnablePullRequestPreview sets the EnablePullRequestPreview field's value.
func (s *AutoBranchCreationConfig) SetEnablePullRequestPreview(v bool) *AutoBranchCreationConfig {
s.EnablePullRequestPreview = &v
return s
}
// SetEnvironmentVariables sets the EnvironmentVariables field's value.
func (s *AutoBranchCreationConfig) SetEnvironmentVariables(v map[string]*string) *AutoBranchCreationConfig {
s.EnvironmentVariables = v
return s
}
// SetFramework sets the Framework field's value.
func (s *AutoBranchCreationConfig) SetFramework(v string) *AutoBranchCreationConfig {
s.Framework = &v
return s
}
// SetPullRequestEnvironmentName sets the PullRequestEnvironmentName field's value.
func (s *AutoBranchCreationConfig) SetPullRequestEnvironmentName(v string) *AutoBranchCreationConfig {
s.PullRequestEnvironmentName = &v
return s
}
// SetStage sets the Stage field's value.
func (s *AutoBranchCreationConfig) SetStage(v string) *AutoBranchCreationConfig {
s.Stage = &v
return s
}
// Describes the backend environment for an Amplify app.
type BackendEnvironment struct {
_ struct{} `type:"structure"`
// The Amazon Resource Name (ARN) for a backend environment that is part of
// an Amplify app.
//
// BackendEnvironmentArn is a required field
BackendEnvironmentArn *string `locationName:"backendEnvironmentArn" min:"1" type:"string" required:"true"`
// The creation date and time for a backend environment that is part of an Amplify
// app.
//
// CreateTime is a required field
CreateTime *time.Time `locationName:"createTime" type:"timestamp" required:"true"`
// The name of deployment artifacts.
DeploymentArtifacts *string `locationName:"deploymentArtifacts" min:"1" type:"string"`
// The name for a backend environment that is part of an Amplify app.
//
// EnvironmentName is a required field
EnvironmentName *string `locationName:"environmentName" min:"1" type:"string" required:"true"`
// The AWS CloudFormation stack name of a backend environment.
StackName *string `locationName:"stackName" min:"1" type:"string"`
// The last updated date and time for a backend environment that is part of
// an Amplify app.
//
// UpdateTime is a required field
UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" required:"true"`
}
// String returns the string representation
func (s BackendEnvironment) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s BackendEnvironment) GoString() string {
return s.String()
}
// SetBackendEnvironmentArn sets the BackendEnvironmentArn field's value.
func (s *BackendEnvironment) SetBackendEnvironmentArn(v string) *BackendEnvironment {
s.BackendEnvironmentArn = &v
return s
}
// SetCreateTime sets the CreateTime field's value.
func (s *BackendEnvironment) SetCreateTime(v time.Time) *BackendEnvironment {
s.CreateTime = &v
return s
}
// SetDeploymentArtifacts sets the DeploymentArtifacts field's value.
func (s *BackendEnvironment) SetDeploymentArtifacts(v string) *BackendEnvironment {
s.DeploymentArtifacts = &v
return s
}
// SetEnvironmentName sets the EnvironmentName field's value.
func (s *BackendEnvironment) SetEnvironmentName(v string) *BackendEnvironment {
s.EnvironmentName = &v
return s
}
// SetStackName sets the StackName field's value.
func (s *BackendEnvironment) SetStackName(v string) *BackendEnvironment {
s.StackName = &v
return s
}
// SetUpdateTime sets the UpdateTime field's value.
func (s *BackendEnvironment) SetUpdateTime(v time.Time) *BackendEnvironment {
s.UpdateTime = &v
return s
}
// A request contains unexpected data.
type BadRequestException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Message_ *string `locationName:"message" type:"string"`
}
// String returns the string representation
func (s BadRequestException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s BadRequestException) GoString() string {
return s.String()
}
func newErrorBadRequestException(v protocol.ResponseMetadata) error {
return &BadRequestException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *BadRequestException) Code() string {
return "BadRequestException"
}
// Message returns the exception's message.
func (s *BadRequestException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *BadRequestException) OrigErr() error {
return nil
}
func (s *BadRequestException) Error() string {
return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *BadRequestException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for the request.
func (s *BadRequestException) RequestID() string {
return s.RespMetadata.RequestID
}
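// Editorial sketch of the error-handling pattern the operation docs above
// describe, via a runtime type assertion on the returned error (GetApp is
// defined elsewhere in this file; "svc" and the app ID are placeholders):
//
//    _, err := svc.GetApp(&GetAppInput{AppId: aws.String("d1a2b3c4example")})
//    if err != nil {
//        if bre, ok := err.(*BadRequestException); ok {
//            fmt.Println(bre.Code(), bre.Message(), bre.RequestID())
//        }
//    }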
// The branch for an Amplify app, which maps to a third-party repository branch.
type Branch struct {
_ struct{} `type:"structure"`
// The ID of the active job for a branch of an Amplify app.
//
// ActiveJobId is a required field
ActiveJobId *string `locationName:"activeJobId" type:"string" required:"true"`
// A list of custom resources that are linked to this branch.
AssociatedResources []*string `locationName:"associatedResources" type:"list"`
// The Amazon Resource Name (ARN) for a backend environment that is part of
// an Amplify app.
BackendEnvironmentArn *string `locationName:"backendEnvironmentArn" min:"1" type:"string"`
// The basic authorization credentials for a branch of an Amplify app.
BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string" sensitive:"true"`
// The Amazon Resource Name (ARN) for a branch that is part of an Amplify app.
//
// BranchArn is a required field
BranchArn *string `locationName:"branchArn" type:"string" required:"true"`
// The name for the branch that is part of an Amplify app.
//
// BranchName is a required field
BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"`
// The build specification (build spec) content for the branch of an Amplify
// app.
BuildSpec *string `locationName:"buildSpec" min:"1" type:"string"`
// The creation date and time for a branch that is part of an Amplify app.
//
// CreateTime is a required field
CreateTime *time.Time `locationName:"createTime" type:"timestamp" required:"true"`
// The custom domains for a branch of an Amplify app.
//
// CustomDomains is a required field
CustomDomains []*string `locationName:"customDomains" type:"list" required:"true"`
// The description for the branch that is part of an Amplify app.
//
// Description is a required field
Description *string `locationName:"description" type:"string" required:"true"`
// The destination branch if the branch is a pull request branch.
DestinationBranch *string `locationName:"destinationBranch" min:"1" type:"string"`
// The display name for the branch. This is used as the default domain prefix.
//
// DisplayName is a required field
DisplayName *string `locationName:"displayName" type:"string" required:"true"`
// Enables auto-building on push for a branch of an Amplify app.
//
// EnableAutoBuild is a required field
EnableAutoBuild *bool `locationName:"enableAutoBuild" type:"boolean" required:"true"`
// Enables basic authorization for a branch of an Amplify app.
//
// EnableBasicAuth is a required field
EnableBasicAuth *bool `locationName:"enableBasicAuth" type:"boolean" required:"true"`
// Enables notifications for a branch that is part of an Amplify app.
//
// EnableNotification is a required field
EnableNotification *bool `locationName:"enableNotification" type:"boolean" required:"true"`
// Enables performance mode for the branch.
//
// Performance mode optimizes for faster hosting performance by keeping content
// cached at the edge for a longer interval. When performance mode is enabled,
// hosting configuration or code changes can take up to 10 minutes to roll out.
EnablePerformanceMode *bool `locationName:"enablePerformanceMode" type:"boolean"`
// Enables pull request previews for the branch.
//
// EnablePullRequestPreview is a required field
EnablePullRequestPreview *bool `locationName:"enablePullRequestPreview" type:"boolean" required:"true"`
// The environment variables specific to a branch of an Amplify app.
//
// EnvironmentVariables is a required field
EnvironmentVariables map[string]*string `locationName:"environmentVariables" type:"map" required:"true"`
// The framework for a branch of an Amplify app.
//
// Framework is a required field
Framework *string `locationName:"framework" type:"string" required:"true"`
// The Amplify environment name for the pull request.
PullRequestEnvironmentName *string `locationName:"pullRequestEnvironmentName" type:"string"`
// The source branch if the branch is a pull request branch.
SourceBranch *string `locationName:"sourceBranch" min:"1" type:"string"`
// The current stage for the branch that is part of an Amplify app.
//
// Stage is a required field
Stage *string `locationName:"stage" type:"string" required:"true" enum:"Stage"`
// The tags for the branch of an Amplify app.
Tags map[string]*string `locationName:"tags" min:"1" type:"map"`
// The thumbnail URL for the branch of an Amplify app.
ThumbnailUrl *string `locationName:"thumbnailUrl" min:"1" type:"string"`
// The total number of jobs that are part of an Amplify app.
//
// TotalNumberOfJobs is a required field
TotalNumberOfJobs *string `locationName:"totalNumberOfJobs" type:"string" required:"true"`
// The content Time to Live (TTL) for the website in seconds.
//
// Ttl is a required field
Ttl *string `locationName:"ttl" type:"string" required:"true"`
// The last updated date and time for a branch that is part of an Amplify app.
//
// UpdateTime is a required field
UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" required:"true"`
}
// String returns the string representation
func (s Branch) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Branch) GoString() string {
return s.String()
}
// SetActiveJobId sets the ActiveJobId field's value.
func (s *Branch) SetActiveJobId(v string) *Branch {
s.ActiveJobId = &v
return s
}
// SetAssociatedResources sets the AssociatedResources field's value.
func (s *Branch) SetAssociatedResources(v []*string) *Branch {
s.AssociatedResources = v
return s
}
// SetBackendEnvironmentArn sets the BackendEnvironmentArn field's value.
func (s *Branch) SetBackendEnvironmentArn(v string) *Branch {
s.BackendEnvironmentArn = &v
return s
}
// SetBasicAuthCredentials sets the BasicAuthCredentials field's value.
func (s *Branch) SetBasicAuthCredentials(v string) *Branch {
s.BasicAuthCredentials = &v
return s
}
// SetBranchArn sets the BranchArn field's value.
func (s *Branch) SetBranchArn(v string) *Branch {
s.BranchArn = &v
return s
}
// SetBranchName sets the BranchName field's value.
func (s *Branch) SetBranchName(v string) *Branch {
s.BranchName = &v
return s
}
// SetBuildSpec sets the BuildSpec field's value.
func (s *Branch) SetBuildSpec(v string) *Branch {
s.BuildSpec = &v
return s
}
// SetCreateTime sets the CreateTime field's value.
func (s *Branch) SetCreateTime(v time.Time) *Branch {
s.CreateTime = &v
return s
}
// SetCustomDomains sets the CustomDomains field's value.
func (s *Branch) SetCustomDomains(v []*string) *Branch {
s.CustomDomains = v
return s
}
// SetDescription sets the Description field's value.
func (s *Branch) SetDescription(v string) *Branch {
s.Description = &v
return s
}
// SetDestinationBranch sets the DestinationBranch field's value.
func (s *Branch) SetDestinationBranch(v string) *Branch {
s.DestinationBranch = &v
return s
}
// SetDisplayName sets the DisplayName field's value.
func (s *Branch) SetDisplayName(v string) *Branch {
s.DisplayName = &v
return s
}
// SetEnableAutoBuild sets the EnableAutoBuild field's value.
func (s *Branch) SetEnableAutoBuild(v bool) *Branch {
s.EnableAutoBuild = &v
return s
}
// SetEnableBasicAuth sets the EnableBasicAuth field's value.
func (s *Branch) SetEnableBasicAuth(v bool) *Branch {
s.EnableBasicAuth = &v
return s
}
// SetEnableNotification sets the EnableNotification field's value.
func (s *Branch) SetEnableNotification(v bool) *Branch {
s.EnableNotification = &v
return s
}
// SetEnablePerformanceMode sets the EnablePerformanceMode field's value.
func (s *Branch) SetEnablePerformanceMode(v bool) *Branch {
s.EnablePerformanceMode = &v
return s
}
// SetEnablePullRequestPreview sets the EnablePullRequestPreview field's value.
func (s *Branch) SetEnablePullRequestPreview(v bool) *Branch {
s.EnablePullRequestPreview = &v
return s
}
// SetEnvironmentVariables sets the EnvironmentVariables field's value.
func (s *Branch) SetEnvironmentVariables(v map[string]*string) *Branch {
s.EnvironmentVariables = v
return s
}
// SetFramework sets the Framework field's value.
func (s *Branch) SetFramework(v string) *Branch {
s.Framework = &v
return s
}
// SetPullRequestEnvironmentName sets the PullRequestEnvironmentName field's value.
func (s *Branch) SetPullRequestEnvironmentName(v string) *Branch {
s.PullRequestEnvironmentName = &v
return s
}
// SetSourceBranch sets the SourceBranch field's value.
func (s *Branch) SetSourceBranch(v string) *Branch {
s.SourceBranch = &v
return s
}
// SetStage sets the Stage field's value.
func (s *Branch) SetStage(v string) *Branch {
s.Stage = &v
return s
}
// SetTags sets the Tags field's value.
func (s *Branch) SetTags(v map[string]*string) *Branch {
s.Tags = v
return s
}
// SetThumbnailUrl sets the ThumbnailUrl field's value.
func (s *Branch) SetThumbnailUrl(v string) *Branch {
s.ThumbnailUrl = &v
return s
}
// SetTotalNumberOfJobs sets the TotalNumberOfJobs field's value.
func (s *Branch) SetTotalNumberOfJobs(v string) *Branch {
s.TotalNumberOfJobs = &v
return s
}
// SetTtl sets the Ttl field's value.
func (s *Branch) SetTtl(v string) *Branch {
s.Ttl = &v
return s
}
// SetUpdateTime sets the UpdateTime field's value.
func (s *Branch) SetUpdateTime(v time.Time) *Branch {
s.UpdateTime = &v
return s
}
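// Editorial sketch, not generated code: Branch fields are modeled as
// pointers, so guard against nil before dereferencing. The branch value is
// assumed to come from a service response such as CreateBranchOutput.Branch.
func exampleInspectBranch(b *Branch) {
    if b == nil || b.BranchName == nil {
        return
    }
    stage := "unknown"
    if b.Stage != nil {
        stage = *b.Stage
    }
    fmt.Printf("branch %s is at stage %s\n", *b.BranchName, stage)
}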
// The request structure used to create apps in Amplify.
type CreateAppInput struct {
_ struct{} `type:"structure"`
// The personal access token for a third-party source control system for an
// Amplify app. The personal access token is used to create a webhook and a
// read-only deploy key. The token is not stored.
AccessToken *string `locationName:"accessToken" min:"1" type:"string" sensitive:"true"`
// The automated branch creation configuration for an Amplify app.
AutoBranchCreationConfig *AutoBranchCreationConfig `locationName:"autoBranchCreationConfig" type:"structure"`
// The automated branch creation glob patterns for an Amplify app.
AutoBranchCreationPatterns []*string `locationName:"autoBranchCreationPatterns" type:"list"`
// The credentials for basic authorization for an Amplify app.
BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string" sensitive:"true"`
// The build specification (build spec) for an Amplify app.
BuildSpec *string `locationName:"buildSpec" min:"1" type:"string"`
// The custom HTTP headers for an Amplify app.
CustomHeaders *string `locationName:"customHeaders" min:"1" type:"string"`
// The custom rewrite and redirect rules for an Amplify app.
CustomRules []*CustomRule `locationName:"customRules" type:"list"`
// The description for an Amplify app.
Description *string `locationName:"description" type:"string"`
// Enables automated branch creation for an Amplify app.
EnableAutoBranchCreation *bool `locationName:"enableAutoBranchCreation" type:"boolean"`
// Enables basic authorization for an Amplify app. This will apply to all branches
// that are part of this app.
EnableBasicAuth *bool `locationName:"enableBasicAuth" type:"boolean"`
// Enables the auto building of branches for an Amplify app.
EnableBranchAutoBuild *bool `locationName:"enableBranchAutoBuild" type:"boolean"`
// Automatically disconnects a branch in the Amplify Console when you delete
// a branch from your Git repository.
EnableBranchAutoDeletion *bool `locationName:"enableBranchAutoDeletion" type:"boolean"`
// The environment variables map for an Amplify app.
EnvironmentVariables map[string]*string `locationName:"environmentVariables" type:"map"`
// The AWS Identity and Access Management (IAM) service role for an Amplify
// app.
IamServiceRoleArn *string `locationName:"iamServiceRoleArn" min:"1" type:"string"`
// The name for an Amplify app.
//
// Name is a required field
Name *string `locationName:"name" min:"1" type:"string" required:"true"`
// The OAuth token for a third-party source control system for an Amplify app.
// The OAuth token is used to create a webhook and a read-only deploy key. The
// OAuth token is not stored.
OauthToken *string `locationName:"oauthToken" type:"string" sensitive:"true"`
// The platform or framework for an Amplify app.
Platform *string `locationName:"platform" type:"string" enum:"Platform"`
// The repository for an Amplify app.
Repository *string `locationName:"repository" type:"string"`
// The tags for an Amplify app.
Tags map[string]*string `locationName:"tags" min:"1" type:"map"`
}
// String returns the string representation
func (s CreateAppInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateAppInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateAppInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateAppInput"}
if s.AccessToken != nil && len(*s.AccessToken) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AccessToken", 1))
}
if s.BuildSpec != nil && len(*s.BuildSpec) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BuildSpec", 1))
}
if s.CustomHeaders != nil && len(*s.CustomHeaders) < 1 {
invalidParams.Add(request.NewErrParamMinLen("CustomHeaders", 1))
}
if s.IamServiceRoleArn != nil && len(*s.IamServiceRoleArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("IamServiceRoleArn", 1))
}
if s.Name == nil {
invalidParams.Add(request.NewErrParamRequired("Name"))
}
if s.Name != nil && len(*s.Name) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Name", 1))
}
if s.Tags != nil && len(s.Tags) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Tags", 1))
}
if s.AutoBranchCreationConfig != nil {
if err := s.AutoBranchCreationConfig.Validate(); err != nil {
invalidParams.AddNested("AutoBranchCreationConfig", err.(request.ErrInvalidParams))
}
}
if s.CustomRules != nil {
for i, v := range s.CustomRules {
if v == nil {
continue
}
if err := v.Validate(); err != nil {
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CustomRules", i), err.(request.ErrInvalidParams))
}
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAccessToken sets the AccessToken field's value.
func (s *CreateAppInput) SetAccessToken(v string) *CreateAppInput {
s.AccessToken = &v
return s
}
// SetAutoBranchCreationConfig sets the AutoBranchCreationConfig field's value.
func (s *CreateAppInput) SetAutoBranchCreationConfig(v *AutoBranchCreationConfig) *CreateAppInput {
s.AutoBranchCreationConfig = v
return s
}
// SetAutoBranchCreationPatterns sets the AutoBranchCreationPatterns field's value.
func (s *CreateAppInput) SetAutoBranchCreationPatterns(v []*string) *CreateAppInput {
s.AutoBranchCreationPatterns = v
return s
}
// SetBasicAuthCredentials sets the BasicAuthCredentials field's value.
func (s *CreateAppInput) SetBasicAuthCredentials(v string) *CreateAppInput {
s.BasicAuthCredentials = &v
return s
}
// SetBuildSpec sets the BuildSpec field's value.
func (s *CreateAppInput) SetBuildSpec(v string) *CreateAppInput {
s.BuildSpec = &v
return s
}
// SetCustomHeaders sets the CustomHeaders field's value.
func (s *CreateAppInput) SetCustomHeaders(v string) *CreateAppInput {
s.CustomHeaders = &v
return s
}
// SetCustomRules sets the CustomRules field's value.
func (s *CreateAppInput) SetCustomRules(v []*CustomRule) *CreateAppInput {
s.CustomRules = v
return s
}
// SetDescription sets the Description field's value.
func (s *CreateAppInput) SetDescription(v string) *CreateAppInput {
s.Description = &v
return s
}
// SetEnableAutoBranchCreation sets the EnableAutoBranchCreation field's value.
func (s *CreateAppInput) SetEnableAutoBranchCreation(v bool) *CreateAppInput {
s.EnableAutoBranchCreation = &v
return s
}
// SetEnableBasicAuth sets the EnableBasicAuth field's value.
func (s *CreateAppInput) SetEnableBasicAuth(v bool) *CreateAppInput {
s.EnableBasicAuth = &v
return s
}
// SetEnableBranchAutoBuild sets the EnableBranchAutoBuild field's value.
func (s *CreateAppInput) SetEnableBranchAutoBuild(v bool) *CreateAppInput {
s.EnableBranchAutoBuild = &v
return s
}
// SetEnableBranchAutoDeletion sets the EnableBranchAutoDeletion field's value.
func (s *CreateAppInput) SetEnableBranchAutoDeletion(v bool) *CreateAppInput {
s.EnableBranchAutoDeletion = &v
return s
}
// SetEnvironmentVariables sets the EnvironmentVariables field's value.
func (s *CreateAppInput) SetEnvironmentVariables(v map[string]*string) *CreateAppInput {
s.EnvironmentVariables = v
return s
}
// SetIamServiceRoleArn sets the IamServiceRoleArn field's value.
func (s *CreateAppInput) SetIamServiceRoleArn(v string) *CreateAppInput {
s.IamServiceRoleArn = &v
return s
}
// SetName sets the Name field's value.
func (s *CreateAppInput) SetName(v string) *CreateAppInput {
s.Name = &v
return s
}
// SetOauthToken sets the OauthToken field's value.
func (s *CreateAppInput) SetOauthToken(v string) *CreateAppInput {
s.OauthToken = &v
return s
}
// SetPlatform sets the Platform field's value.
func (s *CreateAppInput) SetPlatform(v string) *CreateAppInput {
s.Platform = &v
return s
}
// SetRepository sets the Repository field's value.
func (s *CreateAppInput) SetRepository(v string) *CreateAppInput {
s.Repository = &v
return s
}
// SetTags sets the Tags field's value.
func (s *CreateAppInput) SetTags(v map[string]*string) *CreateAppInput {
s.Tags = v
return s
}
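// Editorial sketch, not generated code: building a minimal CreateAppInput
// with the fluent setters and validating it before a request would be sent.
// The app name and repository URL below are hypothetical placeholders.
func exampleBuildCreateAppInput() (*CreateAppInput, error) {
    input := (&CreateAppInput{}).
        SetName("my-app"). // Name is the only required field.
        SetRepository("https://github.com/example/my-app").
        SetEnableBranchAutoBuild(true)
    // Validate applies the same required-field and min-length checks the
    // SDK runs before marshaling the request.
    if err := input.Validate(); err != nil {
        return nil, err
    }
    return input, nil
}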
type CreateAppOutput struct {
_ struct{} `type:"structure"`
// Represents the different branches of a repository for building, deploying,
// and hosting an Amplify app.
//
// App is a required field
App *App `locationName:"app" type:"structure" required:"true"`
}
// String returns the string representation
func (s CreateAppOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateAppOutput) GoString() string {
return s.String()
}
// SetApp sets the App field's value.
func (s *CreateAppOutput) SetApp(v *App) *CreateAppOutput {
s.App = v
return s
}
// The request structure for the backend environment create request.
type CreateBackendEnvironmentInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// The name of the deployment artifacts.
DeploymentArtifacts *string `locationName:"deploymentArtifacts" min:"1" type:"string"`
// The name for the backend environment.
//
// EnvironmentName is a required field
EnvironmentName *string `locationName:"environmentName" min:"1" type:"string" required:"true"`
// The AWS CloudFormation stack name of a backend environment.
StackName *string `locationName:"stackName" min:"1" type:"string"`
}
// String returns the string representation
func (s CreateBackendEnvironmentInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateBackendEnvironmentInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateBackendEnvironmentInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateBackendEnvironmentInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if s.DeploymentArtifacts != nil && len(*s.DeploymentArtifacts) < 1 {
invalidParams.Add(request.NewErrParamMinLen("DeploymentArtifacts", 1))
}
if s.EnvironmentName == nil {
invalidParams.Add(request.NewErrParamRequired("EnvironmentName"))
}
if s.EnvironmentName != nil && len(*s.EnvironmentName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 1))
}
if s.StackName != nil && len(*s.StackName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("StackName", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *CreateBackendEnvironmentInput) SetAppId(v string) *CreateBackendEnvironmentInput {
s.AppId = &v
return s
}
// SetDeploymentArtifacts sets the DeploymentArtifacts field's value.
func (s *CreateBackendEnvironmentInput) SetDeploymentArtifacts(v string) *CreateBackendEnvironmentInput {
s.DeploymentArtifacts = &v
return s
}
// SetEnvironmentName sets the EnvironmentName field's value.
func (s *CreateBackendEnvironmentInput) SetEnvironmentName(v string) *CreateBackendEnvironmentInput {
s.EnvironmentName = &v
return s
}
// SetStackName sets the StackName field's value.
func (s *CreateBackendEnvironmentInput) SetStackName(v string) *CreateBackendEnvironmentInput {
s.StackName = &v
return s
}
// The result structure for the create backend environment request.
type CreateBackendEnvironmentOutput struct {
_ struct{} `type:"structure"`
// Describes the backend environment for an Amplify app.
//
// BackendEnvironment is a required field
BackendEnvironment *BackendEnvironment `locationName:"backendEnvironment" type:"structure" required:"true"`
}
// String returns the string representation
func (s CreateBackendEnvironmentOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateBackendEnvironmentOutput) GoString() string {
return s.String()
}
// SetBackendEnvironment sets the BackendEnvironment field's value.
func (s *CreateBackendEnvironmentOutput) SetBackendEnvironment(v *BackendEnvironment) *CreateBackendEnvironmentOutput {
s.BackendEnvironment = v
return s
}
// The request structure for the create branch request.
type CreateBranchInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// The Amazon Resource Name (ARN) for a backend environment that is part of
// an Amplify app.
BackendEnvironmentArn *string `locationName:"backendEnvironmentArn" min:"1" type:"string"`
// The basic authorization credentials for the branch.
BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string" sensitive:"true"`
// The name for the branch.
//
// BranchName is a required field
BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"`
// The build specification (build spec) for the branch.
BuildSpec *string `locationName:"buildSpec" min:"1" type:"string"`
// The description for the branch.
Description *string `locationName:"description" type:"string"`
// The display name for a branch. This is used as the default domain prefix.
DisplayName *string `locationName:"displayName" type:"string"`
// Enables auto building for the branch.
EnableAutoBuild *bool `locationName:"enableAutoBuild" type:"boolean"`
// Enables basic authorization for the branch.
EnableBasicAuth *bool `locationName:"enableBasicAuth" type:"boolean"`
// Enables notifications for the branch.
EnableNotification *bool `locationName:"enableNotification" type:"boolean"`
// Enables performance mode for the branch.
//
// Performance mode optimizes for faster hosting performance by keeping content
// cached at the edge for a longer interval. When performance mode is enabled,
// hosting configuration or code changes can take up to 10 minutes to roll out.
EnablePerformanceMode *bool `locationName:"enablePerformanceMode" type:"boolean"`
// Enables pull request previews for this branch.
EnablePullRequestPreview *bool `locationName:"enablePullRequestPreview" type:"boolean"`
// The environment variables for the branch.
EnvironmentVariables map[string]*string `locationName:"environmentVariables" type:"map"`
// The framework for the branch.
Framework *string `locationName:"framework" type:"string"`
// The Amplify environment name for the pull request.
PullRequestEnvironmentName *string `locationName:"pullRequestEnvironmentName" type:"string"`
// Describes the current stage for the branch.
Stage *string `locationName:"stage" type:"string" enum:"Stage"`
// The tags for the branch.
Tags map[string]*string `locationName:"tags" min:"1" type:"map"`
// The content Time To Live (TTL) for the website in seconds.
Ttl *string `locationName:"ttl" type:"string"`
}
// String returns the string representation
func (s CreateBranchInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateBranchInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateBranchInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateBranchInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if s.BackendEnvironmentArn != nil && len(*s.BackendEnvironmentArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackendEnvironmentArn", 1))
}
if s.BranchName == nil {
invalidParams.Add(request.NewErrParamRequired("BranchName"))
}
if s.BranchName != nil && len(*s.BranchName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BranchName", 1))
}
if s.BuildSpec != nil && len(*s.BuildSpec) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BuildSpec", 1))
}
if s.Tags != nil && len(s.Tags) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Tags", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *CreateBranchInput) SetAppId(v string) *CreateBranchInput {
s.AppId = &v
return s
}
// SetBackendEnvironmentArn sets the BackendEnvironmentArn field's value.
func (s *CreateBranchInput) SetBackendEnvironmentArn(v string) *CreateBranchInput {
s.BackendEnvironmentArn = &v
return s
}
// SetBasicAuthCredentials sets the BasicAuthCredentials field's value.
func (s *CreateBranchInput) SetBasicAuthCredentials(v string) *CreateBranchInput {
s.BasicAuthCredentials = &v
return s
}
// SetBranchName sets the BranchName field's value.
func (s *CreateBranchInput) SetBranchName(v string) *CreateBranchInput {
s.BranchName = &v
return s
}
// SetBuildSpec sets the BuildSpec field's value.
func (s *CreateBranchInput) SetBuildSpec(v string) *CreateBranchInput {
s.BuildSpec = &v
return s
}
// SetDescription sets the Description field's value.
func (s *CreateBranchInput) SetDescription(v string) *CreateBranchInput {
s.Description = &v
return s
}
// SetDisplayName sets the DisplayName field's value.
func (s *CreateBranchInput) SetDisplayName(v string) *CreateBranchInput {
s.DisplayName = &v
return s
}
// SetEnableAutoBuild sets the EnableAutoBuild field's value.
func (s *CreateBranchInput) SetEnableAutoBuild(v bool) *CreateBranchInput {
s.EnableAutoBuild = &v
return s
}
// SetEnableBasicAuth sets the EnableBasicAuth field's value.
func (s *CreateBranchInput) SetEnableBasicAuth(v bool) *CreateBranchInput {
s.EnableBasicAuth = &v
return s
}
// SetEnableNotification sets the EnableNotification field's value.
func (s *CreateBranchInput) SetEnableNotification(v bool) *CreateBranchInput {
s.EnableNotification = &v
return s
}
// SetEnablePerformanceMode sets the EnablePerformanceMode field's value.
func (s *CreateBranchInput) SetEnablePerformanceMode(v bool) *CreateBranchInput {
s.EnablePerformanceMode = &v
return s
}
// SetEnablePullRequestPreview sets the EnablePullRequestPreview field's value.
func (s *CreateBranchInput) SetEnablePullRequestPreview(v bool) *CreateBranchInput {
s.EnablePullRequestPreview = &v
return s
}
// SetEnvironmentVariables sets the EnvironmentVariables field's value.
func (s *CreateBranchInput) SetEnvironmentVariables(v map[string]*string) *CreateBranchInput {
s.EnvironmentVariables = v
return s
}
// SetFramework sets the Framework field's value.
func (s *CreateBranchInput) SetFramework(v string) *CreateBranchInput {
s.Framework = &v
return s
}
// SetPullRequestEnvironmentName sets the PullRequestEnvironmentName field's value.
func (s *CreateBranchInput) SetPullRequestEnvironmentName(v string) *CreateBranchInput {
s.PullRequestEnvironmentName = &v
return s
}
// SetStage sets the Stage field's value.
func (s *CreateBranchInput) SetStage(v string) *CreateBranchInput {
s.Stage = &v
return s
}
// SetTags sets the Tags field's value.
func (s *CreateBranchInput) SetTags(v map[string]*string) *CreateBranchInput {
s.Tags = v
return s
}
// SetTtl sets the Ttl field's value.
func (s *CreateBranchInput) SetTtl(v string) *CreateBranchInput {
s.Ttl = &v
return s
}
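// Editorial sketch, not generated code: a minimal branch create request.
// The app ID, branch name, stage, and TTL values are hypothetical.
func exampleBuildCreateBranchInput() *CreateBranchInput {
    return (&CreateBranchInput{}).
        SetAppId("d123456789"). // AppId and BranchName are required.
        SetBranchName("main").
        SetStage("PRODUCTION"). // Must be a value from the Stage enum.
        SetTtl("5")             // Content TTL for the website, in seconds.
}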
// The result structure for the create branch request.
type CreateBranchOutput struct {
_ struct{} `type:"structure"`
// Describes the branch for an Amplify app, which maps to a third-party repository
// branch.
//
// Branch is a required field
Branch *Branch `locationName:"branch" type:"structure" required:"true"`
}
// String returns the string representation
func (s CreateBranchOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateBranchOutput) GoString() string {
return s.String()
}
// SetBranch sets the Branch field's value.
func (s *CreateBranchOutput) SetBranch(v *Branch) *CreateBranchOutput {
s.Branch = v
return s
}
// The request structure for the create deployment request.
type CreateDeploymentInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// The name of the branch to use for the job.
//
// BranchName is a required field
BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"`
// An optional file map that contains the file name as the key and the file
// content MD5 hash as the value. If this argument is provided, the service
// will generate a unique upload URL per file. Otherwise, the service will only
// generate a single upload URL for the zipped files.
FileMap map[string]*string `locationName:"fileMap" type:"map"`
}
// String returns the string representation
func (s CreateDeploymentInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateDeploymentInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateDeploymentInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateDeploymentInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if s.BranchName == nil {
invalidParams.Add(request.NewErrParamRequired("BranchName"))
}
if s.BranchName != nil && len(*s.BranchName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BranchName", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *CreateDeploymentInput) SetAppId(v string) *CreateDeploymentInput {
s.AppId = &v
return s
}
// SetBranchName sets the BranchName field's value.
func (s *CreateDeploymentInput) SetBranchName(v string) *CreateDeploymentInput {
s.BranchName = &v
return s
}
// SetFileMap sets the FileMap field's value.
func (s *CreateDeploymentInput) SetFileMap(v map[string]*string) *CreateDeploymentInput {
s.FileMap = v
return s
}
// The result structure for the create deployment request.
type CreateDeploymentOutput struct {
_ struct{} `type:"structure"`
// When the fileMap argument is provided in the request, fileUploadUrls will
// contain a map of file names to upload URLs.
//
// FileUploadUrls is a required field
FileUploadUrls map[string]*string `locationName:"fileUploadUrls" type:"map" required:"true"`
// The job ID for this deployment, which you supply to the start deployment API.
JobId *string `locationName:"jobId" type:"string"`
// When the fileMap argument is not provided in the request, this zipUploadUrl
// is returned.
//
// ZipUploadUrl is a required field
ZipUploadUrl *string `locationName:"zipUploadUrl" type:"string" required:"true"`
}
// String returns the string representation
func (s CreateDeploymentOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateDeploymentOutput) GoString() string {
return s.String()
}
// SetFileUploadUrls sets the FileUploadUrls field's value.
func (s *CreateDeploymentOutput) SetFileUploadUrls(v map[string]*string) *CreateDeploymentOutput {
s.FileUploadUrls = v
return s
}
// SetJobId sets the JobId field's value.
func (s *CreateDeploymentOutput) SetJobId(v string) *CreateDeploymentOutput {
s.JobId = &v
return s
}
// SetZipUploadUrl sets the ZipUploadUrl field's value.
func (s *CreateDeploymentOutput) SetZipUploadUrl(v string) *CreateDeploymentOutput {
s.ZipUploadUrl = &v
return s
}
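// Editorial sketch, not generated code: the two deployment modes described
// above. With a fileMap the service returns one upload URL per file in
// FileUploadUrls; without it the service returns a single ZipUploadUrl. The
// identifiers, file name, and hash below are hypothetical placeholders.
func exampleBuildCreateDeploymentInput(perFile bool) *CreateDeploymentInput {
    input := (&CreateDeploymentInput{}).
        SetAppId("d123456789").
        SetBranchName("main")
    if perFile {
        hash := "9e107d9d372bb6826bd81d3542a419d6" // MD5 of the file content.
        input.SetFileMap(map[string]*string{"index.html": &hash})
    }
    return input
}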
// The request structure for the create domain association request.
type CreateDomainAssociationInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// Sets the branch patterns for automatic subdomain creation.
AutoSubDomainCreationPatterns []*string `locationName:"autoSubDomainCreationPatterns" type:"list"`
// The Amazon Resource Name (ARN) for the AWS Identity and Access Management
// (IAM) service role that is required for automatically creating subdomains.
AutoSubDomainIAMRole *string `locationName:"autoSubDomainIAMRole" type:"string"`
// The domain name for the domain association.
//
// DomainName is a required field
DomainName *string `locationName:"domainName" type:"string" required:"true"`
// Enables the automated creation of subdomains for branches.
EnableAutoSubDomain *bool `locationName:"enableAutoSubDomain" type:"boolean"`
// The settings for the subdomains.
//
// SubDomainSettings is a required field
SubDomainSettings []*SubDomainSetting `locationName:"subDomainSettings" type:"list" required:"true"`
}
// String returns the string representation
func (s CreateDomainAssociationInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateDomainAssociationInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateDomainAssociationInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateDomainAssociationInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if s.DomainName == nil {
invalidParams.Add(request.NewErrParamRequired("DomainName"))
}
if s.SubDomainSettings == nil {
invalidParams.Add(request.NewErrParamRequired("SubDomainSettings"))
}
if s.SubDomainSettings != nil {
for i, v := range s.SubDomainSettings {
if v == nil {
continue
}
if err := v.Validate(); err != nil {
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SubDomainSettings", i), err.(request.ErrInvalidParams))
}
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *CreateDomainAssociationInput) SetAppId(v string) *CreateDomainAssociationInput {
s.AppId = &v
return s
}
// SetAutoSubDomainCreationPatterns sets the AutoSubDomainCreationPatterns field's value.
func (s *CreateDomainAssociationInput) SetAutoSubDomainCreationPatterns(v []*string) *CreateDomainAssociationInput {
s.AutoSubDomainCreationPatterns = v
return s
}
// SetAutoSubDomainIAMRole sets the AutoSubDomainIAMRole field's value.
func (s *CreateDomainAssociationInput) SetAutoSubDomainIAMRole(v string) *CreateDomainAssociationInput {
s.AutoSubDomainIAMRole = &v
return s
}
// SetDomainName sets the DomainName field's value.
func (s *CreateDomainAssociationInput) SetDomainName(v string) *CreateDomainAssociationInput {
s.DomainName = &v
return s
}
// SetEnableAutoSubDomain sets the EnableAutoSubDomain field's value.
func (s *CreateDomainAssociationInput) SetEnableAutoSubDomain(v bool) *CreateDomainAssociationInput {
s.EnableAutoSubDomain = &v
return s
}
// SetSubDomainSettings sets the SubDomainSettings field's value.
func (s *CreateDomainAssociationInput) SetSubDomainSettings(v []*SubDomainSetting) *CreateDomainAssociationInput {
s.SubDomainSettings = v
return s
}
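// Editorial sketch, not generated code: SubDomainSettings is required, so
// validation fails for a request that only sets the app ID and domain name.
// The identifiers below are hypothetical.
func exampleValidateDomainAssociation() {
    input := (&CreateDomainAssociationInput{}).
        SetAppId("d123456789").
        SetDomainName("example.com")
    if err := input.Validate(); err != nil {
        // Reports the missing SubDomainSettings parameter.
        fmt.Println(err)
    }
}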
// The result structure for the create domain association request.
type CreateDomainAssociationOutput struct {
_ struct{} `type:"structure"`
// Describes the structure of a domain association, which associates a custom
// domain with an Amplify app.
//
// DomainAssociation is a required field
DomainAssociation *DomainAssociation `locationName:"domainAssociation" type:"structure" required:"true"`
}
// String returns the string representation
func (s CreateDomainAssociationOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateDomainAssociationOutput) GoString() string {
return s.String()
}
// SetDomainAssociation sets the DomainAssociation field's value.
func (s *CreateDomainAssociationOutput) SetDomainAssociation(v *DomainAssociation) *CreateDomainAssociationOutput {
s.DomainAssociation = v
return s
}
// The request structure for the create webhook request.
type CreateWebhookInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// The name for a branch that is part of an Amplify app.
//
// BranchName is a required field
BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"`
// The description for a webhook.
Description *string `locationName:"description" type:"string"`
}
// String returns the string representation
func (s CreateWebhookInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateWebhookInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateWebhookInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateWebhookInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if s.BranchName == nil {
invalidParams.Add(request.NewErrParamRequired("BranchName"))
}
if s.BranchName != nil && len(*s.BranchName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BranchName", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *CreateWebhookInput) SetAppId(v string) *CreateWebhookInput {
s.AppId = &v
return s
}
// SetBranchName sets the BranchName field's value.
func (s *CreateWebhookInput) SetBranchName(v string) *CreateWebhookInput {
s.BranchName = &v
return s
}
// SetDescription sets the Description field's value.
func (s *CreateWebhookInput) SetDescription(v string) *CreateWebhookInput {
s.Description = &v
return s
}
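// Editorial sketch, not generated code: a webhook create request for a
// hypothetical app and branch.
func exampleBuildCreateWebhookInput() *CreateWebhookInput {
    return (&CreateWebhookInput{}).
        SetAppId("d123456789"). // AppId and BranchName are required.
        SetBranchName("main").
        SetDescription("trigger builds on push")
}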
// The result structure for the create webhook request.
type CreateWebhookOutput struct {
_ struct{} `type:"structure"`
// Describes a webhook that connects repository events to an Amplify app.
//
// Webhook is a required field
Webhook *Webhook `locationName:"webhook" type:"structure" required:"true"`
}
// String returns the string representation
func (s CreateWebhookOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateWebhookOutput) GoString() string {
return s.String()
}
// SetWebhook sets the Webhook field's value.
func (s *CreateWebhookOutput) SetWebhook(v *Webhook) *CreateWebhookOutput {
s.Webhook = v
return s
}
// Describes a custom rewrite or redirect rule.
type CustomRule struct {
_ struct{} `type:"structure"`
// The condition for a URL rewrite or redirect rule, such as a country code.
Condition *string `locationName:"condition" min:"1" type:"string"`
// The source pattern for a URL rewrite or redirect rule.
//
// Source is a required field
Source *string `locationName:"source" min:"1" type:"string" required:"true"`
// The status code for a URL rewrite or redirect rule.
//
// 200
//
// Represents a 200 rewrite rule.
//
// 301
//
// Represents a 301 (moved permanently) redirect rule. This and all future requests
// should be directed to the target URL.
//
// 302
//
// Represents a 302 temporary redirect rule.
//
// 404
//
// Represents a 404 redirect rule.
//
// 404-200
//
// Represents a 404 rewrite rule.
Status *string `locationName:"status" min:"3" type:"string"`
// The target pattern for a URL rewrite or redirect rule.
//
// Target is a required field
Target *string `locationName:"target" min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s CustomRule) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CustomRule) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CustomRule) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CustomRule"}
if s.Condition != nil && len(*s.Condition) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Condition", 1))
}
if s.Source == nil {
invalidParams.Add(request.NewErrParamRequired("Source"))
}
if s.Source != nil && len(*s.Source) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Source", 1))
}
if s.Status != nil && len(*s.Status) < 3 {
invalidParams.Add(request.NewErrParamMinLen("Status", 3))
}
if s.Target == nil {
invalidParams.Add(request.NewErrParamRequired("Target"))
}
if s.Target != nil && len(*s.Target) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Target", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCondition sets the Condition field's value.
func (s *CustomRule) SetCondition(v string) *CustomRule {
s.Condition = &v
return s
}
// SetSource sets the Source field's value.
func (s *CustomRule) SetSource(v string) *CustomRule {
s.Source = &v
return s
}
// SetStatus sets the Status field's value.
func (s *CustomRule) SetStatus(v string) *CustomRule {
s.Status = &v
return s
}
// SetTarget sets the Target field's value.
func (s *CustomRule) SetTarget(v string) *CustomRule {
s.Target = &v
return s
}
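// Editorial sketch, not generated code: a permanent redirect rule using the
// 301 status described above. The source and target patterns are hypothetical.
func exampleBuildRedirectRule() (*CustomRule, error) {
    rule := (&CustomRule{}).
        SetSource("/docs/<*>"). // Source and Target are required.
        SetTarget("/documentation/<*>").
        SetStatus("301") // Permanent redirect; "200" would rewrite instead.
    if err := rule.Validate(); err != nil {
        return nil, err
    }
    return rule, nil
}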
// The request structure for the delete app request.
type DeleteAppInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s DeleteAppInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteAppInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteAppInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DeleteAppInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *DeleteAppInput) SetAppId(v string) *DeleteAppInput {
s.AppId = &v
return s
}
// The result structure for the delete app request.
type DeleteAppOutput struct {
_ struct{} `type:"structure"`
// Represents the different branches of a repository for building, deploying,
// and hosting an Amplify app.
//
// App is a required field
App *App `locationName:"app" type:"structure" required:"true"`
}
// String returns the string representation
func (s DeleteAppOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteAppOutput) GoString() string {
return s.String()
}
// SetApp sets the App field's value.
func (s *DeleteAppOutput) SetApp(v *App) *DeleteAppOutput {
s.App = v
return s
}
// The request structure for the delete backend environment request.
type DeleteBackendEnvironmentInput struct {
_ struct{} `type:"structure"`
// The unique ID of an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// The name of a backend environment of an Amplify app.
//
// EnvironmentName is a required field
EnvironmentName *string `location:"uri" locationName:"environmentName" min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s DeleteBackendEnvironmentInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteBackendEnvironmentInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteBackendEnvironmentInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DeleteBackendEnvironmentInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if s.EnvironmentName == nil {
invalidParams.Add(request.NewErrParamRequired("EnvironmentName"))
}
if s.EnvironmentName != nil && len(*s.EnvironmentName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *DeleteBackendEnvironmentInput) SetAppId(v string) *DeleteBackendEnvironmentInput {
s.AppId = &v
return s
}
// SetEnvironmentName sets the EnvironmentName field's value.
func (s *DeleteBackendEnvironmentInput) SetEnvironmentName(v string) *DeleteBackendEnvironmentInput {
s.EnvironmentName = &v
return s
}
// The result structure for the delete backend environment request.
type DeleteBackendEnvironmentOutput struct {
_ struct{} `type:"structure"`
// Describes the backend environment for an Amplify app.
//
// BackendEnvironment is a required field
BackendEnvironment *BackendEnvironment `locationName:"backendEnvironment" type:"structure" required:"true"`
}
// String returns the string representation
func (s DeleteBackendEnvironmentOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteBackendEnvironmentOutput) GoString() string {
return s.String()
}
// SetBackendEnvironment sets the BackendEnvironment field's value.
func (s *DeleteBackendEnvironmentOutput) SetBackendEnvironment(v *BackendEnvironment) *DeleteBackendEnvironmentOutput {
s.BackendEnvironment = v
return s
}
// The request structure for the delete branch request.
type DeleteBranchInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// The name for the branch.
//
// BranchName is a required field
BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s DeleteBranchInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteBranchInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteBranchInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DeleteBranchInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if s.BranchName == nil {
invalidParams.Add(request.NewErrParamRequired("BranchName"))
}
if s.BranchName != nil && len(*s.BranchName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BranchName", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *DeleteBranchInput) SetAppId(v string) *DeleteBranchInput {
s.AppId = &v
return s
}
// SetBranchName sets the BranchName field's value.
func (s *DeleteBranchInput) SetBranchName(v string) *DeleteBranchInput {
s.BranchName = &v
return s
}
// The result structure for the delete branch request.
type DeleteBranchOutput struct {
_ struct{} `type:"structure"`
// The branch for an Amplify app, which maps to a third-party repository branch.
//
// Branch is a required field
Branch *Branch `locationName:"branch" type:"structure" required:"true"`
}
// String returns the string representation
func (s DeleteBranchOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteBranchOutput) GoString() string {
return s.String()
}
// SetBranch sets the Branch field's value.
func (s *DeleteBranchOutput) SetBranch(v *Branch) *DeleteBranchOutput {
s.Branch = v
return s
}
// The request structure for the delete domain association request.
type DeleteDomainAssociationInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// The name of the domain.
//
// DomainName is a required field
DomainName *string `location:"uri" locationName:"domainName" type:"string" required:"true"`
}
// String returns the string representation
func (s DeleteDomainAssociationInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteDomainAssociationInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteDomainAssociationInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DeleteDomainAssociationInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if s.DomainName == nil {
invalidParams.Add(request.NewErrParamRequired("DomainName"))
}
if s.DomainName != nil && len(*s.DomainName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("DomainName", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *DeleteDomainAssociationInput) SetAppId(v string) *DeleteDomainAssociationInput {
s.AppId = &v
return s
}
// SetDomainName sets the DomainName field's value.
func (s *DeleteDomainAssociationInput) SetDomainName(v string) *DeleteDomainAssociationInput {
s.DomainName = &v
return s
}
type DeleteDomainAssociationOutput struct {
_ struct{} `type:"structure"`
// Describes a domain association that associates a custom domain with an Amplify
// app.
//
// DomainAssociation is a required field
DomainAssociation *DomainAssociation `locationName:"domainAssociation" type:"structure" required:"true"`
}
// String returns the string representation
func (s DeleteDomainAssociationOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteDomainAssociationOutput) GoString() string {
return s.String()
}
// SetDomainAssociation sets the DomainAssociation field's value.
func (s *DeleteDomainAssociationOutput) SetDomainAssociation(v *DomainAssociation) *DeleteDomainAssociationOutput {
s.DomainAssociation = v
return s
}
// The request structure for the delete job request.
type DeleteJobInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// The name of the branch to use for the job.
//
// BranchName is a required field
BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"`
// The unique ID for the job.
//
// JobId is a required field
JobId *string `location:"uri" locationName:"jobId" type:"string" required:"true"`
}
// String returns the string representation
func (s DeleteJobInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteJobInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteJobInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DeleteJobInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if s.BranchName == nil {
invalidParams.Add(request.NewErrParamRequired("BranchName"))
}
if s.BranchName != nil && len(*s.BranchName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BranchName", 1))
}
if s.JobId == nil {
invalidParams.Add(request.NewErrParamRequired("JobId"))
}
if s.JobId != nil && len(*s.JobId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *DeleteJobInput) SetAppId(v string) *DeleteJobInput {
s.AppId = &v
return s
}
// SetBranchName sets the BranchName field's value.
func (s *DeleteJobInput) SetBranchName(v string) *DeleteJobInput {
s.BranchName = &v
return s
}
// SetJobId sets the JobId field's value.
func (s *DeleteJobInput) SetJobId(v string) *DeleteJobInput {
s.JobId = &v
return s
}
// The result structure for the delete job request.
type DeleteJobOutput struct {
_ struct{} `type:"structure"`
// Describes the summary for an execution job for an Amplify app.
//
// JobSummary is a required field
JobSummary *JobSummary `locationName:"jobSummary" type:"structure" required:"true"`
}
// String returns the string representation
func (s DeleteJobOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteJobOutput) GoString() string {
return s.String()
}
// SetJobSummary sets the JobSummary field's value.
func (s *DeleteJobOutput) SetJobSummary(v *JobSummary) *DeleteJobOutput {
s.JobSummary = v
return s
}
// The request structure for the delete webhook request.
type DeleteWebhookInput struct {
_ struct{} `type:"structure"`
// The unique ID for a webhook.
//
// WebhookId is a required field
WebhookId *string `location:"uri" locationName:"webhookId" type:"string" required:"true"`
}
// String returns the string representation
func (s DeleteWebhookInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteWebhookInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteWebhookInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DeleteWebhookInput"}
if s.WebhookId == nil {
invalidParams.Add(request.NewErrParamRequired("WebhookId"))
}
if s.WebhookId != nil && len(*s.WebhookId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("WebhookId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetWebhookId sets the WebhookId field's value.
func (s *DeleteWebhookInput) SetWebhookId(v string) *DeleteWebhookInput {
s.WebhookId = &v
return s
}
// The result structure for the delete webhook request.
type DeleteWebhookOutput struct {
_ struct{} `type:"structure"`
// Describes a webhook that connects repository events to an Amplify app.
//
// Webhook is a required field
Webhook *Webhook `locationName:"webhook" type:"structure" required:"true"`
}
// String returns the string representation
func (s DeleteWebhookOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteWebhookOutput) GoString() string {
return s.String()
}
// SetWebhook sets the Webhook field's value.
func (s *DeleteWebhookOutput) SetWebhook(v *Webhook) *DeleteWebhookOutput {
s.Webhook = v
return s
}
// An operation failed because a dependent service threw an exception.
type DependentServiceFailureException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Message_ *string `locationName:"message" type:"string"`
}
// String returns the string representation
func (s DependentServiceFailureException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DependentServiceFailureException) GoString() string {
return s.String()
}
func newErrorDependentServiceFailureException(v protocol.ResponseMetadata) error {
return &DependentServiceFailureException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *DependentServiceFailureException) Code() string {
return "DependentServiceFailureException"
}
// Message returns the exception's message.
func (s *DependentServiceFailureException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *DependentServiceFailureException) OrigErr() error {
return nil
}
func (s *DependentServiceFailureException) Error() string {
return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *DependentServiceFailureException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for the request.
func (s *DependentServiceFailureException) RequestID() string {
return s.RespMetadata.RequestID
}
// Describes a domain association that associates a custom domain with an Amplify
// app.
type DomainAssociation struct {
_ struct{} `type:"structure"`
// Sets branch patterns for automatic subdomain creation.
AutoSubDomainCreationPatterns []*string `locationName:"autoSubDomainCreationPatterns" type:"list"`
// The Amazon Resource Name (ARN) for the AWS Identity and Access Management
// (IAM) service role that is required for automatically creating subdomains.
AutoSubDomainIAMRole *string `locationName:"autoSubDomainIAMRole" type:"string"`
// The DNS record for certificate verification.
CertificateVerificationDNSRecord *string `locationName:"certificateVerificationDNSRecord" type:"string"`
// The Amazon Resource Name (ARN) for the domain association.
//
// DomainAssociationArn is a required field
DomainAssociationArn *string `locationName:"domainAssociationArn" type:"string" required:"true"`
// The name of the domain.
//
// DomainName is a required field
DomainName *string `locationName:"domainName" type:"string" required:"true"`
// The current status of the domain association.
//
// DomainStatus is a required field
DomainStatus *string `locationName:"domainStatus" type:"string" required:"true" enum:"DomainStatus"`
// Enables the automated creation of subdomains for branches.
//
// EnableAutoSubDomain is a required field
EnableAutoSubDomain *bool `locationName:"enableAutoSubDomain" type:"boolean" required:"true"`
// The reason for the current status of the domain association.
//
// StatusReason is a required field
StatusReason *string `locationName:"statusReason" type:"string" required:"true"`
// The subdomains for the domain association.
//
// SubDomains is a required field
SubDomains []*SubDomain `locationName:"subDomains" type:"list" required:"true"`
}
// String returns the string representation
func (s DomainAssociation) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DomainAssociation) GoString() string {
return s.String()
}
// SetAutoSubDomainCreationPatterns sets the AutoSubDomainCreationPatterns field's value.
func (s *DomainAssociation) SetAutoSubDomainCreationPatterns(v []*string) *DomainAssociation {
s.AutoSubDomainCreationPatterns = v
return s
}
// SetAutoSubDomainIAMRole sets the AutoSubDomainIAMRole field's value.
func (s *DomainAssociation) SetAutoSubDomainIAMRole(v string) *DomainAssociation {
s.AutoSubDomainIAMRole = &v
return s
}
// SetCertificateVerificationDNSRecord sets the CertificateVerificationDNSRecord field's value.
func (s *DomainAssociation) SetCertificateVerificationDNSRecord(v string) *DomainAssociation {
s.CertificateVerificationDNSRecord = &v
return s
}
// SetDomainAssociationArn sets the DomainAssociationArn field's value.
func (s *DomainAssociation) SetDomainAssociationArn(v string) *DomainAssociation {
s.DomainAssociationArn = &v
return s
}
// SetDomainName sets the DomainName field's value.
func (s *DomainAssociation) SetDomainName(v string) *DomainAssociation {
s.DomainName = &v
return s
}
// SetDomainStatus sets the DomainStatus field's value.
func (s *DomainAssociation) SetDomainStatus(v string) *DomainAssociation {
s.DomainStatus = &v
return s
}
// SetEnableAutoSubDomain sets the EnableAutoSubDomain field's value.
func (s *DomainAssociation) SetEnableAutoSubDomain(v bool) *DomainAssociation {
s.EnableAutoSubDomain = &v
return s
}
// SetStatusReason sets the StatusReason field's value.
func (s *DomainAssociation) SetStatusReason(v string) *DomainAssociation {
s.StatusReason = &v
return s
}
// SetSubDomains sets the SubDomains field's value.
func (s *DomainAssociation) SetSubDomains(v []*SubDomain) *DomainAssociation {
s.SubDomains = v
return s
}
// The request structure for the generate access logs request.
type GenerateAccessLogsInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// The name of the domain.
//
// DomainName is a required field
DomainName *string `locationName:"domainName" type:"string" required:"true"`
// The time at which the logs should end. The time range specified is inclusive
// of the end time.
EndTime *time.Time `locationName:"endTime" type:"timestamp"`
// The time at which the logs should start. The time range specified is inclusive
// of the start time.
StartTime *time.Time `locationName:"startTime" type:"timestamp"`
}
// String returns the string representation
func (s GenerateAccessLogsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GenerateAccessLogsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GenerateAccessLogsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GenerateAccessLogsInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if s.DomainName == nil {
invalidParams.Add(request.NewErrParamRequired("DomainName"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *GenerateAccessLogsInput) SetAppId(v string) *GenerateAccessLogsInput {
s.AppId = &v
return s
}
// SetDomainName sets the DomainName field's value.
func (s *GenerateAccessLogsInput) SetDomainName(v string) *GenerateAccessLogsInput {
s.DomainName = &v
return s
}
// SetEndTime sets the EndTime field's value.
func (s *GenerateAccessLogsInput) SetEndTime(v time.Time) *GenerateAccessLogsInput {
s.EndTime = &v
return s
}
// SetStartTime sets the StartTime field's value.
func (s *GenerateAccessLogsInput) SetStartTime(v time.Time) *GenerateAccessLogsInput {
s.StartTime = &v
return s
}
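// The Set* methods return the receiver, so a request can be built fluently.
// An illustrative sketch (not part of the generated API) of requesting one
// day of access logs; the IDs are placeholders, and Validate is normally run
// for you by the request machinery:
//
//	import (
//		"fmt"
//		"time"
//
//		"github.com/aws/aws-sdk-go/aws"
//		"github.com/aws/aws-sdk-go/aws/session"
//		"github.com/aws/aws-sdk-go/service/amplify"
//	)
//
//	func exampleGenerateAccessLogs() {
//		svc := amplify.New(session.Must(session.NewSession()))
//		end := time.Now()
//		in := (&amplify.GenerateAccessLogsInput{}).
//			SetAppId("d2example").
//			SetDomainName("example.com").
//			SetStartTime(end.Add(-24 * time.Hour)).
//			SetEndTime(end)
//		out, err := svc.GenerateAccessLogs(in)
//		if err != nil {
//			fmt.Println("GenerateAccessLogs failed:", err)
//			return
//		}
//		fmt.Println("download logs from:", aws.StringValue(out.LogUrl))
//	}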
// The result structure for the generate access logs request.
type GenerateAccessLogsOutput struct {
_ struct{} `type:"structure"`
// The pre-signed URL for the requested access logs.
LogUrl *string `locationName:"logUrl" type:"string"`
}
// String returns the string representation
func (s GenerateAccessLogsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GenerateAccessLogsOutput) GoString() string {
return s.String()
}
// SetLogUrl sets the LogUrl field's value.
func (s *GenerateAccessLogsOutput) SetLogUrl(v string) *GenerateAccessLogsOutput {
s.LogUrl = &v
return s
}
// The request structure for the get app request.
type GetAppInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s GetAppInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetAppInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetAppInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetAppInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *GetAppInput) SetAppId(v string) *GetAppInput {
s.AppId = &v
return s
}
type GetAppOutput struct {
_ struct{} `type:"structure"`
// Represents the different branches of a repository for building, deploying,
// and hosting an Amplify app.
//
// App is a required field
App *App `locationName:"app" type:"structure" required:"true"`
}
// String returns the string representation
func (s GetAppOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetAppOutput) GoString() string {
return s.String()
}
// SetApp sets the App field's value.
func (s *GetAppOutput) SetApp(v *App) *GetAppOutput {
s.App = v
return s
}
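// An illustrative sketch (not part of the generated API) of the get/describe
// call pattern shared by the Get*Input/Get*Output pairs in this file; the
// app ID is a placeholder:
//
//	import (
//		"fmt"
//
//		"github.com/aws/aws-sdk-go/aws"
//		"github.com/aws/aws-sdk-go/aws/session"
//		"github.com/aws/aws-sdk-go/service/amplify"
//	)
//
//	func exampleGetApp() {
//		svc := amplify.New(session.Must(session.NewSession()))
//		out, err := svc.GetApp(&amplify.GetAppInput{
//			AppId: aws.String("d2example"),
//		})
//		if err != nil {
//			fmt.Println("GetApp failed:", err)
//			return
//		}
//		fmt.Println(out.App) // App implements String via awsutil.Prettify
//	}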
// The request structure for the get artifact request.
type GetArtifactUrlInput struct {
_ struct{} `type:"structure"`
// The unique ID for an artifact.
//
// ArtifactId is a required field
ArtifactId *string `location:"uri" locationName:"artifactId" type:"string" required:"true"`
}
// String returns the string representation
func (s GetArtifactUrlInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetArtifactUrlInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetArtifactUrlInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetArtifactUrlInput"}
if s.ArtifactId == nil {
invalidParams.Add(request.NewErrParamRequired("ArtifactId"))
}
if s.ArtifactId != nil && len(*s.ArtifactId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("ArtifactId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetArtifactId sets the ArtifactId field's value.
func (s *GetArtifactUrlInput) SetArtifactId(v string) *GetArtifactUrlInput {
s.ArtifactId = &v
return s
}
// The result structure for the get artifact request.
type GetArtifactUrlOutput struct {
_ struct{} `type:"structure"`
// The unique ID for an artifact.
//
// ArtifactId is a required field
ArtifactId *string `locationName:"artifactId" type:"string" required:"true"`
// The presigned URL for the artifact.
//
// ArtifactUrl is a required field
ArtifactUrl *string `locationName:"artifactUrl" type:"string" required:"true"`
}
// String returns the string representation
func (s GetArtifactUrlOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetArtifactUrlOutput) GoString() string {
return s.String()
}
// SetArtifactId sets the ArtifactId field's value.
func (s *GetArtifactUrlOutput) SetArtifactId(v string) *GetArtifactUrlOutput {
s.ArtifactId = &v
return s
}
// SetArtifactUrl sets the ArtifactUrl field's value.
func (s *GetArtifactUrlOutput) SetArtifactUrl(v string) *GetArtifactUrlOutput {
s.ArtifactUrl = &v
return s
}
// The request structure for the get backend environment request.
type GetBackendEnvironmentInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// The name for the backend environment.
//
// EnvironmentName is a required field
EnvironmentName *string `location:"uri" locationName:"environmentName" min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s GetBackendEnvironmentInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetBackendEnvironmentInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetBackendEnvironmentInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetBackendEnvironmentInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if s.EnvironmentName == nil {
invalidParams.Add(request.NewErrParamRequired("EnvironmentName"))
}
if s.EnvironmentName != nil && len(*s.EnvironmentName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *GetBackendEnvironmentInput) SetAppId(v string) *GetBackendEnvironmentInput {
s.AppId = &v
return s
}
// SetEnvironmentName sets the EnvironmentName field's value.
func (s *GetBackendEnvironmentInput) SetEnvironmentName(v string) *GetBackendEnvironmentInput {
s.EnvironmentName = &v
return s
}
// The result structure for the get backend environment request.
type GetBackendEnvironmentOutput struct {
_ struct{} `type:"structure"`
// Describes the backend environment for an Amplify app.
//
// BackendEnvironment is a required field
BackendEnvironment *BackendEnvironment `locationName:"backendEnvironment" type:"structure" required:"true"`
}
// String returns the string representation
func (s GetBackendEnvironmentOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetBackendEnvironmentOutput) GoString() string {
return s.String()
}
// SetBackendEnvironment sets the BackendEnvironment field's value.
func (s *GetBackendEnvironmentOutput) SetBackendEnvironment(v *BackendEnvironment) *GetBackendEnvironmentOutput {
s.BackendEnvironment = v
return s
}
// The request structure for the get branch request.
type GetBranchInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// The name for the branch.
//
// BranchName is a required field
BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s GetBranchInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetBranchInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetBranchInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetBranchInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if s.BranchName == nil {
invalidParams.Add(request.NewErrParamRequired("BranchName"))
}
if s.BranchName != nil && len(*s.BranchName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BranchName", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *GetBranchInput) SetAppId(v string) *GetBranchInput {
s.AppId = &v
return s
}
// SetBranchName sets the BranchName field's value.
func (s *GetBranchInput) SetBranchName(v string) *GetBranchInput {
s.BranchName = &v
return s
}
type GetBranchOutput struct {
_ struct{} `type:"structure"`
// The branch for an Amplify app, which maps to a third-party repository branch.
//
// Branch is a required field
Branch *Branch `locationName:"branch" type:"structure" required:"true"`
}
// String returns the string representation
func (s GetBranchOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetBranchOutput) GoString() string {
return s.String()
}
// SetBranch sets the Branch field's value.
func (s *GetBranchOutput) SetBranch(v *Branch) *GetBranchOutput {
s.Branch = v
return s
}
// The request structure for the get domain association request.
type GetDomainAssociationInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// The name of the domain.
//
// DomainName is a required field
DomainName *string `location:"uri" locationName:"domainName" type:"string" required:"true"`
}
// String returns the string representation
func (s GetDomainAssociationInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetDomainAssociationInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetDomainAssociationInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetDomainAssociationInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if s.DomainName == nil {
invalidParams.Add(request.NewErrParamRequired("DomainName"))
}
if s.DomainName != nil && len(*s.DomainName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("DomainName", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *GetDomainAssociationInput) SetAppId(v string) *GetDomainAssociationInput {
s.AppId = &v
return s
}
// SetDomainName sets the DomainName field's value.
func (s *GetDomainAssociationInput) SetDomainName(v string) *GetDomainAssociationInput {
s.DomainName = &v
return s
}
// The result structure for the get domain association request.
type GetDomainAssociationOutput struct {
_ struct{} `type:"structure"`
// Describes the structure of a domain association, which associates a custom
// domain with an Amplify app.
//
// DomainAssociation is a required field
DomainAssociation *DomainAssociation `locationName:"domainAssociation" type:"structure" required:"true"`
}
// String returns the string representation
func (s GetDomainAssociationOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetDomainAssociationOutput) GoString() string {
return s.String()
}
// SetDomainAssociation sets the DomainAssociation field's value.
func (s *GetDomainAssociationOutput) SetDomainAssociation(v *DomainAssociation) *GetDomainAssociationOutput {
s.DomainAssociation = v
return s
}
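// An illustrative sketch (not part of the generated API) of reading the
// domain association returned above; the identifiers are placeholders, and
// DomainStatus carries a value from the DomainStatus enum:
//
//	import (
//		"fmt"
//
//		"github.com/aws/aws-sdk-go/aws"
//		"github.com/aws/aws-sdk-go/aws/session"
//		"github.com/aws/aws-sdk-go/service/amplify"
//	)
//
//	func exampleGetDomainAssociation() {
//		svc := amplify.New(session.Must(session.NewSession()))
//		out, err := svc.GetDomainAssociation(&amplify.GetDomainAssociationInput{
//			AppId:      aws.String("d2example"),
//			DomainName: aws.String("example.com"),
//		})
//		if err != nil {
//			fmt.Println("GetDomainAssociation failed:", err)
//			return
//		}
//		da := out.DomainAssociation
//		fmt.Println("status:", aws.StringValue(da.DomainStatus))
//		for _, sd := range da.SubDomains {
//			fmt.Println("subdomain:", sd)
//		}
//	}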
// The request structure for the get job request.
type GetJobInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// The branch name for the job.
//
// BranchName is a required field
BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"`
// The unique ID for the job.
//
// JobId is a required field
JobId *string `location:"uri" locationName:"jobId" type:"string" required:"true"`
}
// String returns the string representation
func (s GetJobInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetJobInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetJobInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetJobInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if s.BranchName == nil {
invalidParams.Add(request.NewErrParamRequired("BranchName"))
}
if s.BranchName != nil && len(*s.BranchName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BranchName", 1))
}
if s.JobId == nil {
invalidParams.Add(request.NewErrParamRequired("JobId"))
}
if s.JobId != nil && len(*s.JobId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *GetJobInput) SetAppId(v string) *GetJobInput {
s.AppId = &v
return s
}
// SetBranchName sets the BranchName field's value.
func (s *GetJobInput) SetBranchName(v string) *GetJobInput {
s.BranchName = &v
return s
}
// SetJobId sets the JobId field's value.
func (s *GetJobInput) SetJobId(v string) *GetJobInput {
s.JobId = &v
return s
}
type GetJobOutput struct {
_ struct{} `type:"structure"`
// Describes an execution job for an Amplify app.
//
// Job is a required field
Job *Job `locationName:"job" type:"structure" required:"true"`
}
// String returns the string representation
func (s GetJobOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetJobOutput) GoString() string {
return s.String()
}
// SetJob sets the Job field's value.
func (s *GetJobOutput) SetJob(v *Job) *GetJobOutput {
s.Job = v
return s
}
// The request structure for the get webhook request.
type GetWebhookInput struct {
_ struct{} `type:"structure"`
// The unique ID for a webhook.
//
// WebhookId is a required field
WebhookId *string `location:"uri" locationName:"webhookId" type:"string" required:"true"`
}
// String returns the string representation
func (s GetWebhookInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetWebhookInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetWebhookInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetWebhookInput"}
if s.WebhookId == nil {
invalidParams.Add(request.NewErrParamRequired("WebhookId"))
}
if s.WebhookId != nil && len(*s.WebhookId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("WebhookId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetWebhookId sets the WebhookId field's value.
func (s *GetWebhookInput) SetWebhookId(v string) *GetWebhookInput {
s.WebhookId = &v
return s
}
// The result structure for the get webhook request.
type GetWebhookOutput struct {
_ struct{} `type:"structure"`
// Describes the structure of a webhook.
//
// Webhook is a required field
Webhook *Webhook `locationName:"webhook" type:"structure" required:"true"`
}
// String returns the string representation
func (s GetWebhookOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetWebhookOutput) GoString() string {
return s.String()
}
// SetWebhook sets the Webhook field's value.
func (s *GetWebhookOutput) SetWebhook(v *Webhook) *GetWebhookOutput {
s.Webhook = v
return s
}
// The service failed to perform an operation due to an internal issue.
type InternalFailureException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Message_ *string `locationName:"message" type:"string"`
}
// String returns the string representation
func (s InternalFailureException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s InternalFailureException) GoString() string {
return s.String()
}
func newErrorInternalFailureException(v protocol.ResponseMetadata) error {
return &InternalFailureException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *InternalFailureException) Code() string {
return "InternalFailureException"
}
// Message returns the exception's message.
func (s *InternalFailureException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfying the awserr.Error interface.
func (s *InternalFailureException) OrigErr() error {
return nil
}
func (s *InternalFailureException) Error() string {
return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *InternalFailureException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for the request.
func (s *InternalFailureException) RequestID() string {
return s.RespMetadata.RequestID
}
// Describes an execution job for an Amplify app.
type Job struct {
_ struct{} `type:"structure"`
// The execution steps for an execution job, for an Amplify app.
//
// Steps is a required field
Steps []*Step `locationName:"steps" type:"list" required:"true"`
// Describes the summary for an execution job for an Amplify app.
//
// Summary is a required field
Summary *JobSummary `locationName:"summary" type:"structure" required:"true"`
}
// String returns the string representation
func (s Job) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Job) GoString() string {
return s.String()
}
// SetSteps sets the Steps field's value.
func (s *Job) SetSteps(v []*Step) *Job {
s.Steps = v
return s
}
// SetSummary sets the Summary field's value.
func (s *Job) SetSummary(v *JobSummary) *Job {
s.Summary = v
return s
}
// Describes the summary for an execution job for an Amplify app.
type JobSummary struct {
_ struct{} `type:"structure"`
// The commit ID from a third-party repository provider for the job.
//
// CommitId is a required field
CommitId *string `locationName:"commitId" type:"string" required:"true"`
// The commit message from a third-party repository provider for the job.
//
// CommitMessage is a required field
CommitMessage *string `locationName:"commitMessage" type:"string" required:"true"`
// The commit date and time for the job.
//
// CommitTime is a required field
CommitTime *time.Time `locationName:"commitTime" type:"timestamp" required:"true"`
// The end date and time for the job.
EndTime *time.Time `locationName:"endTime" type:"timestamp"`
// The Amazon Resource Name (ARN) for the job.
//
// JobArn is a required field
JobArn *string `locationName:"jobArn" type:"string" required:"true"`
// The unique ID for the job.
//
// JobId is a required field
JobId *string `locationName:"jobId" type:"string" required:"true"`
// The type for the job. If the value is RELEASE, the job was manually released
// from its source by using the StartJob API. If the value is RETRY, the job
// was manually retried using the StartJob API. If the value is WEB_HOOK, the
// job was automatically triggered by webhooks.
//
// JobType is a required field
JobType *string `locationName:"jobType" type:"string" required:"true" enum:"JobType"`
// The start date and time for the job.
//
// StartTime is a required field
StartTime *time.Time `locationName:"startTime" type:"timestamp" required:"true"`
// The current status for the job.
//
// Status is a required field
Status *string `locationName:"status" type:"string" required:"true" enum:"JobStatus"`
}
// String returns the string representation
func (s JobSummary) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s JobSummary) GoString() string {
return s.String()
}
// SetCommitId sets the CommitId field's value.
func (s *JobSummary) SetCommitId(v string) *JobSummary {
s.CommitId = &v
return s
}
// SetCommitMessage sets the CommitMessage field's value.
func (s *JobSummary) SetCommitMessage(v string) *JobSummary {
s.CommitMessage = &v
return s
}
// SetCommitTime sets the CommitTime field's value.
func (s *JobSummary) SetCommitTime(v time.Time) *JobSummary {
s.CommitTime = &v
return s
}
// SetEndTime sets the EndTime field's value.
func (s *JobSummary) SetEndTime(v time.Time) *JobSummary {
s.EndTime = &v
return s
}
// SetJobArn sets the JobArn field's value.
func (s *JobSummary) SetJobArn(v string) *JobSummary {
s.JobArn = &v
return s
}
// SetJobId sets the JobId field's value.
func (s *JobSummary) SetJobId(v string) *JobSummary {
s.JobId = &v
return s
}
// SetJobType sets the JobType field's value.
func (s *JobSummary) SetJobType(v string) *JobSummary {
s.JobType = &v
return s
}
// SetStartTime sets the StartTime field's value.
func (s *JobSummary) SetStartTime(v time.Time) *JobSummary {
s.StartTime = &v
return s
}
// SetStatus sets the Status field's value.
func (s *JobSummary) SetStatus(v string) *JobSummary {
s.Status = &v
return s
}
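// An illustrative sketch (not part of the generated API) of branching on a
// job summary's type, using the JobType values documented above (RELEASE,
// RETRY, WEB_HOOK):
//
//	import (
//		"fmt"
//
//		"github.com/aws/aws-sdk-go/aws"
//		"github.com/aws/aws-sdk-go/service/amplify"
//	)
//
//	func describeJob(summary *amplify.JobSummary) {
//		switch aws.StringValue(summary.JobType) {
//		case "RELEASE":
//			fmt.Println("manually released via StartJob")
//		case "RETRY":
//			fmt.Println("manually retried via StartJob")
//		case "WEB_HOOK":
//			fmt.Println("triggered automatically by a webhook")
//		default:
//			fmt.Println("job type:", aws.StringValue(summary.JobType))
//		}
//	}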
// A resource could not be created because service quotas were exceeded.
type LimitExceededException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Message_ *string `locationName:"message" type:"string"`
}
// String returns the string representation
func (s LimitExceededException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s LimitExceededException) GoString() string {
return s.String()
}
func newErrorLimitExceededException(v protocol.ResponseMetadata) error {
return &LimitExceededException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *LimitExceededException) Code() string {
return "LimitExceededException"
}
// Message returns the exception's message.
func (s *LimitExceededException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfying the awserr.Error interface.
func (s *LimitExceededException) OrigErr() error {
return nil
}
func (s *LimitExceededException) Error() string {
return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *LimitExceededException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for request.
func (s *LimitExceededException) RequestID() string {
return s.RespMetadata.RequestID
}
// The request structure for the list apps request.
type ListAppsInput struct {
_ struct{} `type:"structure"`
// The maximum number of records to list in a single response.
MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"`
// A pagination token. If non-null, the pagination token is returned in a result.
// Pass its value in another request to retrieve more entries.
NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
}
// String returns the string representation
func (s ListAppsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListAppsInput) GoString() string {
return s.String()
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListAppsInput) SetMaxResults(v int64) *ListAppsInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListAppsInput) SetNextToken(v string) *ListAppsInput {
s.NextToken = &v
return s
}
// The result structure for an Amplify app list request.
type ListAppsOutput struct {
_ struct{} `type:"structure"`
// A list of Amplify apps.
//
// Apps is a required field
Apps []*App `locationName:"apps" type:"list" required:"true"`
// A pagination token. Set to null to start listing apps from the start. If
// non-null, the pagination token is returned in a result. Pass its value in
// another request to list more apps.
NextToken *string `locationName:"nextToken" type:"string"`
}
// String returns the string representation
func (s ListAppsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListAppsOutput) GoString() string {
return s.String()
}
// SetApps sets the Apps field's value.
func (s *ListAppsOutput) SetApps(v []*App) *ListAppsOutput {
s.Apps = v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListAppsOutput) SetNextToken(v string) *ListAppsOutput {
s.NextToken = &v
return s
}
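// The NextToken convention above applies to the List* operations in this
// file. An illustrative sketch (not part of the generated API) of draining
// ListApps with a plain pagination loop:
//
//	import (
//		"fmt"
//
//		"github.com/aws/aws-sdk-go/aws"
//		"github.com/aws/aws-sdk-go/aws/session"
//		"github.com/aws/aws-sdk-go/service/amplify"
//	)
//
//	func exampleListAllApps() {
//		svc := amplify.New(session.Must(session.NewSession()))
//		in := &amplify.ListAppsInput{MaxResults: aws.Int64(50)}
//		for {
//			out, err := svc.ListApps(in)
//			if err != nil {
//				fmt.Println("ListApps failed:", err)
//				return
//			}
//			for _, app := range out.Apps {
//				fmt.Println("app:", aws.StringValue(app.AppId))
//			}
//			if aws.StringValue(out.NextToken) == "" {
//				break
//			}
//			in.NextToken = out.NextToken
//		}
//	}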
// Describes the request structure for the list artifacts request.
type ListArtifactsInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// The name of a branch that is part of an Amplify app.
//
// BranchName is a required field
BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"`
// The unique ID for a job.
//
// JobId is a required field
JobId *string `location:"uri" locationName:"jobId" type:"string" required:"true"`
// The maximum number of records to list in a single response.
MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"`
// A pagination token. Set to null to start listing artifacts from start. If
// a non-null pagination token is returned in a result, pass its value in here
// to list more artifacts.
NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
}
// String returns the string representation
func (s ListArtifactsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListArtifactsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListArtifactsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListArtifactsInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if s.BranchName == nil {
invalidParams.Add(request.NewErrParamRequired("BranchName"))
}
if s.BranchName != nil && len(*s.BranchName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BranchName", 1))
}
if s.JobId == nil {
invalidParams.Add(request.NewErrParamRequired("JobId"))
}
if s.JobId != nil && len(*s.JobId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *ListArtifactsInput) SetAppId(v string) *ListArtifactsInput {
s.AppId = &v
return s
}
// SetBranchName sets the BranchName field's value.
func (s *ListArtifactsInput) SetBranchName(v string) *ListArtifactsInput {
s.BranchName = &v
return s
}
// SetJobId sets the JobId field's value.
func (s *ListArtifactsInput) SetJobId(v string) *ListArtifactsInput {
s.JobId = &v
return s
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListArtifactsInput) SetMaxResults(v int64) *ListArtifactsInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListArtifactsInput) SetNextToken(v string) *ListArtifactsInput {
s.NextToken = &v
return s
}
// The result structure for the list artifacts request.
type ListArtifactsOutput struct {
_ struct{} `type:"structure"`
// A list of artifacts.
//
// Artifacts is a required field
Artifacts []*Artifact `locationName:"artifacts" type:"list" required:"true"`
// A pagination token. If a non-null pagination token is returned in a result,
// pass its value in another request to retrieve more entries.
NextToken *string `locationName:"nextToken" type:"string"`
}
// String returns the string representation
func (s ListArtifactsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListArtifactsOutput) GoString() string {
return s.String()
}
// SetArtifacts sets the Artifacts field's value.
func (s *ListArtifactsOutput) SetArtifacts(v []*Artifact) *ListArtifactsOutput {
s.Artifacts = v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListArtifactsOutput) SetNextToken(v string) *ListArtifactsOutput {
s.NextToken = &v
return s
}
// The request structure for the list backend environments request.
type ListBackendEnvironmentsInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// The name of the backend environment.
EnvironmentName *string `location:"querystring" locationName:"environmentName" min:"1" type:"string"`
// The maximum number of records to list in a single response.
MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"`
// A pagination token. Set to null to start listing backend environments from
// the start. If a non-null pagination token is returned in a result, pass its
// value in here to list more backend environments.
NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
}
// String returns the string representation
func (s ListBackendEnvironmentsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListBackendEnvironmentsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListBackendEnvironmentsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListBackendEnvironmentsInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if s.EnvironmentName != nil && len(*s.EnvironmentName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *ListBackendEnvironmentsInput) SetAppId(v string) *ListBackendEnvironmentsInput {
s.AppId = &v
return s
}
// SetEnvironmentName sets the EnvironmentName field's value.
func (s *ListBackendEnvironmentsInput) SetEnvironmentName(v string) *ListBackendEnvironmentsInput {
s.EnvironmentName = &v
return s
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListBackendEnvironmentsInput) SetMaxResults(v int64) *ListBackendEnvironmentsInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListBackendEnvironmentsInput) SetNextToken(v string) *ListBackendEnvironmentsInput {
s.NextToken = &v
return s
}
// The result structure for the list backend environments request.
type ListBackendEnvironmentsOutput struct {
_ struct{} `type:"structure"`
// The list of backend environments for an Amplify app.
//
// BackendEnvironments is a required field
BackendEnvironments []*BackendEnvironment `locationName:"backendEnvironments" type:"list" required:"true"`
// A pagination token. If a non-null pagination token is returned in a result,
// pass its value in another request to retrieve more entries.
NextToken *string `locationName:"nextToken" type:"string"`
}
// String returns the string representation
func (s ListBackendEnvironmentsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListBackendEnvironmentsOutput) GoString() string {
return s.String()
}
// SetBackendEnvironments sets the BackendEnvironments field's value.
func (s *ListBackendEnvironmentsOutput) SetBackendEnvironments(v []*BackendEnvironment) *ListBackendEnvironmentsOutput {
s.BackendEnvironments = v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListBackendEnvironmentsOutput) SetNextToken(v string) *ListBackendEnvironmentsOutput {
s.NextToken = &v
return s
}
// The request structure for the list branches request.
type ListBranchesInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// The maximum number of records to list in a single response.
MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"`
// A pagination token. Set to null to start listing branches from the start.
// If a non-null pagination token is returned in a result, pass its value in
// here to list more branches.
NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
}
// String returns the string representation
func (s ListBranchesInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListBranchesInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListBranchesInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListBranchesInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *ListBranchesInput) SetAppId(v string) *ListBranchesInput {
s.AppId = &v
return s
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListBranchesInput) SetMaxResults(v int64) *ListBranchesInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListBranchesInput) SetNextToken(v string) *ListBranchesInput {
s.NextToken = &v
return s
}
// The result structure for the list branches request.
type ListBranchesOutput struct {
_ struct{} `type:"structure"`
// A list of branches for an Amplify app.
//
// Branches is a required field
Branches []*Branch `locationName:"branches" type:"list" required:"true"`
// A pagination token. If a non-null pagination token is returned in a result,
// pass its value in another request to retrieve more entries.
NextToken *string `locationName:"nextToken" type:"string"`
}
// String returns the string representation
func (s ListBranchesOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListBranchesOutput) GoString() string {
return s.String()
}
// SetBranches sets the Branches field's value.
func (s *ListBranchesOutput) SetBranches(v []*Branch) *ListBranchesOutput {
s.Branches = v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListBranchesOutput) SetNextToken(v string) *ListBranchesOutput {
s.NextToken = &v
return s
}
// The request structure for the list domain associations request.
type ListDomainAssociationsInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// The maximum number of records to list in a single response.
MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"`
// A pagination token. Set to null to start listing domain associations from
// the start. If non-null, a pagination token is returned in a result. Pass
// its value in here to list more domain associations.
NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
}
// String returns the string representation
func (s ListDomainAssociationsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListDomainAssociationsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListDomainAssociationsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListDomainAssociationsInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *ListDomainAssociationsInput) SetAppId(v string) *ListDomainAssociationsInput {
s.AppId = &v
return s
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListDomainAssociationsInput) SetMaxResults(v int64) *ListDomainAssociationsInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListDomainAssociationsInput) SetNextToken(v string) *ListDomainAssociationsInput {
s.NextToken = &v
return s
}
// The result structure for the list domain associations request.
type ListDomainAssociationsOutput struct {
_ struct{} `type:"structure"`
// A list of domain associations.
//
// DomainAssociations is a required field
DomainAssociations []*DomainAssociation `locationName:"domainAssociations" type:"list" required:"true"`
// A pagination token. If non-null, a pagination token is returned in a result.
// Pass its value in another request to retrieve more entries.
NextToken *string `locationName:"nextToken" type:"string"`
}
// String returns the string representation
func (s ListDomainAssociationsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListDomainAssociationsOutput) GoString() string {
return s.String()
}
// SetDomainAssociations sets the DomainAssociations field's value.
func (s *ListDomainAssociationsOutput) SetDomainAssociations(v []*DomainAssociation) *ListDomainAssociationsOutput {
s.DomainAssociations = v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListDomainAssociationsOutput) SetNextToken(v string) *ListDomainAssociationsOutput {
s.NextToken = &v
return s
}
// The request structure for the list jobs request.
type ListJobsInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// The name for a branch.
//
// BranchName is a required field
BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"`
// The maximum number of records to list in a single response.
MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"`
// A pagination token. Set to null to start listing jobs from the start. If
// a non-null pagination token is returned in a result, pass its value in here
// to list more jobs.
NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
}
// String returns the string representation
func (s ListJobsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListJobsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListJobsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListJobsInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if s.BranchName == nil {
invalidParams.Add(request.NewErrParamRequired("BranchName"))
}
if s.BranchName != nil && len(*s.BranchName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BranchName", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *ListJobsInput) SetAppId(v string) *ListJobsInput {
s.AppId = &v
return s
}
// SetBranchName sets the BranchName field's value.
func (s *ListJobsInput) SetBranchName(v string) *ListJobsInput {
s.BranchName = &v
return s
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListJobsInput) SetMaxResults(v int64) *ListJobsInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListJobsInput) SetNextToken(v string) *ListJobsInput {
s.NextToken = &v
return s
}
// The result structure for the list jobs request.
type ListJobsOutput struct {
_ struct{} `type:"structure"`
// A list of job summaries.
//
// JobSummaries is a required field
JobSummaries []*JobSummary `locationName:"jobSummaries" type:"list" required:"true"`
// A pagination token. If non-null, the pagination token is returned in a result.
// Pass its value in another request to retrieve more entries.
NextToken *string `locationName:"nextToken" type:"string"`
}
// String returns the string representation
func (s ListJobsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListJobsOutput) GoString() string {
return s.String()
}
// SetJobSummaries sets the JobSummaries field's value.
func (s *ListJobsOutput) SetJobSummaries(v []*JobSummary) *ListJobsOutput {
s.JobSummaries = v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListJobsOutput) SetNextToken(v string) *ListJobsOutput {
s.NextToken = &v
return s
}
// The request structure to use to list tags for a resource.
type ListTagsForResourceInput struct {
_ struct{} `type:"structure"`
// The Amazon Resource Name (ARN) to use to list tags.
//
// ResourceArn is a required field
ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"`
}
// String returns the string representation
func (s ListTagsForResourceInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListTagsForResourceInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListTagsForResourceInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"}
if s.ResourceArn == nil {
invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
}
if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetResourceArn sets the ResourceArn field's value.
func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput {
s.ResourceArn = &v
return s
}
// The response for the list tags for resource request.
type ListTagsForResourceOutput struct {
_ struct{} `type:"structure"`
// A list of tags for the specified Amazon Resource Name (ARN).
Tags map[string]*string `locationName:"tags" min:"1" type:"map"`
}
// String returns the string representation
func (s ListTagsForResourceOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListTagsForResourceOutput) GoString() string {
return s.String()
}
// SetTags sets the Tags field's value.
func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForResourceOutput {
s.Tags = v
return s
}
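// An illustrative sketch (not part of the generated API) of reading the
// returned tag map; the ARN is a placeholder:
//
//	import (
//		"fmt"
//
//		"github.com/aws/aws-sdk-go/aws"
//		"github.com/aws/aws-sdk-go/aws/session"
//		"github.com/aws/aws-sdk-go/service/amplify"
//	)
//
//	func exampleListTags() {
//		svc := amplify.New(session.Must(session.NewSession()))
//		out, err := svc.ListTagsForResource(&amplify.ListTagsForResourceInput{
//			ResourceArn: aws.String("arn:aws:amplify:us-east-1:123456789012:apps/d2example"),
//		})
//		if err != nil {
//			fmt.Println("ListTagsForResource failed:", err)
//			return
//		}
//		for k, v := range out.Tags {
//			fmt.Printf("%s=%s\n", k, aws.StringValue(v))
//		}
//	}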
// The request structure for the list webhooks request.
type ListWebhooksInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// The maximum number of records to list in a single response.
MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"`
// A pagination token. Set to null to start listing webhooks from the start.
// If non-null, the pagination token is returned in a result. Pass its value
// in here to list more webhooks.
NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
}
// String returns the string representation
func (s ListWebhooksInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListWebhooksInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListWebhooksInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListWebhooksInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *ListWebhooksInput) SetAppId(v string) *ListWebhooksInput {
s.AppId = &v
return s
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListWebhooksInput) SetMaxResults(v int64) *ListWebhooksInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListWebhooksInput) SetNextToken(v string) *ListWebhooksInput {
s.NextToken = &v
return s
}
// The result structure for the list webhooks request.
type ListWebhooksOutput struct {
_ struct{} `type:"structure"`
// A pagination token. If non-null, the pagination token is returned in a result.
// Pass its value in another request to retrieve more entries.
NextToken *string `locationName:"nextToken" type:"string"`
// A list of webhooks.
//
// Webhooks is a required field
Webhooks []*Webhook `locationName:"webhooks" type:"list" required:"true"`
}
// String returns the string representation
func (s ListWebhooksOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListWebhooksOutput) GoString() string {
return s.String()
}
// SetNextToken sets the NextToken field's value.
func (s *ListWebhooksOutput) SetNextToken(v string) *ListWebhooksOutput {
s.NextToken = &v
return s
}
// SetWebhooks sets the Webhooks field's value.
func (s *ListWebhooksOutput) SetWebhooks(v []*Webhook) *ListWebhooksOutput {
s.Webhooks = v
return s
}
// An entity was not found during an operation.
type NotFoundException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Message_ *string `locationName:"message" type:"string"`
}
// String returns the string representation
func (s NotFoundException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s NotFoundException) GoString() string {
return s.String()
}
func newErrorNotFoundException(v protocol.ResponseMetadata) error {
return &NotFoundException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *NotFoundException) Code() string {
return "NotFoundException"
}
// Message returns the exception's message.
func (s *NotFoundException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfying the awserr.Error interface.
func (s *NotFoundException) OrigErr() error {
return nil
}
func (s *NotFoundException) Error() string {
return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *NotFoundException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for the request.
func (s *NotFoundException) RequestID() string {
return s.RespMetadata.RequestID
}
// Describes the information about a production branch for an Amplify app.
type ProductionBranch struct {
_ struct{} `type:"structure"`
// The branch name for the production branch.
BranchName *string `locationName:"branchName" min:"1" type:"string"`
// The last deploy time of the production branch.
LastDeployTime *time.Time `locationName:"lastDeployTime" type:"timestamp"`
// The status of the production branch.
Status *string `locationName:"status" min:"3" type:"string"`
// The thumbnail URL for the production branch.
ThumbnailUrl *string `locationName:"thumbnailUrl" min:"1" type:"string"`
}
// String returns the string representation
func (s ProductionBranch) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ProductionBranch) GoString() string {
return s.String()
}
// SetBranchName sets the BranchName field's value.
func (s *ProductionBranch) SetBranchName(v string) *ProductionBranch {
s.BranchName = &v
return s
}
// SetLastDeployTime sets the LastDeployTime field's value.
func (s *ProductionBranch) SetLastDeployTime(v time.Time) *ProductionBranch {
s.LastDeployTime = &v
return s
}
// SetStatus sets the Status field's value.
func (s *ProductionBranch) SetStatus(v string) *ProductionBranch {
s.Status = &v
return s
}
// SetThumbnailUrl sets the ThumbnailUrl field's value.
func (s *ProductionBranch) SetThumbnailUrl(v string) *ProductionBranch {
s.ThumbnailUrl = &v
return s
}
// An operation failed due to a non-existent resource.
type ResourceNotFoundException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Code_ *string `locationName:"code" type:"string"`
Message_ *string `locationName:"message" type:"string"`
}
// String returns the string representation
func (s ResourceNotFoundException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ResourceNotFoundException) GoString() string {
return s.String()
}
func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error {
return &ResourceNotFoundException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *ResourceNotFoundException) Code() string {
return "ResourceNotFoundException"
}
// Message returns the exception's message.
func (s *ResourceNotFoundException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfying the awserr.Error interface.
func (s *ResourceNotFoundException) OrigErr() error {
return nil
}
func (s *ResourceNotFoundException) Error() string {
return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *ResourceNotFoundException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for the request.
func (s *ResourceNotFoundException) RequestID() string {
return s.RespMetadata.RequestID
}
// The request structure for the start deployment request.
type StartDeploymentInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// The name of the branch to use for the job.
//
// BranchName is a required field
BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"`
// The job ID for this deployment, generated by the create deployment request.
JobId *string `locationName:"jobId" type:"string"`
// The source URL for this deployment, used when calling start deployment without
// create deployment. The source URL can be any HTTP GET URL that is publicly
// accessible and downloads a single .zip file.
SourceUrl *string `locationName:"sourceUrl" type:"string"`
}
// String returns the string representation
func (s StartDeploymentInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StartDeploymentInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *StartDeploymentInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "StartDeploymentInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if s.BranchName == nil {
invalidParams.Add(request.NewErrParamRequired("BranchName"))
}
if s.BranchName != nil && len(*s.BranchName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BranchName", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *StartDeploymentInput) SetAppId(v string) *StartDeploymentInput {
s.AppId = &v
return s
}
// SetBranchName sets the BranchName field's value.
func (s *StartDeploymentInput) SetBranchName(v string) *StartDeploymentInput {
s.BranchName = &v
return s
}
// SetJobId sets the JobId field's value.
func (s *StartDeploymentInput) SetJobId(v string) *StartDeploymentInput {
s.JobId = &v
return s
}
// SetSourceUrl sets the SourceUrl field's value.
func (s *StartDeploymentInput) SetSourceUrl(v string) *StartDeploymentInput {
s.SourceUrl = &v
return s
}
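// An illustrative sketch (not part of the generated API) of the SourceUrl
// path described above: deploying a publicly downloadable .zip without a
// prior create deployment call. All identifiers and the URL are placeholders:
//
//	import (
//		"fmt"
//
//		"github.com/aws/aws-sdk-go/aws"
//		"github.com/aws/aws-sdk-go/aws/session"
//		"github.com/aws/aws-sdk-go/service/amplify"
//	)
//
//	func exampleStartDeployment() {
//		svc := amplify.New(session.Must(session.NewSession()))
//		out, err := svc.StartDeployment(&amplify.StartDeploymentInput{
//			AppId:      aws.String("d2example"),
//			BranchName: aws.String("main"),
//			SourceUrl:  aws.String("https://example.com/site.zip"),
//		})
//		if err != nil {
//			fmt.Println("StartDeployment failed:", err)
//			return
//		}
//		fmt.Println("job:", aws.StringValue(out.JobSummary.JobId))
//	}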
// The result structure for the start deployment request.
type StartDeploymentOutput struct {
_ struct{} `type:"structure"`
// The summary for the job.
//
// JobSummary is a required field
JobSummary *JobSummary `locationName:"jobSummary" type:"structure" required:"true"`
}
// String returns the string representation
func (s StartDeploymentOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StartDeploymentOutput) GoString() string {
return s.String()
}
// SetJobSummary sets the JobSummary field's value.
func (s *StartDeploymentOutput) SetJobSummary(v *JobSummary) *StartDeploymentOutput {
s.JobSummary = v
return s
}
// The request structure for the start job request.
type StartJobInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// The branch name for the job.
//
// BranchName is a required field
BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"`
// The commit ID from a third-party repository provider for the job.
CommitId *string `locationName:"commitId" type:"string"`
// The commit message from a third-party repository provider for the job.
CommitMessage *string `locationName:"commitMessage" type:"string"`
// The commit date and time for the job.
CommitTime *time.Time `locationName:"commitTime" type:"timestamp"`
// The unique ID for an existing job. This is required if the value of jobType
// is RETRY.
JobId *string `locationName:"jobId" type:"string"`
// A descriptive reason for starting this job.
JobReason *string `locationName:"jobReason" type:"string"`
// Describes the type for the job. The job type RELEASE starts a new job with
// the latest change from the specified branch. This value is available only
// for apps that are connected to a repository. The job type RETRY retries an
// existing job. If the job type value is RETRY, the jobId is also required.
//
// JobType is a required field
JobType *string `locationName:"jobType" type:"string" required:"true" enum:"JobType"`
}
// String returns the string representation
func (s StartJobInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StartJobInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *StartJobInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "StartJobInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if s.BranchName == nil {
invalidParams.Add(request.NewErrParamRequired("BranchName"))
}
if s.BranchName != nil && len(*s.BranchName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BranchName", 1))
}
if s.JobType == nil {
invalidParams.Add(request.NewErrParamRequired("JobType"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *StartJobInput) SetAppId(v string) *StartJobInput {
s.AppId = &v
return s
}
// SetBranchName sets the BranchName field's value.
func (s *StartJobInput) SetBranchName(v string) *StartJobInput {
s.BranchName = &v
return s
}
// SetCommitId sets the CommitId field's value.
func (s *StartJobInput) SetCommitId(v string) *StartJobInput {
s.CommitId = &v
return s
}
// SetCommitMessage sets the CommitMessage field's value.
func (s *StartJobInput) SetCommitMessage(v string) *StartJobInput {
s.CommitMessage = &v
return s
}
// SetCommitTime sets the CommitTime field's value.
func (s *StartJobInput) SetCommitTime(v time.Time) *StartJobInput {
s.CommitTime = &v
return s
}
// SetJobId sets the JobId field's value.
func (s *StartJobInput) SetJobId(v string) *StartJobInput {
s.JobId = &v
return s
}
// SetJobReason sets the JobReason field's value.
func (s *StartJobInput) SetJobReason(v string) *StartJobInput {
s.JobReason = &v
return s
}
// SetJobType sets the JobType field's value.
func (s *StartJobInput) SetJobType(v string) *StartJobInput {
s.JobType = &v
return s
}
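// Example (illustrative sketch, not part of the generated API): per the
// JobType documentation above, a RETRY request must also carry JobId, while a
// RELEASE request needs only the app, branch, and job type. Note that
// Validate only checks AppId, BranchName, and JobType; the JobId-for-RETRY
// rule is enforced by the service. All values below are placeholders.
//
// retry := &StartJobInput{
// AppId: aws.String("d1a2b3c4d5e6f7"),
// BranchName: aws.String("main"),
// JobType: aws.String(JobTypeRetry),
// JobId: aws.String("42"),
// }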
// The result structure for the start job request.
type StartJobOutput struct {
_ struct{} `type:"structure"`
// The summary for the job.
//
// JobSummary is a required field
JobSummary *JobSummary `locationName:"jobSummary" type:"structure" required:"true"`
}
// String returns the string representation
func (s StartJobOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StartJobOutput) GoString() string {
return s.String()
}
// SetJobSummary sets the JobSummary field's value.
func (s *StartJobOutput) SetJobSummary(v *JobSummary) *StartJobOutput {
s.JobSummary = v
return s
}
// Describes an execution step, for an execution job, for an Amplify app.
type Step struct {
_ struct{} `type:"structure"`
// The URL to the artifact for the execution step.
ArtifactsUrl *string `locationName:"artifactsUrl" type:"string"`
// The context for the current step. Includes a build image if the step is build.
Context *string `locationName:"context" type:"string"`
// The end date and time of the execution step.
//
// EndTime is a required field
EndTime *time.Time `locationName:"endTime" type:"timestamp" required:"true"`
// The URL to the logs for the execution step.
LogUrl *string `locationName:"logUrl" type:"string"`
// The list of screenshot URLs for the execution step, if relevant.
Screenshots map[string]*string `locationName:"screenshots" type:"map"`
// The start date and time of the execution step.
//
// StartTime is a required field
StartTime *time.Time `locationName:"startTime" type:"timestamp" required:"true"`
// The status of the execution step.
//
// Status is a required field
Status *string `locationName:"status" type:"string" required:"true" enum:"JobStatus"`
// The reason for the current step status.
StatusReason *string `locationName:"statusReason" type:"string"`
// The name of the execution step.
//
// StepName is a required field
StepName *string `locationName:"stepName" type:"string" required:"true"`
// The URL to the test artifact for the execution step.
TestArtifactsUrl *string `locationName:"testArtifactsUrl" type:"string"`
// The URL to the test configuration for the execution step.
TestConfigUrl *string `locationName:"testConfigUrl" type:"string"`
}
// String returns the string representation
func (s Step) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Step) GoString() string {
return s.String()
}
// SetArtifactsUrl sets the ArtifactsUrl field's value.
func (s *Step) SetArtifactsUrl(v string) *Step {
s.ArtifactsUrl = &v
return s
}
// SetContext sets the Context field's value.
func (s *Step) SetContext(v string) *Step {
s.Context = &v
return s
}
// SetEndTime sets the EndTime field's value.
func (s *Step) SetEndTime(v time.Time) *Step {
s.EndTime = &v
return s
}
// SetLogUrl sets the LogUrl field's value.
func (s *Step) SetLogUrl(v string) *Step {
s.LogUrl = &v
return s
}
// SetScreenshots sets the Screenshots field's value.
func (s *Step) SetScreenshots(v map[string]*string) *Step {
s.Screenshots = v
return s
}
// SetStartTime sets the StartTime field's value.
func (s *Step) SetStartTime(v time.Time) *Step {
s.StartTime = &v
return s
}
// SetStatus sets the Status field's value.
func (s *Step) SetStatus(v string) *Step {
s.Status = &v
return s
}
// SetStatusReason sets the StatusReason field's value.
func (s *Step) SetStatusReason(v string) *Step {
s.StatusReason = &v
return s
}
// SetStepName sets the StepName field's value.
func (s *Step) SetStepName(v string) *Step {
s.StepName = &v
return s
}
// SetTestArtifactsUrl sets the TestArtifactsUrl field's value.
func (s *Step) SetTestArtifactsUrl(v string) *Step {
s.TestArtifactsUrl = &v
return s
}
// SetTestConfigUrl sets the TestConfigUrl field's value.
func (s *Step) SetTestConfigUrl(v string) *Step {
s.TestConfigUrl = &v
return s
}
// The request structure for the stop job request.
type StopJobInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// The name for the branch, for the job.
//
// BranchName is a required field
BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"`
// The unique ID for the job.
//
// JobId is a required field
JobId *string `location:"uri" locationName:"jobId" type:"string" required:"true"`
}
// String returns the string representation
func (s StopJobInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StopJobInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *StopJobInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "StopJobInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if s.BranchName == nil {
invalidParams.Add(request.NewErrParamRequired("BranchName"))
}
if s.BranchName != nil && len(*s.BranchName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BranchName", 1))
}
if s.JobId == nil {
invalidParams.Add(request.NewErrParamRequired("JobId"))
}
if s.JobId != nil && len(*s.JobId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *StopJobInput) SetAppId(v string) *StopJobInput {
s.AppId = &v
return s
}
// SetBranchName sets the BranchName field's value.
func (s *StopJobInput) SetBranchName(v string) *StopJobInput {
s.BranchName = &v
return s
}
// SetJobId sets the JobId field's value.
func (s *StopJobInput) SetJobId(v string) *StopJobInput {
s.JobId = &v
return s
}
// The result structure for the stop job request.
type StopJobOutput struct {
_ struct{} `type:"structure"`
// The summary for the job.
//
// JobSummary is a required field
JobSummary *JobSummary `locationName:"jobSummary" type:"structure" required:"true"`
}
// String returns the string representation
func (s StopJobOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StopJobOutput) GoString() string {
return s.String()
}
// SetJobSummary sets the JobSummary field's value.
func (s *StopJobOutput) SetJobSummary(v *JobSummary) *StopJobOutput {
s.JobSummary = v
return s
}
// The subdomain for the domain association.
type SubDomain struct {
_ struct{} `type:"structure"`
// The DNS record for the subdomain.
//
// DnsRecord is a required field
DnsRecord *string `locationName:"dnsRecord" type:"string" required:"true"`
// Describes the settings for the subdomain.
//
// SubDomainSetting is a required field
SubDomainSetting *SubDomainSetting `locationName:"subDomainSetting" type:"structure" required:"true"`
// The verified status of the subdomain.
//
// Verified is a required field
Verified *bool `locationName:"verified" type:"boolean" required:"true"`
}
// String returns the string representation
func (s SubDomain) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s SubDomain) GoString() string {
return s.String()
}
// SetDnsRecord sets the DnsRecord field's value.
func (s *SubDomain) SetDnsRecord(v string) *SubDomain {
s.DnsRecord = &v
return s
}
// SetSubDomainSetting sets the SubDomainSetting field's value.
func (s *SubDomain) SetSubDomainSetting(v *SubDomainSetting) *SubDomain {
s.SubDomainSetting = v
return s
}
// SetVerified sets the Verified field's value.
func (s *SubDomain) SetVerified(v bool) *SubDomain {
s.Verified = &v
return s
}
// Describes the settings for the subdomain.
type SubDomainSetting struct {
_ struct{} `type:"structure"`
// The branch name setting for the subdomain.
//
// BranchName is a required field
BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"`
// The prefix setting for the subdomain.
//
// Prefix is a required field
Prefix *string `locationName:"prefix" type:"string" required:"true"`
}
// String returns the string representation
func (s SubDomainSetting) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s SubDomainSetting) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *SubDomainSetting) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "SubDomainSetting"}
if s.BranchName == nil {
invalidParams.Add(request.NewErrParamRequired("BranchName"))
}
if s.BranchName != nil && len(*s.BranchName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BranchName", 1))
}
if s.Prefix == nil {
invalidParams.Add(request.NewErrParamRequired("Prefix"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBranchName sets the BranchName field's value.
func (s *SubDomainSetting) SetBranchName(v string) *SubDomainSetting {
s.BranchName = &v
return s
}
// SetPrefix sets the Prefix field's value.
func (s *SubDomainSetting) SetPrefix(v string) *SubDomainSetting {
s.Prefix = &v
return s
}
// The request structure to tag a resource with a tag key and value.
type TagResourceInput struct {
_ struct{} `type:"structure"`
// The Amazon Resource Name (ARN) to use to tag a resource.
//
// ResourceArn is a required field
ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"`
// The tags used to tag the resource.
//
// Tags is a required field
Tags map[string]*string `locationName:"tags" min:"1" type:"map" required:"true"`
}
// String returns the string representation
func (s TagResourceInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s TagResourceInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *TagResourceInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"}
if s.ResourceArn == nil {
invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
}
if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1))
}
if s.Tags == nil {
invalidParams.Add(request.NewErrParamRequired("Tags"))
}
if s.Tags != nil && len(s.Tags) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Tags", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetResourceArn sets the ResourceArn field's value.
func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput {
s.ResourceArn = &v
return s
}
// SetTags sets the Tags field's value.
func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput {
s.Tags = v
return s
}
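// Example (illustrative sketch, not part of the generated API): Validate
// rejects a nil or empty Tags map, so a tagging request needs at least one
// entry. The ARN and tag values below are placeholders.
//
// input := &TagResourceInput{
// ResourceArn: aws.String("arn:aws:amplify:us-east-1:123456789012:apps/d1a2b3c4d5e6f7"),
// Tags: map[string]*string{"team": aws.String("web")},
// }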
// The response for the tag resource request.
type TagResourceOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s TagResourceOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s TagResourceOutput) GoString() string {
return s.String()
}
// An operation failed due to a lack of access.
type UnauthorizedException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Message_ *string `locationName:"message" type:"string"`
}
// String returns the string representation
func (s UnauthorizedException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UnauthorizedException) GoString() string {
return s.String()
}
func newErrorUnauthorizedException(v protocol.ResponseMetadata) error {
return &UnauthorizedException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *UnauthorizedException) Code() string {
return "UnauthorizedException"
}
// Message returns the exception's message.
func (s *UnauthorizedException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *UnauthorizedException) OrigErr() error {
return nil
}
func (s *UnauthorizedException) Error() string {
return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *UnauthorizedException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for the request.
func (s *UnauthorizedException) RequestID() string {
return s.RespMetadata.RequestID
}
// The request structure for the untag resource request.
type UntagResourceInput struct {
_ struct{} `type:"structure"`
// The Amazon Resource Name (ARN) to use to untag a resource.
//
// ResourceArn is a required field
ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"`
// The tag keys to use to untag a resource.
//
// TagKeys is a required field
TagKeys []*string `location:"querystring" locationName:"tagKeys" min:"1" type:"list" required:"true"`
}
// String returns the string representation
func (s UntagResourceInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UntagResourceInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *UntagResourceInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"}
if s.ResourceArn == nil {
invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
}
if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1))
}
if s.TagKeys == nil {
invalidParams.Add(request.NewErrParamRequired("TagKeys"))
}
if s.TagKeys != nil && len(s.TagKeys) < 1 {
invalidParams.Add(request.NewErrParamMinLen("TagKeys", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetResourceArn sets the ResourceArn field's value.
func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput {
s.ResourceArn = &v
return s
}
// SetTagKeys sets the TagKeys field's value.
func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput {
s.TagKeys = v
return s
}
// The response for the untag resource request.
type UntagResourceOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s UntagResourceOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UntagResourceOutput) GoString() string {
return s.String()
}
// The request structure for the update app request.
type UpdateAppInput struct {
_ struct{} `type:"structure"`
// The personal access token for a third-party source control system for an
// Amplify app. The token is used to create a webhook and a read-only deploy key.
// The token is not stored.
AccessToken *string `locationName:"accessToken" min:"1" type:"string" sensitive:"true"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// The automated branch creation configuration for an Amplify app.
AutoBranchCreationConfig *AutoBranchCreationConfig `locationName:"autoBranchCreationConfig" type:"structure"`
// Describes the automated branch creation glob patterns for an Amplify app.
AutoBranchCreationPatterns []*string `locationName:"autoBranchCreationPatterns" type:"list"`
// The basic authorization credentials for an Amplify app.
BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string" sensitive:"true"`
// The build specification (build spec) for an Amplify app.
BuildSpec *string `locationName:"buildSpec" min:"1" type:"string"`
// The custom HTTP headers for an Amplify app.
CustomHeaders *string `locationName:"customHeaders" min:"1" type:"string"`
// The custom redirect and rewrite rules for an Amplify app.
CustomRules []*CustomRule `locationName:"customRules" type:"list"`
// The description for an Amplify app.
Description *string `locationName:"description" type:"string"`
// Enables automated branch creation for an Amplify app.
EnableAutoBranchCreation *bool `locationName:"enableAutoBranchCreation" type:"boolean"`
// Enables basic authorization for an Amplify app.
EnableBasicAuth *bool `locationName:"enableBasicAuth" type:"boolean"`
// Enables branch auto-building for an Amplify app.
EnableBranchAutoBuild *bool `locationName:"enableBranchAutoBuild" type:"boolean"`
// Automatically disconnects a branch in the Amplify Console when you delete
// a branch from your Git repository.
EnableBranchAutoDeletion *bool `locationName:"enableBranchAutoDeletion" type:"boolean"`
// The environment variables for an Amplify app.
EnvironmentVariables map[string]*string `locationName:"environmentVariables" type:"map"`
// The AWS Identity and Access Management (IAM) service role for an Amplify
// app.
IamServiceRoleArn *string `locationName:"iamServiceRoleArn" min:"1" type:"string"`
// The name for an Amplify app.
Name *string `locationName:"name" min:"1" type:"string"`
// The OAuth token for a third-party source control system for an Amplify app.
// The token is used to create a webhook and a read-only deploy key. The OAuth
// token is not stored.
OauthToken *string `locationName:"oauthToken" type:"string" sensitive:"true"`
// The platform for an Amplify app.
Platform *string `locationName:"platform" type:"string" enum:"Platform"`
// The name of the repository for an Amplify app.
Repository *string `locationName:"repository" type:"string"`
}
// String returns the string representation
func (s UpdateAppInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateAppInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateAppInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "UpdateAppInput"}
if s.AccessToken != nil && len(*s.AccessToken) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AccessToken", 1))
}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if s.BuildSpec != nil && len(*s.BuildSpec) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BuildSpec", 1))
}
if s.CustomHeaders != nil && len(*s.CustomHeaders) < 1 {
invalidParams.Add(request.NewErrParamMinLen("CustomHeaders", 1))
}
if s.IamServiceRoleArn != nil && len(*s.IamServiceRoleArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("IamServiceRoleArn", 1))
}
if s.Name != nil && len(*s.Name) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Name", 1))
}
if s.AutoBranchCreationConfig != nil {
if err := s.AutoBranchCreationConfig.Validate(); err != nil {
invalidParams.AddNested("AutoBranchCreationConfig", err.(request.ErrInvalidParams))
}
}
if s.CustomRules != nil {
for i, v := range s.CustomRules {
if v == nil {
continue
}
if err := v.Validate(); err != nil {
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CustomRules", i), err.(request.ErrInvalidParams))
}
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAccessToken sets the AccessToken field's value.
func (s *UpdateAppInput) SetAccessToken(v string) *UpdateAppInput {
s.AccessToken = &v
return s
}
// SetAppId sets the AppId field's value.
func (s *UpdateAppInput) SetAppId(v string) *UpdateAppInput {
s.AppId = &v
return s
}
// SetAutoBranchCreationConfig sets the AutoBranchCreationConfig field's value.
func (s *UpdateAppInput) SetAutoBranchCreationConfig(v *AutoBranchCreationConfig) *UpdateAppInput {
s.AutoBranchCreationConfig = v
return s
}
// SetAutoBranchCreationPatterns sets the AutoBranchCreationPatterns field's value.
func (s *UpdateAppInput) SetAutoBranchCreationPatterns(v []*string) *UpdateAppInput {
s.AutoBranchCreationPatterns = v
return s
}
// SetBasicAuthCredentials sets the BasicAuthCredentials field's value.
func (s *UpdateAppInput) SetBasicAuthCredentials(v string) *UpdateAppInput {
s.BasicAuthCredentials = &v
return s
}
// SetBuildSpec sets the BuildSpec field's value.
func (s *UpdateAppInput) SetBuildSpec(v string) *UpdateAppInput {
s.BuildSpec = &v
return s
}
// SetCustomHeaders sets the CustomHeaders field's value.
func (s *UpdateAppInput) SetCustomHeaders(v string) *UpdateAppInput {
s.CustomHeaders = &v
return s
}
// SetCustomRules sets the CustomRules field's value.
func (s *UpdateAppInput) SetCustomRules(v []*CustomRule) *UpdateAppInput {
s.CustomRules = v
return s
}
// SetDescription sets the Description field's value.
func (s *UpdateAppInput) SetDescription(v string) *UpdateAppInput {
s.Description = &v
return s
}
// SetEnableAutoBranchCreation sets the EnableAutoBranchCreation field's value.
func (s *UpdateAppInput) SetEnableAutoBranchCreation(v bool) *UpdateAppInput {
s.EnableAutoBranchCreation = &v
return s
}
// SetEnableBasicAuth sets the EnableBasicAuth field's value.
func (s *UpdateAppInput) SetEnableBasicAuth(v bool) *UpdateAppInput {
s.EnableBasicAuth = &v
return s
}
// SetEnableBranchAutoBuild sets the EnableBranchAutoBuild field's value.
func (s *UpdateAppInput) SetEnableBranchAutoBuild(v bool) *UpdateAppInput {
s.EnableBranchAutoBuild = &v
return s
}
// SetEnableBranchAutoDeletion sets the EnableBranchAutoDeletion field's value.
func (s *UpdateAppInput) SetEnableBranchAutoDeletion(v bool) *UpdateAppInput {
s.EnableBranchAutoDeletion = &v
return s
}
// SetEnvironmentVariables sets the EnvironmentVariables field's value.
func (s *UpdateAppInput) SetEnvironmentVariables(v map[string]*string) *UpdateAppInput {
s.EnvironmentVariables = v
return s
}
// SetIamServiceRoleArn sets the IamServiceRoleArn field's value.
func (s *UpdateAppInput) SetIamServiceRoleArn(v string) *UpdateAppInput {
s.IamServiceRoleArn = &v
return s
}
// SetName sets the Name field's value.
func (s *UpdateAppInput) SetName(v string) *UpdateAppInput {
s.Name = &v
return s
}
// SetOauthToken sets the OauthToken field's value.
func (s *UpdateAppInput) SetOauthToken(v string) *UpdateAppInput {
s.OauthToken = &v
return s
}
// SetPlatform sets the Platform field's value.
func (s *UpdateAppInput) SetPlatform(v string) *UpdateAppInput {
s.Platform = &v
return s
}
// SetRepository sets the Repository field's value.
func (s *UpdateAppInput) SetRepository(v string) *UpdateAppInput {
s.Repository = &v
return s
}
// The result structure for an Amplify app update request.
type UpdateAppOutput struct {
_ struct{} `type:"structure"`
// Represents the updated Amplify app.
//
// App is a required field
App *App `locationName:"app" type:"structure" required:"true"`
}
// String returns the string representation
func (s UpdateAppOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateAppOutput) GoString() string {
return s.String()
}
// SetApp sets the App field's value.
func (s *UpdateAppOutput) SetApp(v *App) *UpdateAppOutput {
s.App = v
return s
}
// The request structure for the update branch request.
type UpdateBranchInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// The Amazon Resource Name (ARN) for a backend environment that is part of
// an Amplify app.
BackendEnvironmentArn *string `locationName:"backendEnvironmentArn" min:"1" type:"string"`
// The basic authorization credentials for the branch.
BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string" sensitive:"true"`
// The name for the branch.
//
// BranchName is a required field
BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"`
// The build specification (build spec) for the branch.
BuildSpec *string `locationName:"buildSpec" min:"1" type:"string"`
// The description for the branch.
Description *string `locationName:"description" type:"string"`
// The display name for a branch. This is used as the default domain prefix.
DisplayName *string `locationName:"displayName" type:"string"`
// Enables auto building for the branch.
EnableAutoBuild *bool `locationName:"enableAutoBuild" type:"boolean"`
// Enables basic authorization for the branch.
EnableBasicAuth *bool `locationName:"enableBasicAuth" type:"boolean"`
// Enables notifications for the branch.
EnableNotification *bool `locationName:"enableNotification" type:"boolean"`
// Enables performance mode for the branch.
//
// Performance mode optimizes for faster hosting performance by keeping content
// cached at the edge for a longer interval. When performance mode is enabled,
// hosting configuration or code changes can take up to 10 minutes to roll out.
EnablePerformanceMode *bool `locationName:"enablePerformanceMode" type:"boolean"`
// Enables pull request previews for this branch.
EnablePullRequestPreview *bool `locationName:"enablePullRequestPreview" type:"boolean"`
// The environment variables for the branch.
EnvironmentVariables map[string]*string `locationName:"environmentVariables" type:"map"`
// The framework for the branch.
Framework *string `locationName:"framework" type:"string"`
// The Amplify environment name for the pull request.
PullRequestEnvironmentName *string `locationName:"pullRequestEnvironmentName" type:"string"`
// Describes the current stage for the branch.
Stage *string `locationName:"stage" type:"string" enum:"Stage"`
// The content Time to Live (TTL) for the website in seconds.
Ttl *string `locationName:"ttl" type:"string"`
}
// String returns the string representation
func (s UpdateBranchInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateBranchInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateBranchInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "UpdateBranchInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if s.BackendEnvironmentArn != nil && len(*s.BackendEnvironmentArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackendEnvironmentArn", 1))
}
if s.BranchName == nil {
invalidParams.Add(request.NewErrParamRequired("BranchName"))
}
if s.BranchName != nil && len(*s.BranchName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BranchName", 1))
}
if s.BuildSpec != nil && len(*s.BuildSpec) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BuildSpec", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *UpdateBranchInput) SetAppId(v string) *UpdateBranchInput {
s.AppId = &v
return s
}
// SetBackendEnvironmentArn sets the BackendEnvironmentArn field's value.
func (s *UpdateBranchInput) SetBackendEnvironmentArn(v string) *UpdateBranchInput {
s.BackendEnvironmentArn = &v
return s
}
// SetBasicAuthCredentials sets the BasicAuthCredentials field's value.
func (s *UpdateBranchInput) SetBasicAuthCredentials(v string) *UpdateBranchInput {
s.BasicAuthCredentials = &v
return s
}
// SetBranchName sets the BranchName field's value.
func (s *UpdateBranchInput) SetBranchName(v string) *UpdateBranchInput {
s.BranchName = &v
return s
}
// SetBuildSpec sets the BuildSpec field's value.
func (s *UpdateBranchInput) SetBuildSpec(v string) *UpdateBranchInput {
s.BuildSpec = &v
return s
}
// SetDescription sets the Description field's value.
func (s *UpdateBranchInput) SetDescription(v string) *UpdateBranchInput {
s.Description = &v
return s
}
// SetDisplayName sets the DisplayName field's value.
func (s *UpdateBranchInput) SetDisplayName(v string) *UpdateBranchInput {
s.DisplayName = &v
return s
}
// SetEnableAutoBuild sets the EnableAutoBuild field's value.
func (s *UpdateBranchInput) SetEnableAutoBuild(v bool) *UpdateBranchInput {
s.EnableAutoBuild = &v
return s
}
// SetEnableBasicAuth sets the EnableBasicAuth field's value.
func (s *UpdateBranchInput) SetEnableBasicAuth(v bool) *UpdateBranchInput {
s.EnableBasicAuth = &v
return s
}
// SetEnableNotification sets the EnableNotification field's value.
func (s *UpdateBranchInput) SetEnableNotification(v bool) *UpdateBranchInput {
s.EnableNotification = &v
return s
}
// SetEnablePerformanceMode sets the EnablePerformanceMode field's value.
func (s *UpdateBranchInput) SetEnablePerformanceMode(v bool) *UpdateBranchInput {
s.EnablePerformanceMode = &v
return s
}
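// Example (illustrative sketch, not part of the generated API): enabling
// performance mode through the fluent setters; per the field documentation
// above, later hosting configuration or code changes can take up to 10
// minutes to roll out. The app ID and branch name are placeholders.
//
// input := (&UpdateBranchInput{}).
// SetAppId("d1a2b3c4d5e6f7").
// SetBranchName("main").
// SetEnablePerformanceMode(true)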
// SetEnablePullRequestPreview sets the EnablePullRequestPreview field's value.
func (s *UpdateBranchInput) SetEnablePullRequestPreview(v bool) *UpdateBranchInput {
s.EnablePullRequestPreview = &v
return s
}
// SetEnvironmentVariables sets the EnvironmentVariables field's value.
func (s *UpdateBranchInput) SetEnvironmentVariables(v map[string]*string) *UpdateBranchInput {
s.EnvironmentVariables = v
return s
}
// SetFramework sets the Framework field's value.
func (s *UpdateBranchInput) SetFramework(v string) *UpdateBranchInput {
s.Framework = &v
return s
}
// SetPullRequestEnvironmentName sets the PullRequestEnvironmentName field's value.
func (s *UpdateBranchInput) SetPullRequestEnvironmentName(v string) *UpdateBranchInput {
s.PullRequestEnvironmentName = &v
return s
}
// SetStage sets the Stage field's value.
func (s *UpdateBranchInput) SetStage(v string) *UpdateBranchInput {
s.Stage = &v
return s
}
// SetTtl sets the Ttl field's value.
func (s *UpdateBranchInput) SetTtl(v string) *UpdateBranchInput {
s.Ttl = &v
return s
}
// The result structure for the update branch request.
type UpdateBranchOutput struct {
_ struct{} `type:"structure"`
// The branch for an Amplify app, which maps to a third-party repository branch.
//
// Branch is a required field
Branch *Branch `locationName:"branch" type:"structure" required:"true"`
}
// String returns the string representation
func (s UpdateBranchOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateBranchOutput) GoString() string {
return s.String()
}
// SetBranch sets the Branch field's value.
func (s *UpdateBranchOutput) SetBranch(v *Branch) *UpdateBranchOutput {
s.Branch = v
return s
}
// The request structure for the update domain association request.
type UpdateDomainAssociationInput struct {
_ struct{} `type:"structure"`
// The unique ID for an Amplify app.
//
// AppId is a required field
AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"`
// Sets the branch patterns for automatic subdomain creation.
AutoSubDomainCreationPatterns []*string `locationName:"autoSubDomainCreationPatterns" type:"list"`
// The Amazon Resource Name (ARN) of the AWS Identity and Access Management
// (IAM) service role that is required for automatically creating subdomains.
AutoSubDomainIAMRole *string `locationName:"autoSubDomainIAMRole" type:"string"`
// The name of the domain.
//
// DomainName is a required field
DomainName *string `location:"uri" locationName:"domainName" type:"string" required:"true"`
// Enables the automated creation of subdomains for branches.
EnableAutoSubDomain *bool `locationName:"enableAutoSubDomain" type:"boolean"`
// Describes the settings for the subdomain.
//
// SubDomainSettings is a required field
SubDomainSettings []*SubDomainSetting `locationName:"subDomainSettings" type:"list" required:"true"`
}
// String returns the string representation
func (s UpdateDomainAssociationInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateDomainAssociationInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateDomainAssociationInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "UpdateDomainAssociationInput"}
if s.AppId == nil {
invalidParams.Add(request.NewErrParamRequired("AppId"))
}
if s.AppId != nil && len(*s.AppId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AppId", 1))
}
if s.DomainName == nil {
invalidParams.Add(request.NewErrParamRequired("DomainName"))
}
if s.DomainName != nil && len(*s.DomainName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("DomainName", 1))
}
if s.SubDomainSettings == nil {
invalidParams.Add(request.NewErrParamRequired("SubDomainSettings"))
}
if s.SubDomainSettings != nil {
for i, v := range s.SubDomainSettings {
if v == nil {
continue
}
if err := v.Validate(); err != nil {
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SubDomainSettings", i), err.(request.ErrInvalidParams))
}
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAppId sets the AppId field's value.
func (s *UpdateDomainAssociationInput) SetAppId(v string) *UpdateDomainAssociationInput {
s.AppId = &v
return s
}
// SetAutoSubDomainCreationPatterns sets the AutoSubDomainCreationPatterns field's value.
func (s *UpdateDomainAssociationInput) SetAutoSubDomainCreationPatterns(v []*string) *UpdateDomainAssociationInput {
s.AutoSubDomainCreationPatterns = v
return s
}
// SetAutoSubDomainIAMRole sets the AutoSubDomainIAMRole field's value.
func (s *UpdateDomainAssociationInput) SetAutoSubDomainIAMRole(v string) *UpdateDomainAssociationInput {
s.AutoSubDomainIAMRole = &v
return s
}
// SetDomainName sets the DomainName field's value.
func (s *UpdateDomainAssociationInput) SetDomainName(v string) *UpdateDomainAssociationInput {
s.DomainName = &v
return s
}
// SetEnableAutoSubDomain sets the EnableAutoSubDomain field's value.
func (s *UpdateDomainAssociationInput) SetEnableAutoSubDomain(v bool) *UpdateDomainAssociationInput {
s.EnableAutoSubDomain = &v
return s
}
// SetSubDomainSettings sets the SubDomainSettings field's value.
func (s *UpdateDomainAssociationInput) SetSubDomainSettings(v []*SubDomainSetting) *UpdateDomainAssociationInput {
s.SubDomainSettings = v
return s
}
// The result structure for the update domain association request.
type UpdateDomainAssociationOutput struct {
_ struct{} `type:"structure"`
// Describes a domain association, which associates a custom domain with an
// Amplify app.
//
// DomainAssociation is a required field
DomainAssociation *DomainAssociation `locationName:"domainAssociation" type:"structure" required:"true"`
}
// String returns the string representation
func (s UpdateDomainAssociationOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateDomainAssociationOutput) GoString() string {
return s.String()
}
// SetDomainAssociation sets the DomainAssociation field's value.
func (s *UpdateDomainAssociationOutput) SetDomainAssociation(v *DomainAssociation) *UpdateDomainAssociationOutput {
s.DomainAssociation = v
return s
}
// The request structure for the update webhook request.
type UpdateWebhookInput struct {
_ struct{} `type:"structure"`
// The name for a branch that is part of an Amplify app.
BranchName *string `locationName:"branchName" min:"1" type:"string"`
// The description for a webhook.
Description *string `locationName:"description" type:"string"`
// The unique ID for a webhook.
//
// WebhookId is a required field
WebhookId *string `location:"uri" locationName:"webhookId" type:"string" required:"true"`
}
// String returns the string representation
func (s UpdateWebhookInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateWebhookInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateWebhookInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "UpdateWebhookInput"}
if s.BranchName != nil && len(*s.BranchName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BranchName", 1))
}
if s.WebhookId == nil {
invalidParams.Add(request.NewErrParamRequired("WebhookId"))
}
if s.WebhookId != nil && len(*s.WebhookId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("WebhookId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBranchName sets the BranchName field's value.
func (s *UpdateWebhookInput) SetBranchName(v string) *UpdateWebhookInput {
s.BranchName = &v
return s
}
// SetDescription sets the Description field's value.
func (s *UpdateWebhookInput) SetDescription(v string) *UpdateWebhookInput {
s.Description = &v
return s
}
// SetWebhookId sets the WebhookId field's value.
func (s *UpdateWebhookInput) SetWebhookId(v string) *UpdateWebhookInput {
s.WebhookId = &v
return s
}
// The result structure for the update webhook request.
type UpdateWebhookOutput struct {
_ struct{} `type:"structure"`
// Describes a webhook that connects repository events to an Amplify app.
//
// Webhook is a required field
Webhook *Webhook `locationName:"webhook" type:"structure" required:"true"`
}
// String returns the string representation
func (s UpdateWebhookOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateWebhookOutput) GoString() string {
return s.String()
}
// SetWebhook sets the Webhook field's value.
func (s *UpdateWebhookOutput) SetWebhook(v *Webhook) *UpdateWebhookOutput {
s.Webhook = v
return s
}
// Describes a webhook that connects repository events to an Amplify app.
type Webhook struct {
_ struct{} `type:"structure"`
// The name for a branch that is part of an Amplify app.
//
// BranchName is a required field
BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"`
// The create date and time for a webhook.
//
// CreateTime is a required field
CreateTime *time.Time `locationName:"createTime" type:"timestamp" required:"true"`
// The description for a webhook.
//
// Description is a required field
Description *string `locationName:"description" type:"string" required:"true"`
// The update date and time for a webhook.
//
// UpdateTime is a required field
UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" required:"true"`
// The Amazon Resource Name (ARN) for the webhook.
//
// WebhookArn is a required field
WebhookArn *string `locationName:"webhookArn" type:"string" required:"true"`
// The ID of the webhook.
//
// WebhookId is a required field
WebhookId *string `locationName:"webhookId" type:"string" required:"true"`
// The URL of the webhook.
//
// WebhookUrl is a required field
WebhookUrl *string `locationName:"webhookUrl" type:"string" required:"true"`
}
// String returns the string representation
func (s Webhook) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Webhook) GoString() string {
return s.String()
}
// SetBranchName sets the BranchName field's value.
func (s *Webhook) SetBranchName(v string) *Webhook {
s.BranchName = &v
return s
}
// SetCreateTime sets the CreateTime field's value.
func (s *Webhook) SetCreateTime(v time.Time) *Webhook {
s.CreateTime = &v
return s
}
// SetDescription sets the Description field's value.
func (s *Webhook) SetDescription(v string) *Webhook {
s.Description = &v
return s
}
// SetUpdateTime sets the UpdateTime field's value.
func (s *Webhook) SetUpdateTime(v time.Time) *Webhook {
s.UpdateTime = &v
return s
}
// SetWebhookArn sets the WebhookArn field's value.
func (s *Webhook) SetWebhookArn(v string) *Webhook {
s.WebhookArn = &v
return s
}
// SetWebhookId sets the WebhookId field's value.
func (s *Webhook) SetWebhookId(v string) *Webhook {
s.WebhookId = &v
return s
}
// SetWebhookUrl sets the WebhookUrl field's value.
func (s *Webhook) SetWebhookUrl(v string) *Webhook {
s.WebhookUrl = &v
return s
}
const (
// DomainStatusPendingVerification is a DomainStatus enum value
DomainStatusPendingVerification = "PENDING_VERIFICATION"
// DomainStatusInProgress is a DomainStatus enum value
DomainStatusInProgress = "IN_PROGRESS"
// DomainStatusAvailable is a DomainStatus enum value
DomainStatusAvailable = "AVAILABLE"
// DomainStatusPendingDeployment is a DomainStatus enum value
DomainStatusPendingDeployment = "PENDING_DEPLOYMENT"
// DomainStatusFailed is a DomainStatus enum value
DomainStatusFailed = "FAILED"
// DomainStatusCreating is a DomainStatus enum value
DomainStatusCreating = "CREATING"
// DomainStatusRequestingCertificate is a DomainStatus enum value
DomainStatusRequestingCertificate = "REQUESTING_CERTIFICATE"
// DomainStatusUpdating is a DomainStatus enum value
DomainStatusUpdating = "UPDATING"
)
// DomainStatus_Values returns all elements of the DomainStatus enum
func DomainStatus_Values() []string {
return []string{
DomainStatusPendingVerification,
DomainStatusInProgress,
DomainStatusAvailable,
DomainStatusPendingDeployment,
DomainStatusFailed,
DomainStatusCreating,
DomainStatusRequestingCertificate,
DomainStatusUpdating,
}
}
const (
// JobStatusPending is a JobStatus enum value
JobStatusPending = "PENDING"
// JobStatusProvisioning is a JobStatus enum value
JobStatusProvisioning = "PROVISIONING"
// JobStatusRunning is a JobStatus enum value
JobStatusRunning = "RUNNING"
// JobStatusFailed is a JobStatus enum value
JobStatusFailed = "FAILED"
// JobStatusSucceed is a JobStatus enum value
JobStatusSucceed = "SUCCEED"
// JobStatusCancelling is a JobStatus enum value
JobStatusCancelling = "CANCELLING"
// JobStatusCancelled is a JobStatus enum value
JobStatusCancelled = "CANCELLED"
)
// JobStatus_Values returns all elements of the JobStatus enum
func JobStatus_Values() []string {
return []string{
JobStatusPending,
JobStatusProvisioning,
JobStatusRunning,
JobStatusFailed,
JobStatusSucceed,
JobStatusCancelling,
JobStatusCancelled,
}
}
const (
// JobTypeRelease is a JobType enum value
JobTypeRelease = "RELEASE"
// JobTypeRetry is a JobType enum value
JobTypeRetry = "RETRY"
// JobTypeManual is a JobType enum value
JobTypeManual = "MANUAL"
// JobTypeWebHook is a JobType enum value
JobTypeWebHook = "WEB_HOOK"
)
// JobType_Values returns all elements of the JobType enum
func JobType_Values() []string {
return []string{
JobTypeRelease,
JobTypeRetry,
JobTypeManual,
JobTypeWebHook,
}
}
const (
// PlatformWeb is a Platform enum value
PlatformWeb = "WEB"
)
// Platform_Values returns all elements of the Platform enum
func Platform_Values() []string {
return []string{
PlatformWeb,
}
}
const (
// StageProduction is a Stage enum value
StageProduction = "PRODUCTION"
// StageBeta is a Stage enum value
StageBeta = "BETA"
// StageDevelopment is a Stage enum value
StageDevelopment = "DEVELOPMENT"
// StageExperimental is a Stage enum value
StageExperimental = "EXPERIMENTAL"
// StagePullRequest is a Stage enum value
StagePullRequest = "PULL_REQUEST"
)
// Stage_Values returns all elements of the Stage enum
func Stage_Values() []string {
return []string{
StageProduction,
StageBeta,
StageDevelopment,
StageExperimental,
StagePullRequest,
}
} | // A request contains unexpected data.
//
// * UnauthorizedException
// An operation failed due to a lack of access. |
headerTest.js | 'use strict';
import React from 'react';
import { expect } from 'chai';
import sinon from 'sinon';
import { shallow, mount, render } from 'enzyme';
//import each component in its own import statement as needed
import Header from '../app/components/header.js'; |
//1 wrapper = 1 test: create a fresh wrapper inside each individual test
//BAD PRACTICE (one wrapper shared across tests):
//const wrapper = shallow(...)
//it('should...')
//it('should...')
//GOOD PRACTICE (a new wrapper per test):
//const wrapper = shallow(...)
//it('should...')
//const wrapper = shallow(...)
//it('should...')
//use mount if the component is stateFUL
describe('<Header />', function() {
const studentProps = {
isLoggedIn: true,
isAdmin: false
};
const adminProps = {
isLoggedIn: true,
isAdmin: true
};
const notLoggedIn = {
isLoggedIn: false,
isAdmin: false
};
it('receives props from its parent component', () => {
const wrapper = shallow(<Header userPrivs={adminProps}/>);
expect(wrapper.instance().props.userPrivs.isLoggedIn).to.equal(true);
expect(wrapper.instance().props.userPrivs.isAdmin).to.equal(true);
});
}) |
//use shallow if the component is stateLESS
//shallow test = only the component and its immediate children are rendered
UdtBinaryObject.ts | import { AllowedAttributes, CctBinaryObjectType } from './essentials/cct/CctBinaryObject'; | * udt:BinaryObjectType
* A set of finite-length sequences of binary octets.
* Namespace: urn:oasis:names:specification:ubl:schema:xsd:UnqualifiedDataTypes-2
* Schema document: common/UBL-UnqualifiedDataTypes-2.1.xsd
* See More: http://www.datypic.com/sc/ubl21/t-udt_BinaryObjectType.html
*/
export class UdtBinaryObject extends CctBinaryObjectType {
constructor(content: string, attributes: UdtBinaryObjectAttributes) {
super(content, attributes);
}
} |
export type UdtBinaryObjectAttributes = AllowedAttributes;
/** |
importOnapTypes.py | import pycurl
import sys, getopt
from StringIO import StringIO
import json
import copy
from importCommon import *
from importNormativeTypes import *
import importCommon
#####################################################################################################################################################################################################
# #
# Import all ONAP normative types from a given directory #
# #
# activation : #
# python importOnapTypes.py [optional -s <scheme> | --scheme=<scheme>, default http] [-i <be host> | --ip=<be host>] [-p <be port> | --port=<be port> ] [-u <admin user> | --user=<admin user> ] [-v <true|false> | --updateversion=<true|false> ] #
# #
# shortest activation (be host = localhost, be port = 8080): #
# python importOnapTypes.py #
# #
#####################################################################################################################################################################################################
def importOnapTypes(scheme, beHost, bePort, adminUser, fileDir, updateversion):
#Add desired type names to the list
onapTypes = [] | responseCodes = [200, 201, 409]
results = []
for onapType in onapTypes:
result = createNormativeType(scheme, beHost, bePort, adminUser, fileDir, onapType, updateversion)
results.append(result)
if ( result[1] == None or result[1] not in responseCodes) :
print "Failed creating heat type " + onapType + ". " + str(result[1])
return results
def main(argv):
print 'Number of arguments:', len(sys.argv), 'arguments.'
beHost = 'localhost'
bePort = '8080'
adminUser = 'jh0003'
updateversion = 'true'
scheme = 'http'
try:
opts, args = getopt.getopt(argv,"i:p:u:s:v:h",["ip=","port=","user=","scheme=","updateversion="])
except getopt.GetoptError:
usage()
errorAndExit(2, 'Invalid input')
for opt, arg in opts:
#print opt, arg
if opt == '-h':
usage()
sys.exit(3)
elif opt in ("-i", "--ip"):
beHost = arg
elif opt in ("-p", "--port"):
bePort = arg
elif opt in ("-u", "--user"):
adminUser = arg
elif opt in ("-s", "--scheme"):
scheme = arg
elif opt in ("-v", "--updateversion"):
if (arg.lower() == "false" or arg.lower() == "no"):
updateversion = 'false'
print 'scheme =',scheme,',be host =',beHost,', be port =', bePort,', user =', adminUser
if ( beHost == None ):
usage()
sys.exit(3)
results = importOnapTypes(scheme, beHost, bePort, adminUser, "../../../import/tosca/onap-types/", updateversion)
print "-----------------------------"
for result in results:
print "{0:20} | {1:6}".format(result[0], result[1])
print "-----------------------------"
responseCodes = [200, 201]
if(updateversion == 'false'):
responseCodes = [200, 201, 409]
failedNormatives = filter(lambda x: x[1] == None or x[1] not in responseCodes, results)
if (len(failedNormatives) > 0):
errorAndExit(1, None)
else:
errorAndExit(0, None)
if __name__ == "__main__":
main(sys.argv[1:]) |
responseCodes = [200, 201]
if(updateversion == 'false'): |
WithinTimeRange.py | import numpy as np
from ._CFunctions import _CWithinTimeRange
from ._CTConv import _CTConv
def WithinTimeRange(Timet,Time0,Time1,BoolOut=False):
'''
Performs a simple check on a test time (Timet) to see if it exists
between Time0 and time1.
Inputs
======
Timet : tuple | float
Test time - either a single floating point (array or
scalar) to denote hours of the day, or a tuple containing
(Date,Time).
Time0 : tuple | float
Start time, same format as above.
Time1 : tuple | float
End time, same format as above.
BoolOut : boolean
False by default. When True, returns a boolean array with the same
size as Timet, where each element within the range Time0 to Time1 is
True. When False (the default), returns a list of indices within the
time range.
Output
======
out : bool | int
If BoolOut == True boolean (array or scalar), True if within
time range.
When BoolOut == False, an integer array of indices is returned.
'''
sh = np.shape(Timet)
s0 = np.size(Time0)
s1 = np.size(Time1)
if s0 == 2:
D0 = Time0[0]
T0 = Time0[1]
else:
T0 = Time0
D0 = 20000101
if s1 == 2:
D1 = Time1[0] | T1 = Time1[1]
else:
T1 = Time1
D1 = 20000101
if sh[0] == 2 and np.size(sh) == 2:
#hopefully this is a list of date and time
D = np.array([Timet[0]]).flatten()
T = np.array([Timet[1]]).flatten()
else:
T = np.array(Timet)
D = np.zeros(T.size,dtype='int32') + 20000101
#convert the dtypes for compatibility with the C++ code
_n = _CTConv(np.size(D),'c_int')
_Date = _CTConv(D,'c_int_ptr')
_ut = _CTConv(T,'c_float_ptr')
_Date0 = _CTConv(D0,'c_int')
_ut0 = _CTConv(T0,'c_float')
_Date1 = _CTConv(D1,'c_int')
_ut1 = _CTConv(T1,'c_float')
_ni = np.zeros(1,dtype='int32')
_ind = np.zeros(_n,dtype='int32')
#call the C++ code
_CWithinTimeRange(_n,_Date,_ut,_Date0,_ut0,_Date1,_ut1,_ni,_ind)
#reduce the size of the index array
_ind = _ind[:_ni[0]]
#either return the indices or the boolean array
if BoolOut:
out = np.zeros(_n,dtype='bool8')
out[_ind] = True
return out
else:
return _ind | |
native_functions.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{interpreter::Interpreter, loader::Resolver, logging::LogContext};
use move_core_types::{
account_address::AccountAddress, gas_schedule::CostTable, language_storage::CORE_CODE_ADDRESS,
value::MoveTypeLayout, vm_status::StatusType,
};
use move_vm_natives::{account, bcs, debug, event, hash, signature, signer, vector};
use move_vm_types::{
data_store::DataStore,
gas_schedule::GasStatus,
loaded_data::runtime_types::Type,
natives::function::{NativeContext, NativeResult},
values::Value,
};
use std::{collections::VecDeque, fmt::Write};
use vm::errors::PartialVMResult;
// The set of native functions the VM supports.
// The functions can live in any crate that is linked in, but the VM declares them here.
// 2 functions have to be implemented for a `NativeFunction`:
// - `resolve` which given a function unique name ModuleAddress::ModuleName::FunctionName
// returns a `NativeFunction`
// - `dispatch` which given a `NativeFunction` invokes the native
#[derive(Debug, Clone, Copy)]
pub(crate) enum NativeFunction {
HashSha2_256,
HashSha3_256,
BCSToBytes,
PubED25519Validate,
SigED25519Verify,
VectorLength,
VectorEmpty,
VectorBorrow,
VectorBorrowMut,
VectorPushBack,
VectorPopBack,
VectorDestroyEmpty,
VectorSwap,
AccountWriteEvent,
DebugPrint,
DebugPrintStackTrace,
SignerBorrowAddress,
CreateSigner,
// functions below this line are deprecated and remain only for replaying old transactions
DestroySigner,
}
impl NativeFunction {
pub(crate) fn resolve(
module_address: &AccountAddress,
module_name: &str,
function_name: &str,
) -> Option<NativeFunction> {
use NativeFunction::*;
let case = (module_address, module_name, function_name);
Some(match case {
(&CORE_CODE_ADDRESS, "Hash", "sha2_256") => HashSha2_256,
(&CORE_CODE_ADDRESS, "Hash", "sha3_256") => HashSha3_256,
(&CORE_CODE_ADDRESS, "BCS", "to_bytes") => BCSToBytes,
(&CORE_CODE_ADDRESS, "Signature", "ed25519_validate_pubkey") => PubED25519Validate,
(&CORE_CODE_ADDRESS, "Signature", "ed25519_verify") => SigED25519Verify,
(&CORE_CODE_ADDRESS, "Vector", "length") => VectorLength,
(&CORE_CODE_ADDRESS, "Vector", "empty") => VectorEmpty,
(&CORE_CODE_ADDRESS, "Vector", "borrow") => VectorBorrow,
(&CORE_CODE_ADDRESS, "Vector", "borrow_mut") => VectorBorrowMut,
(&CORE_CODE_ADDRESS, "Vector", "push_back") => VectorPushBack,
(&CORE_CODE_ADDRESS, "Vector", "pop_back") => VectorPopBack,
(&CORE_CODE_ADDRESS, "Vector", "destroy_empty") => VectorDestroyEmpty,
(&CORE_CODE_ADDRESS, "Vector", "swap") => VectorSwap, | (&CORE_CODE_ADDRESS, "Event", "write_to_event_store") => AccountWriteEvent,
(&CORE_CODE_ADDRESS, "DiemAccount", "create_signer") => CreateSigner,
(&CORE_CODE_ADDRESS, "Debug", "print") => DebugPrint,
(&CORE_CODE_ADDRESS, "Debug", "print_stack_trace") => DebugPrintStackTrace,
(&CORE_CODE_ADDRESS, "Signer", "borrow_address") => SignerBorrowAddress,
// functions below this line are deprecated and remain only for replaying old transactions
(&CORE_CODE_ADDRESS, "DiemAccount", "destroy_signer") => DestroySigner,
_ => return None,
})
}
/// Given the vector of arguments, it executes the native function.
pub(crate) fn dispatch(
self,
ctx: &mut impl NativeContext,
t: Vec<Type>,
v: VecDeque<Value>,
) -> PartialVMResult<NativeResult> {
let result = match self {
Self::HashSha2_256 => hash::native_sha2_256(ctx, t, v),
Self::HashSha3_256 => hash::native_sha3_256(ctx, t, v),
Self::PubED25519Validate => signature::native_ed25519_publickey_validation(ctx, t, v),
Self::SigED25519Verify => signature::native_ed25519_signature_verification(ctx, t, v),
Self::VectorLength => vector::native_length(ctx, t, v),
Self::VectorEmpty => vector::native_empty(ctx, t, v),
Self::VectorBorrow => vector::native_borrow(ctx, t, v),
Self::VectorBorrowMut => vector::native_borrow(ctx, t, v),
Self::VectorPushBack => vector::native_push_back(ctx, t, v),
Self::VectorPopBack => vector::native_pop(ctx, t, v),
Self::VectorDestroyEmpty => vector::native_destroy_empty(ctx, t, v),
Self::VectorSwap => vector::native_swap(ctx, t, v),
// natives that need the full API of `NativeContext`
Self::AccountWriteEvent => event::native_emit_event(ctx, t, v),
Self::BCSToBytes => bcs::native_to_bytes(ctx, t, v),
Self::DebugPrint => debug::native_print(ctx, t, v),
Self::DebugPrintStackTrace => debug::native_print_stack_trace(ctx, t, v),
Self::SignerBorrowAddress => signer::native_borrow_address(ctx, t, v),
Self::CreateSigner => account::native_create_signer(ctx, t, v),
// functions below this line are deprecated and remain only for replaying old transactions
Self::DestroySigner => account::native_destroy_signer(ctx, t, v),
};
debug_assert!(match &result {
Err(e) => e.major_status().status_type() == StatusType::InvariantViolation,
Ok(_) => true,
});
result
}
}
pub(crate) struct FunctionContext<'a, L: LogContext> {
interpreter: &'a mut Interpreter<L>,
data_store: &'a mut dyn DataStore,
gas_status: &'a GasStatus<'a>,
resolver: &'a Resolver<'a>,
}
impl<'a, L: LogContext> FunctionContext<'a, L> {
pub(crate) fn new(
interpreter: &'a mut Interpreter<L>,
data_store: &'a mut dyn DataStore,
gas_status: &'a mut GasStatus,
resolver: &'a Resolver<'a>,
) -> FunctionContext<'a, L> {
FunctionContext {
interpreter,
data_store,
gas_status,
resolver,
}
}
}
impl<'a, L: LogContext> NativeContext for FunctionContext<'a, L> {
fn print_stack_trace<B: Write>(&self, buf: &mut B) -> PartialVMResult<()> {
self.interpreter
.debug_print_stack_trace(buf, self.resolver.loader())
}
fn cost_table(&self) -> &CostTable {
self.gas_status.cost_table()
}
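// Only invariant violations from the data store propagate as errors out of `save_event`;
// any other failure to emit an event is reported as `Ok(false)` so the native can surface
// a recoverable status to the Move program.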
fn save_event(
&mut self,
guid: Vec<u8>,
seq_num: u64,
ty: Type,
val: Value,
) -> PartialVMResult<bool> {
match self.data_store.emit_event(guid, seq_num, ty, val) {
Ok(()) => Ok(true),
Err(e) if e.major_status().status_type() == StatusType::InvariantViolation => Err(e),
Err(_) => Ok(false),
}
}
fn type_to_type_layout(&self, ty: &Type) -> PartialVMResult<Option<MoveTypeLayout>> {
match self.resolver.type_to_type_layout(ty) {
Ok(ty_layout) => Ok(Some(ty_layout)),
Err(e) if e.major_status().status_type() == StatusType::InvariantViolation => Err(e),
Err(_) => Ok(None),
}
}
} | |
frontend.go
// This file implements all the frontend-requested service
// endpoints for wallets.
package stellarsvc
import (
"context"
"errors"
"fmt"
"sort"
"unicode/utf8"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/stellar1"
"github.com/keybase/client/go/stellar"
"github.com/keybase/client/go/stellar/remote"
"github.com/keybase/client/go/stellar/stellarcommon"
"github.com/keybase/stellarnet"
)
const WorthCurrencyErrorCode = "ERR"
var ErrAccountIDMissing = errors.New("account id parameter missing")
func (s *Server) GetWalletAccountsLocal(ctx context.Context, sessionID int) (accts []stellar1.WalletAccountLocal, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "GetWalletAccountsLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return nil, err
}
return stellar.AllWalletAccounts(mctx, s.remoter)
}
func (s *Server) GetWalletAccountLocal(ctx context.Context, arg stellar1.GetWalletAccountLocalArg) (acct stellar1.WalletAccountLocal, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "GetWalletAccountLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return acct, err
}
if arg.AccountID.IsNil() {
mctx.Debug("GetWalletAccountLocal called with an empty account id")
return acct, ErrAccountIDMissing
}
return stellar.WalletAccount(mctx, s.remoter, arg.AccountID)
}
func (s *Server) GetAccountAssetsLocal(ctx context.Context, arg stellar1.GetAccountAssetsLocalArg) (assets []stellar1.AccountAssetLocal, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "GetAccountAssetsLocal",
Err: &err,
})
defer fin()
if err != nil {
return nil, err
}
if arg.AccountID.IsNil() {
s.G().Log.CDebugf(ctx, "GetAccountAssetsLocal called with an empty account id")
return nil, ErrAccountIDMissing
}
details, err := stellar.AccountDetails(mctx, s.remoter, arg.AccountID)
if err != nil {
s.G().Log.CDebugf(ctx, "remote.Details failed for %q: %s", arg.AccountID, err)
return nil, err
}
if len(details.Balances) == 0 {
// add an empty xlm balance
s.G().Log.CDebugf(ctx, "Account has no balances - adding default 0 XLM balance")
stellar.EmptyAmountStack(mctx)
details.Available = "0"
details.Balances = []stellar1.Balance{
{
Amount: "0",
Asset: stellar1.Asset{Type: "native"},
},
}
}
if details.Available == "" {
s.G().Log.CDebugf(ctx, "details.Available is empty: %+v", details)
stellar.EmptyAmountStack(mctx)
details.Available = "0"
s.G().Log.CDebugf(ctx, `set details.Available from empty to "0"`)
}
displayCurrency, err := stellar.GetAccountDisplayCurrency(mctx, arg.AccountID)
if err != nil {
return nil, err
}
s.G().Log.CDebugf(ctx, "Display currency for account %q is %q", arg.AccountID, displayCurrency)
rate, rateErr := s.remoter.ExchangeRate(ctx, displayCurrency)
if rateErr != nil {
s.G().Log.CDebugf(ctx, "exchange rate error: %s", rateErr)
}
for _, d := range details.Balances {
fmtAmount, err := stellar.FormatAmount(mctx, d.Amount, false, stellarnet.Round)
if err != nil {
s.G().Log.CDebugf(ctx, "FormatAmount error: %s", err)
return nil, err
}
if d.Asset.IsNativeXLM() {
baseFee := s.walletState.BaseFee(mctx)
availableAmount := stellar.SubtractFeeSoft(mctx, details.Available, baseFee)
if availableAmount == "" {
s.G().Log.CDebugf(ctx, "stellar.SubtractFeeSoft returned empty available amount, setting it to 0")
stellar.EmptyAmountStack(mctx)
availableAmount = "0"
}
fmtAvailable, err := stellar.FormatAmount(mctx, availableAmount, false, stellarnet.Round)
if err != nil {
return nil, err
}
asset := stellar1.AccountAssetLocal{
Name: "Lumens",
AssetCode: "XLM",
IssuerName: "Stellar network",
IssuerAccountID: "",
BalanceTotal: fmtAmount,
BalanceAvailableToSend: fmtAvailable,
WorthCurrency: displayCurrency,
}
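// fillWorths converts the total and available XLM amounts into the account's display
// currency using the exchange rate fetched above; on failure, the caller falls back to
// error placeholders rather than failing the whole RPC.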
fillWorths := func() (err error) {
if rateErr != nil {
return fmt.Errorf("rate error: %v", rateErr)
}
outsideAmount, err := stellarnet.ConvertXLMToOutside(d.Amount, rate.Rate)
if err != nil {
return fmt.Errorf("converting amount: %v", err)
}
fmtWorth, err := stellar.FormatCurrencyWithCodeSuffix(mctx, outsideAmount, rate.Currency, stellarnet.Round)
if err != nil {
return fmt.Errorf("formatting converted amount: %v", err)
}
asset.Worth = fmtWorth
outsideAvailableAmount, err := stellarnet.ConvertXLMToOutside(availableAmount, rate.Rate)
if err != nil {
return fmt.Errorf("converting available amount: %v", err)
}
fmtAvailableWorth, err := stellar.FormatCurrencyWithCodeSuffix(mctx, outsideAvailableAmount, rate.Currency, stellarnet.Round)
if err != nil {
return fmt.Errorf("formatting converted available amount: %v", err)
}
asset.AvailableToSendWorth = fmtAvailableWorth
return nil
}
err = fillWorths()
if err != nil {
s.G().Log.CDebugf(ctx, "error populating converted worth fields: %v", err)
asset.WorthCurrency = WorthCurrencyErrorCode
asset.Worth = "Currency conversion error"
asset.AvailableToSendWorth = "Currency conversion error"
}
// Add account reserves info to main asset.
asset.Reserves = details.Reserves
assets = append(assets, asset)
} else {
assets = append(assets, stellar1.AccountAssetLocal{
Name: d.Asset.Code,
AssetCode: d.Asset.Code,
IssuerName: d.Asset.IssuerName,
IssuerAccountID: d.Asset.Issuer,
IssuerVerifiedDomain: d.Asset.VerifiedDomain,
BalanceTotal: fmtAmount,
BalanceAvailableToSend: fmtAmount,
WorthCurrency: "",
Worth: "",
AvailableToSendWorth: "",
Desc: d.Asset.Desc,
InfoUrl: d.Asset.InfoUrl,
InfoUrlText: d.Asset.InfoUrlText,
})
}
}
return assets, nil
}
func (s *Server) GetDisplayCurrenciesLocal(ctx context.Context, sessionID int) (currencies []stellar1.CurrencyLocal, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "GetDisplayCurrenciesLocal",
Err: &err,
})
defer fin()
if err != nil {
return nil, err
}
conf, err := s.G().GetStellar().GetServerDefinitions(mctx.Ctx())
if err != nil {
return nil, err
}
for code := range conf.Currencies {
c, ok := conf.GetCurrencyLocal(code)
if ok {
currencies = append(currencies, c)
}
}
sort.Slice(currencies, func(i, j int) bool {
if currencies[i].Code == "USD" {
return true
}
if currencies[j].Code == "USD" {
return false
}
return currencies[i].Code < currencies[j].Code
})
return currencies, nil
}
func (s *Server) HasAcceptedDisclaimerLocal(ctx context.Context, sessionID int) (accepted bool, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "HasAcceptedDisclaimerLocal",
Err: &err,
})
defer fin()
if err != nil {
return false, err
}
return stellar.HasAcceptedDisclaimer(mctx.Ctx(), s.G())
}
func (s *Server) AcceptDisclaimerLocal(ctx context.Context, sessionID int) (err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "AcceptDisclaimerLocal",
Err: &err,
})
defer fin()
if err != nil {
return err
}
err = remote.SetAcceptedDisclaimer(mctx.Ctx(), s.G())
if err != nil {
return err
}
stellar.InformAcceptedDisclaimer(mctx.Ctx(), s.G())
cwg, err := stellar.CreateWalletGated(mctx)
if err != nil {
return err
}
if !cwg.HasWallet {
return fmt.Errorf("user wallet not created")
}
err = s.walletState.RefreshAll(mctx, "AcceptDisclaimer")
if err != nil {
mctx.Debug("AcceptDisclaimer RefreshAll error: %s", err)
}
return nil
}
func (s *Server) LinkNewWalletAccountLocal(ctx context.Context, arg stellar1.LinkNewWalletAccountLocalArg) (accountID stellar1.AccountID, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "LinkNewWalletAccountLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return "", err
}
_, accountID, _, err = libkb.ParseStellarSecretKey(string(arg.SecretKey))
if err != nil {
return "", err
}
err = stellar.ImportSecretKey(mctx, arg.SecretKey, false, arg.Name)
if err != nil {
return "", err
}
err = s.walletState.RefreshAll(mctx, "LinkNewWalletAccount")
if err != nil {
mctx.Debug("LinkNewWalletAccountLocal RefreshAll error: %s", err)
}
return accountID, nil
}
func (s *Server) GetPaymentsLocal(ctx context.Context, arg stellar1.GetPaymentsLocalArg) (page stellar1.PaymentsPageLocal, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "GetPaymentsLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return page, err
}
if arg.AccountID.IsNil() {
s.G().Log.CDebugf(ctx, "GetPaymentsLocal called with an empty account id")
return page, ErrAccountIDMissing
}
rpArg := remote.RecentPaymentsArg{
AccountID: arg.AccountID,
Cursor: arg.Cursor,
SkipPending: true,
IncludeAdvanced: true,
}
srvPayments, err := s.remoter.RecentPayments(ctx, rpArg)
if err != nil {
return page, err
}
return stellar.RemoteRecentPaymentsToPage(mctx, s.remoter, arg.AccountID, srvPayments)
}
func (s *Server) GetPendingPaymentsLocal(ctx context.Context, arg stellar1.GetPendingPaymentsLocalArg) (payments []stellar1.PaymentOrErrorLocal, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "GetPendingPaymentsLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return nil, err
}
if arg.AccountID.IsNil() {
s.G().Log.CDebugf(ctx, "GetPendingPaymentsLocal called with an empty account id")
return payments, ErrAccountIDMissing
}
pending, err := s.remoter.PendingPayments(ctx, arg.AccountID, 0)
if err != nil {
return nil, err
}
return stellar.RemotePendingToLocal(mctx, s.remoter, arg.AccountID, pending)
}
func (s *Server) GetPaymentDetailsLocal(ctx context.Context, arg stellar1.GetPaymentDetailsLocalArg) (payment stellar1.PaymentDetailsLocal, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "GetPaymentDetailsLocal",
Err: &err,
})
defer fin()
if err != nil {
return payment, err
}
if arg.AccountID.IsNil() {
return payment, errors.New("AccountID required for GetPaymentDetailsLocal")
}
oc := stellar.NewOwnAccountLookupCache(mctx)
details, err := s.remoter.PaymentDetails(ctx, arg.AccountID, stellar1.TransactionIDFromPaymentID(arg.Id).String())
if err != nil {
return payment, err
}
summary, err := stellar.TransformPaymentSummaryAccount(mctx, details.Summary, oc, arg.AccountID)
if err != nil {
return payment, err
}
var fee string
if details.FeeCharged != "" {
fee, err = stellar.FormatAmountDescriptionXLM(mctx, details.FeeCharged)
if err != nil {
return payment, err
}
}
summary.TxID = stellar1.TransactionIDFromPaymentID(summary.Id)
return stellar1.PaymentDetailsLocal{
Summary: *summary,
Details: stellar1.PaymentDetailsOnlyLocal{
PublicNote: details.Memo,
PublicNoteType: details.MemoType,
ExternalTxURL: details.ExternalTxURL,
FeeChargedDescription: fee,
PathIntermediate: details.PathIntermediate,
},
}, nil
}
func (s *Server) GetGenericPaymentDetailsLocal(ctx context.Context, arg stellar1.GetGenericPaymentDetailsLocalArg) (payment stellar1.PaymentDetailsLocal, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "GetGenericPaymentDetailsLocal",
Err: &err,
})
defer fin()
if err != nil {
return payment, err
}
oc := stellar.NewOwnAccountLookupCache(mctx)
details, err := s.remoter.PaymentDetailsGeneric(ctx, stellar1.TransactionIDFromPaymentID(arg.Id).String())
if err != nil {
return payment, err
}
summary, err := stellar.TransformPaymentSummaryGeneric(mctx, details.Summary, oc)
if err != nil {
return payment, err
}
summary.TxID = stellar1.TransactionIDFromPaymentID(summary.Id)
return stellar1.PaymentDetailsLocal{
Summary: *summary,
Details: stellar1.PaymentDetailsOnlyLocal{
PublicNote: details.Memo,
PublicNoteType: details.MemoType,
ExternalTxURL: details.ExternalTxURL,
},
}, nil
}
func (s *Server) CancelPaymentLocal(ctx context.Context, arg stellar1.CancelPaymentLocalArg) (res stellar1.RelayClaimResult, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "CancelPaymentLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return res, err
}
details, err := s.remoter.PaymentDetailsGeneric(mctx.Ctx(), stellar1.TransactionIDFromPaymentID(arg.PaymentID).String())
if err != nil {
return res, err
}
typ, err := details.Summary.Typ()
if err != nil {
return res, err
}
if typ != stellar1.PaymentSummaryType_RELAY {
return res, errors.New("tried to cancel a non-relay payment")
}
relay := details.Summary.Relay()
dir := stellar1.RelayDirection_YANK
return stellar.Claim(mctx, s.walletState, relay.KbTxID.String(), relay.FromStellar, &dir, nil)
}
func (s *Server) ValidateAccountIDLocal(ctx context.Context, arg stellar1.ValidateAccountIDLocalArg) (err error) {
_, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "ValidateAccountIDLocal",
Err: &err,
})
defer fin()
if err != nil {
return err
}
_, err = libkb.ParseStellarAccountID(arg.AccountID.String())
return err
}
func (s *Server) ValidateSecretKeyLocal(ctx context.Context, arg stellar1.ValidateSecretKeyLocalArg) (err error) {
_, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "ValidateSecretKeyLocal",
Err: &err,
})
defer fin()
if err != nil {
return err
}
_, _, _, err = libkb.ParseStellarSecretKey(arg.SecretKey.SecureNoLogString())
return err
}
func (s *Server) ValidateAccountNameLocal(ctx context.Context, arg stellar1.ValidateAccountNameLocalArg) (err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "ValidateAccountNameLocal",
Err: &err,
})
defer fin()
if err != nil {
return err
}
// Make sure to keep this validation in sync with ChangeAccountName.
if arg.Name == "" {
return fmt.Errorf("name required")
}
runes := utf8.RuneCountInString(arg.Name)
if runes > stellar.AccountNameMaxRunes {
return fmt.Errorf("account name can be %v characters at the longest but was %v", stellar.AccountNameMaxRunes, runes)
}
// If this becomes a bottleneck, cache non-critical wallet info on G.Stellar.
currentBundle, err := remote.FetchSecretlessBundle(mctx)
if err != nil {
s.G().Log.CErrorf(ctx, "error fetching bundle: %v", err)
// Return nil since the name is probably fine.
return nil
}
for _, account := range currentBundle.Accounts {
if arg.Name == account.Name {
return fmt.Errorf("you already have an account with that name")
}
}
return nil
}
func (s *Server) ChangeWalletAccountNameLocal(ctx context.Context, arg stellar1.ChangeWalletAccountNameLocalArg) (err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "ChangeWalletAccountNameLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return err
}
if arg.AccountID.IsNil() {
mctx.Debug("ChangeWalletAccountNameLocal called with an empty account id")
return ErrAccountIDMissing
}
return stellar.ChangeAccountName(mctx, s.walletState, arg.AccountID, arg.NewName)
}
func (s *Server) SetWalletAccountAsDefaultLocal(ctx context.Context, arg stellar1.SetWalletAccountAsDefaultLocalArg) (err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "SetWalletAccountAsDefaultLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return err
}
if arg.AccountID.IsNil() {
mctx.Debug("SetWalletAccountAsDefaultLocal called with an empty account id")
return ErrAccountIDMissing
}
return stellar.SetAccountAsPrimary(mctx, s.walletState, arg.AccountID)
}
func (s *Server) DeleteWalletAccountLocal(ctx context.Context, arg stellar1.DeleteWalletAccountLocalArg) (err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "DeleteWalletAccountLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return err
}
if arg.UserAcknowledged != "yes" {
return errors.New("User did not acknowledge")
}
if arg.AccountID.IsNil() {
mctx.Debug("DeleteWalletAccountLocal called with an empty account id")
return ErrAccountIDMissing
}
return stellar.DeleteAccount(mctx, arg.AccountID)
}
func (s *Server) ChangeDisplayCurrencyLocal(ctx context.Context, arg stellar1.ChangeDisplayCurrencyLocalArg) (err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "ChangeDisplayCurrencyLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return err
}
if arg.AccountID.IsNil() {
return ErrAccountIDMissing
}
return remote.SetAccountDefaultCurrency(mctx.Ctx(), s.G(), arg.AccountID, string(arg.Currency))
}
func (s *Server) GetDisplayCurrencyLocal(ctx context.Context, arg stellar1.GetDisplayCurrencyLocalArg) (res stellar1.CurrencyLocal, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "GetDisplayCurrencyLocal",
Err: &err,
})
defer fin()
if err != nil {
return res, err
}
accountID := arg.AccountID
if accountID == nil {
primaryAccountID, err := stellar.GetOwnPrimaryAccountID(mctx)
if err != nil {
return res, err
}
accountID = &primaryAccountID
}
return stellar.GetCurrencySetting(mctx, *accountID)
}
func (s *Server) GetWalletAccountPublicKeyLocal(ctx context.Context, arg stellar1.GetWalletAccountPublicKeyLocalArg) (res string, err error) {
_, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "GetWalletAccountPublicKeyLocal",
Err: &err,
AllowLoggedOut: true,
})
defer fin()
if err != nil {
return res, err
}
if arg.AccountID.IsNil() {
return res, ErrAccountIDMissing
}
return arg.AccountID.String(), nil
}
func (s *Server) GetWalletAccountSecretKeyLocal(ctx context.Context, arg stellar1.GetWalletAccountSecretKeyLocalArg) (res stellar1.SecretKey, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "GetWalletAccountSecretKeyLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return res, err
}
if arg.AccountID.IsNil() {
return res, ErrAccountIDMissing
}
return stellar.ExportSecretKey(mctx, arg.AccountID)
}
func (s *Server) GetSendAssetChoicesLocal(ctx context.Context, arg stellar1.GetSendAssetChoicesLocalArg) (res []stellar1.SendAssetChoiceLocal, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "GetSendAssetChoicesLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return res, err
}
return stellar.GetSendAssetChoicesLocal(mctx, s.remoter, arg)
}
func (s *Server) StartBuildPaymentLocal(ctx context.Context, sessionID int) (res stellar1.BuildPaymentID, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "StartBuildPaymentLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return res, err
}
return stellar.StartBuildPaymentLocal(mctx)
}
func (s *Server) StopBuildPaymentLocal(ctx context.Context, arg stellar1.StopBuildPaymentLocalArg) (err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "StopBuildPaymentLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return err
}
stellar.StopBuildPaymentLocal(mctx, arg.Bid)
return nil
}
func (s *Server) BuildPaymentLocal(ctx context.Context, arg stellar1.BuildPaymentLocalArg) (res stellar1.BuildPaymentResLocal, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "BuildPaymentLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return res, err
}
return stellar.BuildPaymentLocal(mctx, arg)
}
func (s *Server) ReviewPaymentLocal(ctx context.Context, arg stellar1.ReviewPaymentLocalArg) (err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "ReviewPaymentLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return err
}
return stellar.ReviewPaymentLocal(mctx, s.uiSource.StellarUI(), arg)
}
func (s *Server) SendPaymentLocal(ctx context.Context, arg stellar1.SendPaymentLocalArg) (res stellar1.SendPaymentResLocal, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "SendPaymentLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return res, err
}
return stellar.SendPaymentLocal(mctx, arg)
}
func (s *Server) SendPathLocal(ctx context.Context, arg stellar1.SendPathLocalArg) (res stellar1.SendPaymentResLocal, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "SendPathLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return res, err
}
sendRes, err := stellar.SendPathPaymentGUI(mctx, s.walletState, stellar.SendPathPaymentArg{
From: arg.Source,
To: stellarcommon.RecipientInput(arg.Recipient),
Path: arg.Path,
SecretNote: arg.Note,
PublicMemo: stellarnet.NewMemoText(arg.PublicNote),
QuickReturn: true,
})
if err != nil {
return res, err
}
return stellar1.SendPaymentResLocal{
KbTxID: sendRes.KbTxID,
Pending: sendRes.Pending,
}, nil
}
func (s *Server) CreateWalletAccountLocal(ctx context.Context, arg stellar1.CreateWalletAccountLocalArg) (res stellar1.AccountID, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "CreateWalletAccountLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return res, err
}
return stellar.CreateNewAccount(mctx, arg.Name)
}
func (s *Server) BuildRequestLocal(ctx context.Context, arg stellar1.BuildRequestLocalArg) (res stellar1.BuildRequestResLocal, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "BuildRequestLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return res, err
}
return stellar.BuildRequestLocal(mctx, arg)
}
func (s *Server) GetRequestDetailsLocal(ctx context.Context, arg stellar1.GetRequestDetailsLocalArg) (res stellar1.RequestDetailsLocal, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "GetRequestDetailsLocal",
Err: &err,
})
defer fin()
if err != nil {
return stellar1.RequestDetailsLocal{}, err
}
details, err := s.remoter.RequestDetails(mctx.Ctx(), arg.ReqID)
if err != nil {
return stellar1.RequestDetailsLocal{}, err
}
local, err := stellar.TransformRequestDetails(mctx, details)
if err != nil {
return stellar1.RequestDetailsLocal{}, err
}
return *local, nil
}
func (s *Server) MakeRequestLocal(ctx context.Context, arg stellar1.MakeRequestLocalArg) (res stellar1.KeybaseRequestID, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "MakeRequestLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return "", err
}
return stellar.MakeRequestGUI(mctx, s.remoter, stellar.MakeRequestArg{
To: stellarcommon.RecipientInput(arg.Recipient),
Amount: arg.Amount,
Asset: arg.Asset,
Currency: arg.Currency,
Note: arg.Note,
})
}
func (s *Server) CancelRequestLocal(ctx context.Context, arg stellar1.CancelRequestLocalArg) (err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "CancelRequestLocal",
Err: &err,
})
defer fin()
if err != nil {
return err
}
return s.remoter.CancelRequest(mctx.Ctx(), arg.ReqID)
}
func (s *Server) MarkAsReadLocal(ctx context.Context, arg stellar1.MarkAsReadLocalArg) (err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "MarkAsReadLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return err
}
if arg.AccountID.IsNil() {
mctx.Debug("MarkAsReadLocal called with an empty account id")
return ErrAccountIDMissing
}
err = s.remoter.MarkAsRead(mctx.Ctx(), arg.AccountID, stellar1.TransactionIDFromPaymentID(arg.MostRecentID))
if err != nil {
return err
}
go stellar.RefreshUnreadCount(s.G(), arg.AccountID)
return nil
}
func (s *Server) IsAccountMobileOnlyLocal(ctx context.Context, arg stellar1.IsAccountMobileOnlyLocalArg) (mobileOnly bool, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "IsAccountMobileOnlyLocal",
Err: &err,
})
defer fin()
if err != nil {
return false, err
}
if arg.AccountID.IsNil() {
mctx.Debug("IsAccountMobileOnlyLocal called with an empty account id")
return false, ErrAccountIDMissing
}
return s.remoter.IsAccountMobileOnly(mctx.Ctx(), arg.AccountID)
}
func (s *Server) SetAccountMobileOnlyLocal(ctx context.Context, arg stellar1.SetAccountMobileOnlyLocalArg) (err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "SetAccountMobileOnlyLocal",
Err: &err,
})
defer fin()
if err != nil {
return err
}
if arg.AccountID.IsNil() {
mctx.Debug("SetAccountMobileOnlyLocal called with an empty account id")
return ErrAccountIDMissing
}
if err = s.remoter.SetAccountMobileOnly(mctx.Ctx(), arg.AccountID); err != nil {
return err
}
if err = s.walletState.UpdateAccountEntries(mctx, "set account mobile only"); err != nil {
return err
}
return nil
}
func (s *Server) SetAccountAllDevicesLocal(ctx context.Context, arg stellar1.SetAccountAllDevicesLocalArg) (err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "SetAccountAllDevicesLocal",
Err: &err,
})
defer fin()
if err != nil {
return err
}
if arg.AccountID.IsNil() {
mctx.Debug("SetAccountAllDevicesLocal called with an empty account id")
return ErrAccountIDMissing
}
if err = s.remoter.MakeAccountAllDevices(mctx.Ctx(), arg.AccountID); err != nil {
return err
}
if err = s.walletState.UpdateAccountEntries(mctx, "set account all devices"); err != nil {
return err
}
return nil
}
func (s *Server) GetPredefinedInflationDestinationsLocal(ctx context.Context, sessionID int) (ret []stellar1.PredefinedInflationDestination, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "GetPredefinedInflationDestinations",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return ret, err
}
return stellar.GetPredefinedInflationDestinations(mctx)
}
func (s *Server) SetInflationDestinationLocal(ctx context.Context, arg stellar1.SetInflationDestinationLocalArg) (err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "SetInflationDestinationLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return err
}
return stellar.SetInflationDestinationLocal(mctx, arg)
}
func (s *Server) GetInflationDestinationLocal(ctx context.Context, arg stellar1.GetInflationDestinationLocalArg) (res stellar1.InflationDestinationResultLocal, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "GetInflationDestinationLocal",
Err: &err,
RequireWallet: false,
})
defer fin()
if err != nil {
return res, err
}
if arg.AccountID.IsNil() {
mctx.Debug("GetInflationDestinationLocal called with an empty account id")
return res, ErrAccountIDMissing
}
return stellar.GetInflationDestination(mctx, arg.AccountID)
}
func (s *Server) AirdropDetailsLocal(ctx context.Context, sessionID int) (resp stellar1.AirdropDetails, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "AirdropDetailsLocal",
Err: &err,
RequireWallet: false,
})
defer fin()
if err != nil {
return stellar1.AirdropDetails{}, err
}
isPromoted, details, err := remote.AirdropDetails(mctx)
if err != nil {
return stellar1.AirdropDetails{}, err
}
return stellar1.AirdropDetails{IsPromoted: isPromoted, Details: details}, nil
}
func (s *Server) AirdropRegisterLocal(ctx context.Context, arg stellar1.AirdropRegisterLocalArg) (err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "AirdropRegisterLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return err
}
return remote.AirdropRegister(mctx, arg.Register)
}
func (s *Server) AirdropStatusLocal(ctx context.Context, sessionID int) (status stellar1.AirdropStatus, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "AirdropStatusLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return stellar1.AirdropStatus{}, err
}
return stellar.AirdropStatus(mctx)
}
func (s *Server) AddTrustlineLocal(ctx context.Context, arg stellar1.AddTrustlineLocalArg) (err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "AddTrustline",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return err
}
return stellar.AddTrustlineLocal(mctx, arg)
}
func (s *Server) DeleteTrustlineLocal(ctx context.Context, arg stellar1.DeleteTrustlineLocalArg) (err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "DeleteTrustline",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return err
}
return stellar.DeleteTrustlineLocal(mctx, arg)
}
func (s *Server) ChangeTrustlineLimitLocal(ctx context.Context, arg stellar1.ChangeTrustlineLimitLocalArg) (err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "ChangeTrustlineLimit",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return err
}
return stellar.ChangeTrustlineLimitLocal(mctx, arg)
}
func (s *Server) GetTrustlinesLocal(ctx context.Context, arg stellar1.GetTrustlinesLocalArg) (ret []stellar1.Balance, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "GetTrustlinesLocal",
Err: &err,
})
defer fin()
if err != nil {
return ret, err
}
return s.getTrustlinesAccountID(mctx, arg.AccountID)
}
func (s *Server) GetTrustlinesForRecipientLocal(ctx context.Context, arg stellar1.GetTrustlinesForRecipientLocalArg) (ret stellar1.RecipientTrustlinesLocal, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "GetTrustlinesForRecipientLocal",
Err: &err,
})
defer fin()
if err != nil {
return ret, err
}
recipient, err := stellar.LookupRecipient(mctx, stellarcommon.RecipientInput(arg.Recipient), false)
if err != nil {
return ret, err
}
if recipient.AccountID == nil {
return ret, errors.New("recipient has no stellar accounts")
}
trustlines, err := s.getTrustlinesAccountID(mctx, stellar1.AccountID(*recipient.AccountID))
if err != nil {
return ret, err
}
ret.Trustlines = trustlines
if recipient.User != nil {
ret.RecipientType = stellar1.ParticipantType_KEYBASE
} else {
ret.RecipientType = stellar1.ParticipantType_STELLAR
}
return ret, nil
}
func (s *Server) getTrustlinesAccountID(mctx libkb.MetaContext, accountID stellar1.AccountID) (ret []stellar1.Balance, err error) {
balances, err := s.remoter.Balances(mctx.Ctx(), accountID)
if err != nil {
return ret, err
}
if len(balances) == 0 {
// Account is not on the network - no balances means no trustlines.
return ret, nil
}
ret = make([]stellar1.Balance, 0, len(balances)-1)
for _, balance := range balances {
if !balance.Asset.IsNativeXLM() {
ret = append(ret, balance)
}
}
return ret, nil
}
func (s *Server) FindPaymentPathLocal(ctx context.Context, arg stellar1.FindPaymentPathLocalArg) (res stellar1.PaymentPathLocal, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "FindPaymentPathLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return stellar1.PaymentPathLocal{}, err
}
path, err := stellar.FindPaymentPath(mctx, s.remoter, arg.From, arg.To, arg.SourceAsset, arg.DestinationAsset, arg.Amount)
if err != nil {
return stellar1.PaymentPathLocal{}, err
}
res.FullPath = path
res.SourceDisplay, err = stellar.FormatAmount(mctx, path.SourceAmount, false, stellarnet.Round)
if err != nil {
return stellar1.PaymentPathLocal{}, err
}
res.SourceMaxDisplay, err = stellar.FormatAmount(mctx, path.SourceAmountMax, false, stellarnet.Round)
if err != nil {
return stellar1.PaymentPathLocal{}, err
}
res.DestinationDisplay, err = stellar.FormatAmount(mctx, path.DestinationAmount, false, stellarnet.Round)
if err != nil {
return stellar1.PaymentPathLocal{}, err
}
destAmt, err := stellarnet.ParseAmount(path.DestinationAmount)
if err != nil {
return stellar1.PaymentPathLocal{}, err
}
srcAmt, err := stellarnet.ParseAmount(path.SourceAmount)
if err != nil {
return stellar1.PaymentPathLocal{}, err
}
srcAmt.Quo(srcAmt, destAmt)
exchangeRateLeft, err := stellar.FormatAmountDescriptionAsset(mctx, "1", path.DestinationAsset)
if err != nil {
return stellar1.PaymentPathLocal{}, err
}
exchangeRateRight, err := stellar.FormatAmountDescriptionAsset(mctx, srcAmt.FloatString(7), path.SourceAsset)
if err != nil {
return stellar1.PaymentPathLocal{}, err
}
res.ExchangeRate = fmt.Sprintf("%s = %s", exchangeRateLeft, exchangeRateRight)
if len(path.SourceInsufficientBalance) > 0 {
availableToSpend, err := stellar.FormatAmountDescriptionAssetEx2(mctx, path.SourceInsufficientBalance, path.SourceAsset)
if err != nil {
return stellar1.PaymentPathLocal{}, err
}
res.AmountError = fmt.Sprintf("You only have %s available to spend.", availableToSpend)
}
return res, nil
}
func (s *Server) FuzzyAssetSearchLocal(ctx context.Context, arg stellar1.FuzzyAssetSearchLocalArg) (res []stellar1.Asset, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "FuzzyAssetSearchLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return res, err
}
remoteArg := stellar1.FuzzyAssetSearchArg{
SearchString: arg.SearchString,
}
return stellar.FuzzyAssetSearch(mctx, s.remoter, remoteArg)
}
func (s *Server) ListPopularAssetsLocal(ctx context.Context, sessionID int) (res stellar1.AssetListResult, err error) {
mctx, fin, err := s.Preamble(ctx, preambleArg{
RPCName: "ListPopularAssetsLocal",
Err: &err,
RequireWallet: true,
})
defer fin()
if err != nil {
return res, err
}
remoteArg := stellar1.ListPopularAssetsArg{}
return stellar.ListPopularAssets(mctx, s.remoter, remoteArg)
}
time_freq_scoping_factory.py
"""
time_freq_scoping_factory
=========================
Contains functions to simplify creating time frequency scopings.
"""
from ansys.dpf.core import Scoping
from ansys.dpf.core import errors as dpf_errors
from ansys.dpf.core.common import locations
from ansys.dpf.core.model import Model
def scoping_by_load_step(load_step, server=None):
"""Create a specific :class:`ansys.dpf.core.Scoping` for a given load step.
The returned scoping describes a specific time frequency support element
for a given load step.
Parameters
----------
load_step : int
Load step ID of the specific time frequency scoping.
server : DpfServer, optional
Server with the channel connected to the remote or local instance.
The default is ``None``, in which case an attempt is made to use the
global server.
Returns
-------
scoping : Scoping
Scoping targeting one load step.
"""
scoping = Scoping(server=server, ids=[load_step], location=locations.time_freq_step)
return scoping
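# Illustrative use (assumes a reachable DPF server; the ID shown is hypothetical):
# scoping = scoping_by_load_step(1)
# scoping.ids      -> [1]
# scoping.location -> locations.time_freq_step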
def scoping_by_load_steps(load_steps, server=None):
"""Create a specific :class:`ansys.dpf.core.Scoping` for a given list of load steps.
The returned scoping describes a specific time frequency support element
for a given list of load steps.
Parameters
----------
load_steps : list[int]
List of load steps IDs of the specific time frequency scoping.
server : DpfServer, optional
Server with the channel connected to the remote or local instance.
The default is ``None``, in which case an attempt is made to use the
global server.
Returns
-------
scoping : Scoping
Scoping targeting several load steps.
"""
if not isinstance(load_steps, list):
raise dpf_errors.InvalidTypeError("list", "load_steps")
scoping = Scoping(server=server, ids=load_steps, location=locations.time_freq_step)
return scoping
def scoping_by_set(cumulative_set, server=None):
"""Create a specific :class:`ansys.dpf.core.Scoping` for a given cumulative set index.
The returned scoping describes a specific time frequency support element for a given
cumulative set index.
Parameters
----------
cumulative_set : int
Cumulative index of the set.
server : DpfServer, optional
Server with the channel connected to the remote or local instance.
The default is ``None``, in which case an attempt is made to use the
global server.
Returns
-------
scoping : Scoping
Scoping targeting one set (referenced by cumulative index).
"""
scoping = Scoping(server=server, ids=[cumulative_set], location=locations.time_freq)
return scoping
def scoping_by_sets(cumulative_sets, server=None):
"""Create a specific :class:`ansys.dpf.core.Scoping` for a given list of cumulative set indices.
The returned scoping describes a specific time frequency support element for a given
list of cumulative sets of indices.
Parameters
----------
cumulative_sets : list[int]
List of cumulative indices of the sets.
server : DpfServer, optional
Server with the channel connected to the remote or local instance.
The default is ``None``, in which case an attempt is made to use the
global server.
Returns
-------
scoping : Scoping
Scoping targeting several sets (referenced by cumulative indices).
"""
if not isinstance(cumulative_sets, list):
raise dpf_errors.InvalidTypeError("list", "cumulative_sets")
scoping = Scoping(server=server, ids=cumulative_sets, location=locations.time_freq)
return scoping
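# Illustrative use (hypothetical set indices): target the 1st and 5th cumulative sets.
# scoping = scoping_by_sets([1, 5])
# scoping.location -> locations.time_freq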
def scoping_by_step_and_substep(
load_step_id, subset_id, time_freq_support
):
"""Create a specific :class:`ansys.dpf.core.Scoping` for a given step and subset.
The returned scoping describes a specific time frequency support element for a given
step and substep.
Parameters
----------
load_step_id : int
ID of the load step.
subset_id : int
ID of the subset.
time_freq_support : TimeFreqSupport
Returns
-------
scoping : Scoping
Scoping based on a given step and substep of a time frequency support.
"""
set_index = time_freq_support.get_cumulative_index(load_step_id - 1, subset_id - 1)
scoping = Scoping(
ids=[set_index + 1],
location=locations.time_freq,
server=time_freq_support._server)
return scoping
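# Worked example (hypothetical values): for the 1-based inputs load_step_id=2 and
# subset_id=3, the helper queries get_cumulative_index(1, 2) and stores the 1-based
# set ID set_index + 1 in the returned scoping.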
def scoping_by_step_and_substep_from_model(load_step_id, subset_id, model, server=None):
"""Create a specific :class:`ansys.dpf.core.Scoping` for a given step and substep.
The returned scoping describes a specific model's time freq support element for a given
step and substep.
Parameters
----------
load_step_id : int
ID of the step.
subset_id : int
ID of the subset.
model : Model
server : DpfServer, optional
Server with the channel connected to the remote or local instance.
The default is ``None``, in which case an attempt is made to use the
global server.
Returns
-------
scoping : Scoping
Scoping based on a given step/substep of a model's time_freq_support."""
return scoping_by_step_and_substep(
load_step_id, subset_id, model.metadata.time_freq_support
)
def scoping_on_all_time_freqs(tf_support_or_model):
"""Create a specific :class:`ansys.dpf.core.Scoping` with all time or
frequency sets of a :class:`ansys.dpf.core.TimeFreqSupport` or a :class:`ansys.dpf.core.Model`.
Parameters
----------
tf_support_or_model : TimeFreqSupport, Model
Returns
-------
scoping : Scoping
Scoping with all time or frequency sets IDs.
"""
if isinstance(tf_support_or_model, Model):
tf_support_or_model = tf_support_or_model.metadata.time_freq_support
return Scoping(
ids=range(1, len(tf_support_or_model.time_frequencies) + 1),
location=locations.time_freq,
server=tf_support_or_model._server)
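# Illustrative use (hypothetical result file path):
# model = Model("path/to/file.rst")
# scoping = scoping_on_all_time_freqs(model)
# scoping.ids -> range(1, number_of_sets + 1)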
watch_dns.rs
use core_foundation::{
array::CFArray,
base::{CFType, TCFType, ToVoid},
dictionary::CFDictionary,
propertylist::CFPropertyList,
runloop::{kCFRunLoopCommonModes, CFRunLoop},
string::CFString,
};
use system_configuration::{
dynamic_store::{SCDynamicStore, SCDynamicStoreBuilder, SCDynamicStoreCallBackContext},
sys::schema_definitions::kSCPropNetDNSServerAddresses,
};
// This example will watch the dynamic store for changes to any DNS setting. As soon as a change
// is detected, it will be printed to stdout.
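// To try this out (assuming the file lives under the crate's examples/ directory):
//     cargo run --example watch_dns
// Then change the DNS servers in the network settings; each change should invoke
// `my_callback` below and print the new addresses.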
fn main() {
let callback_context = SCDynamicStoreCallBackContext {
callout: my_callback,
info: Context { call_count: 0 },
};
let store = SCDynamicStoreBuilder::new("my-watch-dns-store")
.callback_context(callback_context)
.build();
let watch_keys: CFArray<CFString> = CFArray::from_CFTypes(&[]);
let watch_patterns =
CFArray::from_CFTypes(&[CFString::from("(State|Setup):/Network/Service/.*/DNS")]);
if store.set_notification_keys(&watch_keys, &watch_patterns) {
println!("Registered for notifications");
} else {
panic!("Unable to register notifications");
}
let run_loop_source = store.create_run_loop_source();
let run_loop = CFRunLoop::get_current();
run_loop.add_source(&run_loop_source, unsafe { kCFRunLoopCommonModes });
println!("Entering run loop");
CFRunLoop::run_current();
}
/// This struct acts as a user provided context/payload to each notification callback.
/// Here one can store any type of data or state needed in the callback function.
#[derive(Debug)]
struct Context {
call_count: u64,
}
#[allow(clippy::needless_pass_by_value)]
fn my_callback(store: SCDynamicStore, changed_keys: CFArray<CFString>, context: &mut Context) {
context.call_count += 1;
println!("Callback call count: {}", context.call_count);
for key in changed_keys.iter() {
if let Some(addresses) = get_dns(&store, key.clone()) {
println!("{} changed DNS to {:?}", *key, addresses);
} else {
println!("{} removed DNS", *key);
}
}
}
fn get_dns(store: &SCDynamicStore, path: CFString) -> Option<Vec<String>> {
let dns_settings = store
.get(path)
.and_then(CFPropertyList::downcast_into::<CFDictionary>)?;
let address_array = dns_settings
.find(unsafe { kSCPropNetDNSServerAddresses }.to_void())
.map(|ptr| unsafe { CFType::wrap_under_get_rule(*ptr) })
.and_then(CFType::downcast_into::<CFArray>)?;
let mut result = Vec::with_capacity(address_array.len() as usize);
for address_ptr in &address_array {
let address =
unsafe { CFType::wrap_under_get_rule(*address_ptr) }.downcast_into::<CFString>()?;
result.push(address.to_string())
}
Some(result)
}
0004_recipe.py
# Generated by Django 3.0.5 on 2020-04-11 04:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0003_ingredient'),
]
operations = [
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('time_minutes', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
('link', models.CharField(blank=True, max_length=255)),
('ingredients', models.ManyToManyField(to='core.Ingredient')),
('tags', models.ManyToManyField(to='core.Tag')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
test_distributed.py
""" isort:skip_file """
import pickle
import pytest
dask = pytest.importorskip("dask") # isort:skip
distributed = pytest.importorskip("distributed") # isort:skip
from dask.distributed import Client, Lock
from distributed.utils_test import cluster, gen_cluster
from distributed.utils_test import loop
from distributed.client import futures_of
import xarray as xr
from xarray.backends.locks import HDF5_LOCK, CombinedLock
from xarray.tests.test_backends import (
ON_WINDOWS,
create_tmp_file,
create_tmp_geotiff,
open_example_dataset,
)
from xarray.tests.test_dataset import create_test_data
from . import (
assert_allclose,
has_h5netcdf,
has_netCDF4,
requires_rasterio,
has_scipy,
requires_zarr,
requires_cfgrib,
)
# this is to stop isort throwing errors. May have been easier to just use
# `isort:skip` in retrospect
da = pytest.importorskip("dask.array")
loop = loop # loop is an imported fixture, which flake8 has issues ack-ing
@pytest.fixture
def tmp_netcdf_filename(tmpdir):
return str(tmpdir.join("testfile.nc"))
ENGINES = []
if has_scipy:
ENGINES.append("scipy")
if has_netCDF4:
ENGINES.append("netcdf4")
if has_h5netcdf:
ENGINES.append("h5netcdf")
NC_FORMATS = {
"netcdf4": [
"NETCDF3_CLASSIC",
"NETCDF3_64BIT_OFFSET",
"NETCDF3_64BIT_DATA",
"NETCDF4_CLASSIC",
"NETCDF4",
],
"scipy": ["NETCDF3_CLASSIC", "NETCDF3_64BIT"],
"h5netcdf": ["NETCDF4"],
}
ENGINES_AND_FORMATS = [
("netcdf4", "NETCDF3_CLASSIC"),
("netcdf4", "NETCDF4_CLASSIC"),
("netcdf4", "NETCDF4"),
("h5netcdf", "NETCDF4"),
("scipy", "NETCDF3_64BIT"),
]
@pytest.mark.parametrize("engine,nc_format", ENGINES_AND_FORMATS)
def test_dask_distributed_netcdf_roundtrip(
loop, tmp_netcdf_filename, engine, nc_format
):
if engine not in ENGINES:
pytest.skip("engine not available")
chunks = {"dim1": 4, "dim2": 3, "dim3": 6}
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop):
original = create_test_data().chunk(chunks)
if engine == "scipy":
with pytest.raises(NotImplementedError):
original.to_netcdf(
tmp_netcdf_filename, engine=engine, format=nc_format
)
return
original.to_netcdf(tmp_netcdf_filename, engine=engine, format=nc_format)
with xr.open_dataset(
tmp_netcdf_filename, chunks=chunks, engine=engine
) as restored:
assert isinstance(restored.var1.data, da.Array)
computed = restored.compute()
assert_allclose(original, computed)
@pytest.mark.parametrize("engine,nc_format", ENGINES_AND_FORMATS)
def test_dask_distributed_read_netcdf_integration_test(
loop, tmp_netcdf_filename, engine, nc_format
):
if engine not in ENGINES:
pytest.skip("engine not available")
chunks = {"dim1": 4, "dim2": 3, "dim3": 6}
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop):
original = create_test_data()
original.to_netcdf(tmp_netcdf_filename, engine=engine, format=nc_format)
with xr.open_dataset(
tmp_netcdf_filename, chunks=chunks, engine=engine
) as restored:
assert isinstance(restored.var1.data, da.Array)
computed = restored.compute()
assert_allclose(original, computed)
@requires_zarr
@pytest.mark.parametrize("consolidated", [True, False])
@pytest.mark.parametrize("compute", [True, False])
def test_dask_distributed_zarr_integration_test(loop, consolidated, compute) -> None:
if consolidated:
pytest.importorskip("zarr", minversion="2.2.1.dev2")
write_kwargs = {"consolidated": True}
read_kwargs = {"backend_kwargs": {"consolidated": True}}
else:
write_kwargs = read_kwargs = {} # type: ignore
chunks = {"dim1": 4, "dim2": 3, "dim3": 5}
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop):
original = create_test_data().chunk(chunks)
with create_tmp_file(
allow_cleanup_failure=ON_WINDOWS, suffix=".zarrc"
) as filename:
maybe_futures = original.to_zarr(
filename, compute=compute, **write_kwargs
)
if not compute:
maybe_futures.compute()
with xr.open_dataset(
filename, chunks="auto", engine="zarr", **read_kwargs
) as restored:
assert isinstance(restored.var1.data, da.Array)
computed = restored.compute()
assert_allclose(original, computed)
@requires_rasterio
@pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
def test_dask_distributed_rasterio_integration_test(loop) -> None:
with create_tmp_geotiff() as (tmp_file, expected):
with cluster() as (s, [a, b]):
with pytest.warns(DeprecationWarning), Client(s["address"], loop=loop):
da_tiff = xr.open_rasterio(tmp_file, chunks={"band": 1})
assert isinstance(da_tiff.data, da.Array)
actual = da_tiff.compute()
assert_allclose(actual, expected)
@requires_cfgrib
@pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
def test_dask_distributed_cfgrib_integration_test(loop) -> None:
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop):
with open_example_dataset(
"example.grib", engine="cfgrib", chunks={"time": 1}
) as ds:
with open_example_dataset("example.grib", engine="cfgrib") as expected:
assert isinstance(ds["t"].data, da.Array)
actual = ds.compute()
assert_allclose(actual, expected)
@pytest.mark.xfail(reason="https://github.com/pydata/xarray/pull/6211")
@gen_cluster(client=True)
async def test_async(c, s, a, b) -> None:
x = create_test_data()
assert not dask.is_dask_collection(x)
y = x.chunk({"dim2": 4}) + 10
assert dask.is_dask_collection(y)
assert dask.is_dask_collection(y.var1)
assert dask.is_dask_collection(y.var2)
z = y.persist()
assert str(z)
assert dask.is_dask_collection(z)
assert dask.is_dask_collection(z.var1)
assert dask.is_dask_collection(z.var2)
assert len(y.__dask_graph__()) > len(z.__dask_graph__())
assert not futures_of(y)
assert futures_of(z)
future = c.compute(z)
w = await future
assert not dask.is_dask_collection(w)
assert_allclose(x + 10, w)
assert s.tasks
def test_hdf5_lock() -> None:
assert isinstance(HDF5_LOCK, dask.utils.SerializableLock)
@pytest.mark.xfail(reason="https://github.com/pydata/xarray/pull/6211")
@gen_cluster(client=True)
async def test_serializable_locks(c, s, a, b) -> None:
def f(x, lock=None):
with lock:
return x + 1
# note, the creation of Lock needs to be done inside a cluster
for lock in [
HDF5_LOCK,
Lock(),
Lock("filename.nc"),
CombinedLock([HDF5_LOCK]),
CombinedLock([HDF5_LOCK, Lock("filename.nc")]),
]:
futures = c.map(f, list(range(10)), lock=lock)
await c.gather(futures)
lock2 = pickle.loads(pickle.dumps(lock))
assert type(lock) == type(lock2)
parser.rs
use super::Expression;
use crate::{
BinaryOp, ClassDefinition, CollectedErrors, CollectibleErrors, FuncDefinition, FuncType,
LogicalBinaryOp, LoxError, Nil, Position, PositionedToken, SimpleToken, Statement, Token,
UnaryOp, Value,
};
use std::collections::HashMap;
pub struct Parser<Iter: CollectedErrors<Item = PositionedToken>> {
tokens: Iter,
next: Option<PositionedToken>,
last_token_pos: Option<Position>,
errors: Option<Vec<LoxError>>,
}
macro_rules! binary_expression {
($prsr:ident.match_binary_expression($next:ident) { $(SimpleToken::$token:ident => BinaryOp::$op:ident,)+ }) => {
binary_expression! {
$prsr.match_internal_binary_expression(Binary, $next) {
$(SimpleToken::$token => BinaryOp::$op,)+
}
}
};
($prsr:ident.match_logical_binary_expression($next:ident) { $(SimpleToken::$token:ident => LogicalBinaryOp::$op:ident,)+ }) => {
binary_expression! {
$prsr.match_internal_binary_expression(LogicalBinary, $next) {
$(SimpleToken::$token => LogicalBinaryOp::$op,)+
}
}
};
($prsr:ident.match_internal_binary_expression($expr:ident, $next:ident) { $(SimpleToken::$token:ident => $op_type:ident::$op:ident,)+ }) => {
let mut left_expr = $prsr.$next();
loop {
match $prsr.peek_token() {
$(Some(Token::Simple(SimpleToken::$token)) => {
// Skip past the operator since we already know what it is
$prsr.advance_and_discard();
let right_expr = $prsr.$next();
left_expr = Expression::$expr($prsr.current_position(), Box::new(left_expr), $op_type::$op, Box::new(right_expr));
},)+
_ => break left_expr,
}
}
};
}
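// A sketch of how the macro is meant to be invoked from a grammar method (the token and
// operator names below are hypothetical; only `binary_expression!` itself is real):
//
//     fn equality(&mut self) -> Expression {
//         binary_expression! {
//             self.match_binary_expression(comparison) {
//                 SimpleToken::EqualEqual => BinaryOp::Equal,
//                 SimpleToken::BangEqual => BinaryOp::NotEqual,
//             }
//         }
//     }
//
// Each arm expands to a loop that keeps folding `left_expr <op> right_expr` while the
// next token matches, yielding left-associative parsing for that precedence level.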
impl<Iter: CollectedErrors<Item = PositionedToken>> Parser<Iter> {
fn new(mut iter: Iter) -> Self {
let next = iter.next();
Self {
tokens: iter,
next,
last_token_pos: None,
errors: None,
}
}
fn emit_error(&mut self, error: LoxError) {
self.errors.get_or_insert_with(Vec::new).push(error)
}
fn error_expression(&self) -> Expression {
assert!(
self.errors.as_ref().map(Vec::len).unwrap_or(0) > 0,
"No error has been emitted"
);
Expression::ErrorPlaceholder(self.current_position())
}
fn error_statement(&self) -> Statement {
assert!(
self.errors.as_ref().map(Vec::len).unwrap_or(0) > 0,
"No error has been emitted"
);
Statement::ErrorPlaceholder(self.current_position())
}
fn peek(&mut self) -> Option<&PositionedToken> {
self.next.as_ref()
}
fn peek_token(&mut self) -> Option<&Token> {
self.peek().map(|t| t.token())
}
fn advance_token(&mut self) -> Option<Token> {
let next = std::mem::replace(&mut self.next, self.tokens.next());
next.map(|t| {
let (token, token_pos) = t.take();
self.last_token_pos.replace(token_pos);
token
})
}
fn advance_and_discard(&mut self) {
self.advance_token();
}
fn advance_identifier(&mut self) -> Option<String> {
match self.peek_token() {
Some(Token::Identifier(name)) => {
let name = name.to_string();
self.advance_and_discard();
Some(name)
}
_ => {
self.emit_error(LoxError::ExpectedIdentifier(self.current_position()));
None
}
}
}
fn current_position(&self) -> Position {
self.last_token_pos.clone().unwrap()
}
fn map_next_token<R, F: FnOnce(&Token) -> R>(&mut self, f: F) -> Option<R> {
self.peek_token().map(f)
}
fn match_next(&mut self, expected: SimpleToken) -> bool {
self.map_next_token(|token| matches!(token, Token::Simple(token) if *token == expected))
.unwrap_or(false)
}
fn match_simple(&mut self) -> Option<SimpleToken> {
self.peek_token().and_then(|token| match token {
Token::Simple(simple) => Some(*simple),
_ => None,
})
}
fn consume_next(&mut self, expected: SimpleToken) -> bool {
if self.match_next(expected) {
self.advance_and_discard();
true
} else {
false
}
}
fn expect_next(&mut self, expected: SimpleToken) -> bool {
match self.advance_token() {
Some(Token::Simple(token)) if token == expected => true,
Some(unexpected_token) => {
self.emit_error(LoxError::UnexpectedToken(
self.current_position(),
unexpected_token,
));
false
}
None => {
self.emit_error(LoxError::UnexpectedEndOfFile(self.current_position()));
false
}
}
}
fn expect_and_skip_to(&mut self, expected: SimpleToken) {
// For use at the end of statements. Allows us to error and skip any garbage at the end of a statement
if !self.expect_next(expected) {
self.skip_until(expected);
}
}
fn skip_until(&mut self, target: SimpleToken) {
loop {
match self.advance_token() {
Some(Token::Simple(SimpleToken::EndOfFile)) | None => break,
Some(Token::Simple(token)) if token == target => break,
Some(_) => (),
}
}
}
fn assert_next(&mut self, expected: SimpleToken) {
assert_eq!(
self.advance_token(),
Some(Token::Simple(expected)),
"This token type should already have been checked - something is very wrong"
);
}
fn declaration(&mut self) -> Statement {
match self.match_simple() {
Some(SimpleToken::Var) => self.var_declaration(),
Some(SimpleToken::Fun) => self.func_declaration(),
Some(SimpleToken::Class) => self.class_declaration(),
_ => self.statement(),
}
}
fn var_declaration(&mut self) -> Statement {
self.assert_next(SimpleToken::Var);
let ret = self.advance_identifier().map(|identifier| {
let identifier_pos = self.current_position();
// Error handling in here is interesting. If we fail to parse an expression, we just behave as if
// the equals sign wasn't there. Whatever happens, we skip until the semicolon.
let expr = if self.consume_next(SimpleToken::Equal) {
self.expression()
} else {
Expression::Literal(self.current_position(), Value::from(Nil))
};
Statement::VarDeclaration(identifier_pos, identifier, expr)
});
self.expect_and_skip_to(SimpleToken::Semicolon);
ret.unwrap_or_else(|| self.error_statement())
}
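// Illustrative recovery (hypothetical input): for `var x = ;` the missing initializer
// emits an error and an error-placeholder expression stands in for it, then parsing
// skips to the semicolon so the following declarations still parse normally.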
fn func_declaration_impl(
&mut self,
func_type: FuncType,
identifier: String,
) -> Option<FuncDefinition> {
if self.expect_next(SimpleToken::LeftParen) {
let mut parameters = Vec::new();
if !self.consume_next(SimpleToken::RightParen) {
loop {
parameters.push(match self.advance_identifier() {
Some(name) => name,
None => {
// Skip the token and use a dummy name.
self.advance_and_discard();
"PARAMETER ERROR".into()
}
});
if !self.consume_next(SimpleToken::Comma) {
self.expect_and_skip_to(SimpleToken::RightParen);
break;
}
}
}
let body = self.block();
Some(FuncDefinition::new(func_type, identifier, parameters, body))
} else {
self.skip_until(SimpleToken::RightBrace);
None
}
}
fn func_declaration(&mut self) -> Statement {
self.assert_next(SimpleToken::Fun);
self.advance_identifier()
.and_then(|identifier| self.func_declaration_impl(FuncType::Function, identifier))
.map(|func_definition| {
Statement::FuncDeclaration(self.current_position(), func_definition)
})
.unwrap_or_else(|| self.error_statement())
}
fn method_declaration(&mut self, name: String) -> Option<FuncDefinition> {
if name == "init" {
self.func_declaration_impl(FuncType::Initializer, name)
} else {
self.func_declaration_impl(FuncType::Method, name)
}
}
fn class_declaration(&mut self) -> Statement {
self.assert_next(SimpleToken::Class);
let start_pos = self.current_position();
match self.advance_identifier() {
Some(class_identifier) => {
let superclass_identifier = if self.consume_next(SimpleToken::Less) {
                    Some(self.advance_identifier().unwrap_or_else(|| "UNKNOWN CLASS".into()))
} else {
None
};
if self.expect_next(SimpleToken::LeftBrace) {
let mut methods = HashMap::new();
while !self.match_next(SimpleToken::RightBrace) {
let method_identifier =
                        self.advance_identifier().unwrap_or_else(|| "FAKE METHOD".into());
if let Some(method_definition) =
self.method_declaration(method_identifier.clone())
{
methods.insert(method_identifier, method_definition);
}
}
self.expect_and_skip_to(SimpleToken::RightBrace);
Statement::ClassDeclaration(
start_pos,
ClassDefinition::new(class_identifier, superclass_identifier, methods),
)
} else {
self.skip_until(SimpleToken::RightBrace);
self.error_statement()
}
}
None => self.error_statement(),
}
}
fn statement(&mut self) -> Statement {
match self.peek_token() {
Some(Token::Simple(SimpleToken::Print)) => self.print_statement(),
Some(Token::Simple(SimpleToken::LeftBrace)) => self.block(),
Some(Token::Simple(SimpleToken::If)) => self.if_statement(),
Some(Token::Simple(SimpleToken::While)) => self.while_statement(),
Some(Token::Simple(SimpleToken::For)) => self.for_statement(),
Some(Token::Simple(SimpleToken::Return)) => self.return_statement(),
_ => self.expression_statement(),
}
}
fn expression_statement(&mut self) -> Statement {
let expr = self.expression();
self.expect_and_skip_to(SimpleToken::Semicolon);
Statement::Expression(self.current_position(), expr)
}
fn print_statement(&mut self) -> Statement {
self.assert_next(SimpleToken::Print);
let print_pos = self.current_position();
let expr = self.expression();
self.expect_and_skip_to(SimpleToken::Semicolon);
Statement::Print(print_pos, expr)
}
fn if_statement(&mut self) -> Statement {
self.assert_next(SimpleToken::If);
let if_position = self.current_position();
self.expect_and_skip_to(SimpleToken::LeftParen);
let condition = self.expression();
self.expect_and_skip_to(SimpleToken::RightParen);
let then_branch = self.statement();
let else_branch = if self.consume_next(SimpleToken::Else) {
Some(self.statement())
} else {
None
};
Statement::If(
if_position,
condition,
Box::new(then_branch),
else_branch.map(Box::new),
)
}
fn block(&mut self) -> Statement {
self.assert_next(SimpleToken::LeftBrace);
let block_pos = self.current_position();
let mut statements = Vec::new();
loop {
if self.consume_next(SimpleToken::RightBrace) {
break Statement::Block(block_pos, statements);
} else {
statements.push(self.declaration());
}
}
}
fn while_statement(&mut self) -> Statement {
self.assert_next(SimpleToken::While);
if self.expect_next(SimpleToken::LeftParen) {
let condition = self.expression();
self.expect_and_skip_to(SimpleToken::RightParen);
Statement::While(
condition.position().clone(),
condition,
Box::new(self.statement()),
)
} else {
self.skip_until(SimpleToken::Semicolon);
self.error_statement()
}
}
fn for_statement(&mut self) -> Statement {
self.assert_next(SimpleToken::For);
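        // Desugars `for (init; cond; inc) body` into
        // `{ init; while (cond) { body; inc; } }`.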
if self.expect_next(SimpleToken::LeftParen) {
let initializer = if self.consume_next(SimpleToken::Semicolon) {
None
} else if self.match_next(SimpleToken::Var) {
Some(self.var_declaration())
} else {
Some(self.expression_statement())
};
let condition = if self.match_next(SimpleToken::Semicolon) {
Expression::Literal(self.current_position(), Value::from(true))
} else {
self.expression()
};
self.expect_and_skip_to(SimpleToken::Semicolon);
let increment = if self.match_next(SimpleToken::RightParen) {
None
} else {
Some(self.expression())
};
self.expect_and_skip_to(SimpleToken::RightParen);
let body = self.statement();
let body = match increment {
Some(increment) => Statement::Block(
body.position().clone(),
vec![
body,
Statement::Expression(increment.position().clone(), increment),
], | ),
None => body,
};
let looper = Statement::While(condition.position().clone(), condition, Box::new(body));
match initializer {
Some(initializer) => {
Statement::Block(looper.position().clone(), vec![initializer, looper])
}
None => looper,
}
} else {
self.skip_until(SimpleToken::RightBrace);
self.error_statement()
}
}
fn return_statement(&mut self) -> Statement {
self.assert_next(SimpleToken::Return);
let start_pos = self.current_position();
let expr = if self.match_next(SimpleToken::Semicolon) {
None
} else {
Some(self.expression())
};
self.expect_and_skip_to(SimpleToken::Semicolon);
Statement::Return(start_pos, expr)
}
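    // Expression parsing from here down is classic recursive descent, one
    // method per precedence level (loosest to tightest binding):
    // comma -> assignment -> or -> and -> equality -> ternary -> comparison
    // -> term -> factor -> unary -> call -> primary.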
fn expression(&mut self) -> Expression {
self.comma_sequence()
}
fn comma_sequence(&mut self) -> Expression {
binary_expression! { self.match_binary_expression(assignment) {
SimpleToken::Comma => BinaryOp::Comma,
} }
}
fn assignment(&mut self) -> Expression {
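        // Assignment targets are recognised after the fact: a parsed
        // `VariableGet` (as in `a = 1`) is reinterpreted as an assignment,
        // and a parsed property `Get` (as in `a.b = 1`) as a `Set`.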
match self.logic_or() {
Expression::VariableGet(_, identifier) if self.consume_next(SimpleToken::Equal) => {
Expression::Assignment(
self.current_position(),
identifier.to_string(),
Box::new(self.assignment()),
)
}
Expression::Get(_, expr, identifier) if self.consume_next(SimpleToken::Equal) => {
Expression::Set(
self.current_position(),
expr,
identifier.to_string(),
Box::new(self.assignment()),
)
}
expr => expr,
}
}
fn logic_or(&mut self) -> Expression {
binary_expression! { self.match_logical_binary_expression(logic_and) {
SimpleToken::Or => LogicalBinaryOp::Or,
} }
}
fn logic_and(&mut self) -> Expression {
binary_expression! { self.match_logical_binary_expression(equality) {
SimpleToken::And => LogicalBinaryOp::And,
} }
}
fn equality(&mut self) -> Expression {
binary_expression! { self.match_binary_expression(ternary) {
SimpleToken::EqualEqual => BinaryOp::EqualEqual,
SimpleToken::BangEqual => BinaryOp::BangEqual,
} }
}
fn ternary(&mut self) -> Expression {
let comparison_expr = self.comparison();
if self.consume_next(SimpleToken::QuestionMark) {
let true_expr = self.expression();
self.expect_and_skip_to(SimpleToken::Colon);
let false_expr = self.expression();
Expression::Ternary(
self.current_position(),
Box::new(comparison_expr),
Box::new(true_expr),
Box::new(false_expr),
)
} else {
comparison_expr
}
}
fn comparison(&mut self) -> Expression {
binary_expression! { self.match_binary_expression(term) {
SimpleToken::Greater => BinaryOp::Greater,
SimpleToken::GreaterEqual => BinaryOp::GreaterEqual,
SimpleToken::Less => BinaryOp::Less,
SimpleToken::LessEqual => BinaryOp::LessEqual,
} }
}
fn term(&mut self) -> Expression {
binary_expression! { self.match_binary_expression(factor) {
SimpleToken::Plus => BinaryOp::Plus,
SimpleToken::Minus => BinaryOp::Minus,
} }
}
fn factor(&mut self) -> Expression {
binary_expression! { self.match_binary_expression(unary) {
SimpleToken::Star => BinaryOp::Star,
SimpleToken::Slash => BinaryOp::Slash,
} }
}
fn unary(&mut self) -> Expression {
if self.consume_next(SimpleToken::Minus) {
let position = self.current_position();
Expression::Unary(position, UnaryOp::Minus, Box::new(self.unary()))
} else if self.consume_next(SimpleToken::Bang) {
let position = self.current_position();
Expression::Unary(position, UnaryOp::Bang, Box::new(self.unary()))
} else if self.consume_next(SimpleToken::LeftParen) {
let position = self.current_position();
let expr = self.expression();
self.expect_and_skip_to(SimpleToken::RightParen);
Expression::Unary(position, UnaryOp::Grouping, Box::new(expr))
} else {
self.call()
}
}
fn finish_call(&mut self, callee: Expression) -> Expression {
let mut arguments = Vec::new();
if !self.consume_next(SimpleToken::RightParen) {
loop {
// Parse a single argument - use assignment so we don't parse comma sequences
arguments.push(self.assignment());
if !self.consume_next(SimpleToken::Comma) {
self.expect_and_skip_to(SimpleToken::RightParen);
break Expression::Call(self.current_position(), Box::new(callee), arguments);
}
}
} else {
Expression::Call(self.current_position(), Box::new(callee), arguments)
}
}
fn call(&mut self) -> Expression {
let mut expr = self.primary();
loop {
if self.consume_next(SimpleToken::LeftParen) {
expr = self.finish_call(expr);
} else if self.consume_next(SimpleToken::Dot) {
match self.advance_identifier() {
Some(identifier) => {
expr = Expression::Get(self.current_position(), Box::new(expr), identifier);
}
None => {
                        // Couldn't read a property name after the '.'; give
                        // up on this expression and hope we can go on
break self.error_expression();
}
}
} else {
break expr;
}
}
}
fn primary(&mut self) -> Expression {
match self.advance_token() {
Some(Token::Literal(value)) => Expression::Literal(self.current_position(), value),
Some(Token::Identifier(name)) => Expression::VariableGet(self.current_position(), name),
Some(Token::Simple(SimpleToken::Nil)) => {
Expression::Literal(self.current_position(), Nil.into())
}
Some(Token::Simple(SimpleToken::True)) => {
Expression::Literal(self.current_position(), true.into())
}
Some(Token::Simple(SimpleToken::False)) => {
Expression::Literal(self.current_position(), false.into())
}
Some(Token::Simple(SimpleToken::This)) => {
Expression::This(self.current_position(), "this".to_string())
}
Some(Token::Simple(SimpleToken::Super)) => {
if self.expect_next(SimpleToken::Dot) {
match self.advance_identifier() {
Some(identifier) => Expression::Super(
self.current_position(),
"this".to_string(),
"super".to_string(),
identifier,
),
None => self.error_expression(),
}
} else {
// Treat super without a dot as an access to a variable
// called super, which obviously can't exist.
self.emit_error(LoxError::ExpectedToken(
self.current_position(),
SimpleToken::Dot,
));
Expression::VariableGet(self.current_position(), "super".to_string())
}
}
Some(token) => {
self.emit_error(LoxError::UnexpectedToken(self.current_position(), token));
self.error_expression()
}
None => panic!("End of file token missing"),
}
}
}
impl<Iter: CollectedErrors<Item = PositionedToken>> Iterator for Parser<Iter> {
type Item = Statement;
fn next(&mut self) -> Option<Self::Item> {
match self.peek().map(|t| t.token()) {
Some(Token::Simple(SimpleToken::EndOfFile)) | None => None,
_ => Some(self.declaration()),
}
}
}
impl<Iter: CollectedErrors<Item = PositionedToken>> CollectedErrors for Parser<Iter> {
fn errors(self) -> Option<Vec<LoxError>> {
match (self.tokens.errors(), self.errors) {
(Some(mut token_errors), Some(mut errors)) => {
token_errors.append(&mut errors);
Some(token_errors)
}
(Some(errors), None) | (None, Some(errors)) => Some(errors),
(None, None) => None,
}
}
}
pub fn parse(
tokens: impl CollectibleErrors<Item = PositionedToken>,
) -> impl CollectedErrors<Item = Statement> {
Parser::new(tokens.collect_errors())
}
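// Hypothetical usage sketch (assuming `CollectedErrors` is itself an
// Iterator, as the `impl Iterator for Parser` above suggests; `tokens` is
// any `CollectibleErrors<Item = PositionedToken>` source):
//     let mut parser = parse(tokens);
//     let program: Vec<Statement> = parser.by_ref().collect();
//     let errors = parser.errors(); // None when parsing was clean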
pub trait Parseable {
type Parser: CollectedErrors<Item = Statement>;
fn parse(self) -> Self::Parser;
}
impl<Iter: CollectibleErrors<Item = PositionedToken>> Parseable for Iter {
type Parser = Parser<Iter::Collector>;
fn parse(self) -> Self::Parser {
Self::Parser::new(self.collect_errors())
}
} | |
util_test.go | package helper
import (
	"testing"

	"github.com/joeqian10/neo-gogogo/crypto"
	"github.com/stretchr/testify/assert"
)
func TestReverseBytes(t *testing.T) {
var b = make([]byte, 0)
r := ReverseBytes(b)
assert.Equal(t, b, r)
b = []byte{1}
r = ReverseBytes(b)
assert.Equal(t, b, r)
b = []byte{1, 2}
r = ReverseBytes(b)
assert.Equal(t, []byte{2, 1}, r)
b = []byte{1, 2, 3}
r = ReverseBytes(b)
assert.Equal(t, []byte{1, 2, 3}, b)
assert.Equal(t, []byte{3, 2, 1}, r)
}
func TestBytesToScriptHash(t *testing.T) {
	script := []byte{0x01, 0x02, 0x03, 0x04}
hash := crypto.Hash160(script)
scriptHash, _ := BytesToScriptHash(script)
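	// NEO renders script hashes in reversed (little-endian) byte order, so
	// scriptHash.String() below is BytesToHex(hash) with its bytes reversed.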
assert.Equal(t, "ecd2cbd8262d2c361b93bf89c4f0a78d76a16e70", BytesToHex(hash))
assert.Equal(t, "706ea1768da7f0c489bf931b362c2d26d8cbd2ec", scriptHash.String())
}
//func Test(t *testing.T) {
// //var v = int((0x30 - 27) & ^byte(4)) // 0001_0101 & ^ 0000_0100 = 0001_0001 = 17
// // //assert.Equal(t, 0xfb, v)
//
// p := 34 // 0010_0010
// q := 20 // 0001_0100
// //
// assert.Equal(t, 34, p& ^q)
//}
//func TestHashToInt(t *testing.T) {
// s := "Hello World"
// encoded := []byte(s); | // hash := keccak.Sum(nil)
//
// bi := HashToInt(hash)
//
// assert.Equal(t, 0, bi)
//} | // keccak := sha3.NewLegacyKeccak256()
// keccak.Write(encoded) |
checkin.py | # -*- coding: utf-8 -*-
import gsxws
import phonenumbers
from django import forms
from datetime import date
from django.conf import settings
from django_countries import countries
from django.core.validators import RegexValidator
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.forms import SelectDateWidget
from servo.validators import (apple_sn_validator,
phone_validator,
file_upload_validator,)
from servo.forms.base import SearchFieldInput
from servo.models import (Configuration, Device,
Attachment, Location, | # Generate list of years for purchase date picker
y = date.today().year
YEARS = [x + 1 for x in range(y - 10, y)]
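# e.g. when run in 2024 this yields [2015, 2016, ..., 2024] - the last ten
# years, current year included.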
def get_checkin_locations(user):
"""
Return possible checkin location choices for this user.
"""
if user.is_authenticated:
return user.locations.enabled()
else:
return User.get_checkin_user().locations.enabled()
class ConfirmationForm(forms.Form):
confirm = forms.BooleanField(required=False)
class DeviceForm(forms.ModelForm):
"""
Form for entering devices in the /checkin view
"""
required_css_class = 'required'
accessories = forms.CharField(
required=False,
label=_('Accessories'),
widget=forms.Textarea(attrs={'class': 'span12', 'rows': 3}),
help_text=_("Please list here any accessories you'd like to check in with your device (cables, power adapters, bags, etc)")
)
pop = forms.FileField(
required=False,
label=_('Proof of Purchase'),
validators=[file_upload_validator],
help_text=_('Proof of Purchase is required when setting purchase date manually')
)
condition = forms.CharField(
required=False,
label=_('Condition of device'),
widget=forms.Textarea(attrs={'class': 'span12', 'rows': 3}),
help_text=_('Please describe the condition of the device. Will be shown on the print-out.')
)
queue = forms.ModelChoiceField(
label=_('Queue'),
required=False,
queryset=Queue.objects.all(),
help_text=_('Assign order to this queue')
)
class Meta:
model = Device
fields = (
'description',
'sn',
'imei',
'purchased_on',
'purchase_country',
'username',
'password',
)
widgets = {
'sn': SearchFieldInput(),
'password': forms.PasswordInput(),
'username': forms.TextInput(),
'purchased_on': SelectDateWidget(years=YEARS),
'warranty_status': forms.Select(attrs={'readonly': 'readonly'}),
}
def __init__(self, *args, **kwargs):
super(DeviceForm, self).__init__(*args, **kwargs)
if Configuration.false('checkin_require_password'):
self.fields['password'].required = False
if Configuration.true('checkin_require_condition'):
self.fields['condition'].required = True
if kwargs.get('instance'):
prod = gsxws.Product('')
prod.description = self.instance.description
            if prod.is_ios:
                self.fields['password'].label = _('Passcode')
            else:
                del self.fields['imei']
            if not prod.is_mac:
                del self.fields['username']
if Configuration.true('checkin_password'):
self.fields['password'].widget = forms.TextInput(attrs={'class': 'span12'})
class CustomerForm(forms.Form):
"""
Form for entering customer info in /checkin
Not using a ModelForm for a reason.
"""
required_css_class = 'required'
fname = forms.CharField(label=_('First name'))
lname = forms.CharField(label=_('Last name'))
company = forms.CharField(
required=False,
label=_('Company (optional)')
)
email = forms.EmailField(
label=_('Email address'),
widget=forms.TextInput(attrs={'class': 'span12'})
)
phone = forms.CharField(label=_('Phone number'))
address = forms.CharField(label=_('Address'))
country = forms.ChoiceField(
label=_('Country'),
choices=Customer.COUNTRY_CHOICES,
initial=settings.INSTALL_COUNTRY.upper()
)
city = forms.CharField(label=_('City'))
postal_code = forms.CharField(label=_('Postal Code'))
checkin_location = forms.ModelChoiceField(
empty_label=None,
label=_(u'Check-in location'),
queryset=Location.objects.enabled(),
widget=forms.Select(attrs={'class': 'span12'}),
help_text=_('Choose where you want to leave the device')
)
checkout_location = forms.ModelChoiceField(
empty_label=None,
label=_(u'Check-out location'),
queryset=Location.objects.enabled(),
widget=forms.Select(attrs={'class': 'span12'}),
help_text=_('Choose where you want to pick up the device')
)
TERMS = _('I agree to the <a href="/checkin/terms/" target="_blank">terms of service.</a>')
agree_to_terms = forms.BooleanField(initial=False, label=mark_safe(TERMS))
notify_by_sms = forms.BooleanField(
initial=True,
required=False,
label=_('Notify by SMS')
)
notify_by_email = forms.BooleanField(
initial=True,
required=False,
label=_('Notify by Email')
)
def __init__(self, request, *args, **kwargs):
super(CustomerForm, self).__init__(*args, **kwargs)
location = request.session['checkin_location']
locations = get_checkin_locations(request.user)
self.show_location_picker = len(locations) > 1
self.fields['checkin_location'].initial = location
self.fields['checkout_location'].initial = location
if self.show_location_picker:
self.fields['checkin_location'].queryset = locations
self.fields['checkout_location'].queryset = locations
else:
self.fields['checkin_location'].widget = forms.HiddenInput()
self.fields['checkout_location'].widget = forms.HiddenInput()
if request.user.is_authenticated:
del(self.fields['agree_to_terms'])
self.fields['phone'].widget = SearchFieldInput()
def clean(self):
cd = super(CustomerForm, self).clean()
phone = cd.get('phone')
country = cd.get('country')
        if not phone:
            return cd
        try:
            phonenumbers.parse(phone, country)
        except phonenumbers.NumberParseException:
            msg = _('Enter a valid phone number')
            self._errors["phone"] = self.error_class([msg])
return cd
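    # Note on clean(): phonenumbers.parse() only checks that the input can be
    # parsed as a number for the given region; a stricter check would also run
    # phonenumbers.is_valid_number() on the parsed result.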
def clean_fname(self):
v = self.cleaned_data.get('fname')
return v.capitalize()
def clean_lname(self):
lname = self.cleaned_data.get('lname')
return lname.capitalize()
class AppleSerialNumberForm(forms.Form):
sn = forms.CharField(
min_length=8,
validators=[apple_sn_validator],
label=_(u'Serial number or IMEI')
)
def clean_sn(self):
sn = self.cleaned_data.get('sn')
return sn.upper()
class SerialNumberForm(forms.Form):
sn = forms.CharField(
min_length=8,
label=_(u'Serial number')
)
def clean_sn(self):
sn = self.cleaned_data.get('sn')
return sn.upper()
class StatusCheckForm(forms.Form):
code = forms.CharField(
min_length=8,
label=_('Service Order'),
validators=[RegexValidator(regex=r'\d{8}', message=_('Invalid Service Order number'))]
)
class IssueForm(forms.Form):
required_css_class = 'required'
issue_description = forms.CharField(
min_length=10,
label=_(u'Problem description'),
help_text=_('Will appear on the print-out'),
widget=forms.Textarea(attrs={'class': 'span12'})
)
attachment = forms.FileField(
required=False,
label=_('Attachment'),
validators=[file_upload_validator],
help_text=_('Please use this to attach relevant documents')
)
notes = forms.CharField(
required=False,
label=_(u'Notes for technician'),
widget=forms.Textarea(attrs={'class': 'span12'}),
help_text=_('Will not appear on the print-out')
)
class QuestionForm(forms.Form):
question = forms.CharField(widget=forms.HiddenInput)
answer = forms.CharField(widget=forms.HiddenInput)
class AttachmentForm(forms.ModelForm):
class Meta:
model = Attachment
exclude = [] | Customer, Queue, User,)
|
scale_set.py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ScaleSetArgs', 'ScaleSet']
@pulumi.input_type
class ScaleSetArgs:
def __init__(__self__, *,
network_profiles: pulumi.Input[Sequence[pulumi.Input['ScaleSetNetworkProfileArgs']]],
os_profile: pulumi.Input['ScaleSetOsProfileArgs'],
resource_group_name: pulumi.Input[str],
sku: pulumi.Input['ScaleSetSkuArgs'],
storage_profile_os_disk: pulumi.Input['ScaleSetStorageProfileOsDiskArgs'],
upgrade_policy_mode: pulumi.Input[str],
automatic_os_upgrade: Optional[pulumi.Input[bool]] = None,
boot_diagnostics: Optional[pulumi.Input['ScaleSetBootDiagnosticsArgs']] = None,
eviction_policy: Optional[pulumi.Input[str]] = None,
extensions: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetExtensionArgs']]]] = None,
health_probe_id: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input['ScaleSetIdentityArgs']] = None,
license_type: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
os_profile_linux_config: Optional[pulumi.Input['ScaleSetOsProfileLinuxConfigArgs']] = None,
os_profile_secrets: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetOsProfileSecretArgs']]]] = None,
os_profile_windows_config: Optional[pulumi.Input['ScaleSetOsProfileWindowsConfigArgs']] = None,
overprovision: Optional[pulumi.Input[bool]] = None,
plan: Optional[pulumi.Input['ScaleSetPlanArgs']] = None,
priority: Optional[pulumi.Input[str]] = None,
proximity_placement_group_id: Optional[pulumi.Input[str]] = None,
rolling_upgrade_policy: Optional[pulumi.Input['ScaleSetRollingUpgradePolicyArgs']] = None,
single_placement_group: Optional[pulumi.Input[bool]] = None,
storage_profile_data_disks: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetStorageProfileDataDiskArgs']]]] = None,
storage_profile_image_reference: Optional[pulumi.Input['ScaleSetStorageProfileImageReferenceArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a ScaleSet resource.
        :param pulumi.Input[Sequence[pulumi.Input['ScaleSetNetworkProfileArgs']]] network_profiles: A collection of network profile blocks as documented below.
:param pulumi.Input['ScaleSetOsProfileArgs'] os_profile: A Virtual Machine OS Profile block as documented below.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the virtual machine scale set. Changing this forces a new resource to be created.
:param pulumi.Input['ScaleSetSkuArgs'] sku: A sku block as documented below.
        :param pulumi.Input['ScaleSetStorageProfileOsDiskArgs'] storage_profile_os_disk: A storage profile os disk block as documented below.
        :param pulumi.Input[str] upgrade_policy_mode: Specifies the mode of an upgrade to virtual machines in the scale set. Possible values are `Rolling`, `Manual`, or `Automatic`. When choosing `Rolling`, you will need to set a health probe.
        :param pulumi.Input[bool] automatic_os_upgrade: Automatic OS patches can be applied by Azure to your scale set. This is particularly useful when `upgrade_policy_mode` is set to `Rolling`. Defaults to `false`.
:param pulumi.Input['ScaleSetBootDiagnosticsArgs'] boot_diagnostics: A boot diagnostics profile block as referenced below.
:param pulumi.Input[str] eviction_policy: Specifies the eviction policy for Virtual Machines in this Scale Set. Possible values are `Deallocate` and `Delete`.
:param pulumi.Input[Sequence[pulumi.Input['ScaleSetExtensionArgs']]] extensions: Can be specified multiple times to add extension profiles to the scale set. Each `extension` block supports the fields documented below.
:param pulumi.Input[str] health_probe_id: Specifies the identifier for the load balancer health probe. Required when using `Rolling` as your `upgrade_policy_mode`.
:param pulumi.Input[str] license_type: Specifies the Windows OS license type. If supplied, the only allowed values are `Windows_Client` and `Windows_Server`.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the virtual machine scale set resource. Changing this forces a new resource to be created.
:param pulumi.Input['ScaleSetOsProfileLinuxConfigArgs'] os_profile_linux_config: A Linux config block as documented below.
:param pulumi.Input[Sequence[pulumi.Input['ScaleSetOsProfileSecretArgs']]] os_profile_secrets: A collection of Secret blocks as documented below.
:param pulumi.Input['ScaleSetOsProfileWindowsConfigArgs'] os_profile_windows_config: A Windows config block as documented below.
:param pulumi.Input[bool] overprovision: Specifies whether the virtual machine scale set should be overprovisioned. Defaults to `true`.
:param pulumi.Input['ScaleSetPlanArgs'] plan: A plan block as documented below.
:param pulumi.Input[str] priority: Specifies the priority for the Virtual Machines in the Scale Set. Defaults to `Regular`. Possible values are `Low` and `Regular`.
        :param pulumi.Input[str] proximity_placement_group_id: The ID of the Proximity Placement Group to which this Virtual Machine should be assigned. Changing this forces a new resource to be created.
:param pulumi.Input['ScaleSetRollingUpgradePolicyArgs'] rolling_upgrade_policy: A `rolling_upgrade_policy` block as defined below. This is only applicable when the `upgrade_policy_mode` is `Rolling`.
:param pulumi.Input[bool] single_placement_group: Specifies whether the scale set is limited to a single placement group with a maximum size of 100 virtual machines. If set to false, managed disks must be used. Default is true. Changing this forces a new resource to be created. See [documentation](http://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups) for more information.
        :param pulumi.Input[Sequence[pulumi.Input['ScaleSetStorageProfileDataDiskArgs']]] storage_profile_data_disks: A storage profile data disk block as documented below.
:param pulumi.Input['ScaleSetStorageProfileImageReferenceArgs'] storage_profile_image_reference: A storage profile image reference block as documented below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] zones: A collection of availability zones to spread the Virtual Machines over.
"""
pulumi.set(__self__, "network_profiles", network_profiles)
pulumi.set(__self__, "os_profile", os_profile)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "sku", sku)
pulumi.set(__self__, "storage_profile_os_disk", storage_profile_os_disk)
pulumi.set(__self__, "upgrade_policy_mode", upgrade_policy_mode)
if automatic_os_upgrade is not None:
pulumi.set(__self__, "automatic_os_upgrade", automatic_os_upgrade)
if boot_diagnostics is not None:
pulumi.set(__self__, "boot_diagnostics", boot_diagnostics)
if eviction_policy is not None:
pulumi.set(__self__, "eviction_policy", eviction_policy)
if extensions is not None:
pulumi.set(__self__, "extensions", extensions)
if health_probe_id is not None:
pulumi.set(__self__, "health_probe_id", health_probe_id)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if license_type is not None:
pulumi.set(__self__, "license_type", license_type)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if os_profile_linux_config is not None:
pulumi.set(__self__, "os_profile_linux_config", os_profile_linux_config)
if os_profile_secrets is not None:
pulumi.set(__self__, "os_profile_secrets", os_profile_secrets)
if os_profile_windows_config is not None:
pulumi.set(__self__, "os_profile_windows_config", os_profile_windows_config)
if overprovision is not None:
pulumi.set(__self__, "overprovision", overprovision)
if plan is not None:
pulumi.set(__self__, "plan", plan)
if priority is not None:
pulumi.set(__self__, "priority", priority)
if proximity_placement_group_id is not None:
pulumi.set(__self__, "proximity_placement_group_id", proximity_placement_group_id)
if rolling_upgrade_policy is not None:
pulumi.set(__self__, "rolling_upgrade_policy", rolling_upgrade_policy)
if single_placement_group is not None:
pulumi.set(__self__, "single_placement_group", single_placement_group)
if storage_profile_data_disks is not None:
pulumi.set(__self__, "storage_profile_data_disks", storage_profile_data_disks)
if storage_profile_image_reference is not None:
pulumi.set(__self__, "storage_profile_image_reference", storage_profile_image_reference)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if zones is not None:
pulumi.set(__self__, "zones", zones)
@property
@pulumi.getter(name="networkProfiles")
def network_profiles(self) -> pulumi.Input[Sequence[pulumi.Input['ScaleSetNetworkProfileArgs']]]:
"""
        A collection of network profile blocks as documented below.
"""
return pulumi.get(self, "network_profiles")
@network_profiles.setter
def network_profiles(self, value: pulumi.Input[Sequence[pulumi.Input['ScaleSetNetworkProfileArgs']]]):
pulumi.set(self, "network_profiles", value)
@property
@pulumi.getter(name="osProfile")
def os_profile(self) -> pulumi.Input['ScaleSetOsProfileArgs']:
"""
A Virtual Machine OS Profile block as documented below.
"""
return pulumi.get(self, "os_profile")
@os_profile.setter
def os_profile(self, value: pulumi.Input['ScaleSetOsProfileArgs']):
pulumi.set(self, "os_profile", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group in which to create the virtual machine scale set. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def sku(self) -> pulumi.Input['ScaleSetSkuArgs']:
"""
A sku block as documented below.
"""
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: pulumi.Input['ScaleSetSkuArgs']):
pulumi.set(self, "sku", value)
@property
@pulumi.getter(name="storageProfileOsDisk")
def storage_profile_os_disk(self) -> pulumi.Input['ScaleSetStorageProfileOsDiskArgs']:
"""
        A storage profile os disk block as documented below.
"""
return pulumi.get(self, "storage_profile_os_disk")
@storage_profile_os_disk.setter
def storage_profile_os_disk(self, value: pulumi.Input['ScaleSetStorageProfileOsDiskArgs']):
pulumi.set(self, "storage_profile_os_disk", value)
@property
@pulumi.getter(name="upgradePolicyMode")
def upgrade_policy_mode(self) -> pulumi.Input[str]:
"""
        Specifies the mode of an upgrade to virtual machines in the scale set. Possible values are `Rolling`, `Manual`, or `Automatic`. When choosing `Rolling`, you will need to set a health probe.
"""
return pulumi.get(self, "upgrade_policy_mode")
@upgrade_policy_mode.setter
def upgrade_policy_mode(self, value: pulumi.Input[str]):
pulumi.set(self, "upgrade_policy_mode", value)
@property
@pulumi.getter(name="automaticOsUpgrade")
def automatic_os_upgrade(self) -> Optional[pulumi.Input[bool]]:
"""
        Automatic OS patches can be applied by Azure to your scale set. This is particularly useful when `upgrade_policy_mode` is set to `Rolling`. Defaults to `false`.
"""
return pulumi.get(self, "automatic_os_upgrade")
@automatic_os_upgrade.setter
def automatic_os_upgrade(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "automatic_os_upgrade", value)
@property
@pulumi.getter(name="bootDiagnostics")
def boot_diagnostics(self) -> Optional[pulumi.Input['ScaleSetBootDiagnosticsArgs']]:
"""
A boot diagnostics profile block as referenced below.
"""
return pulumi.get(self, "boot_diagnostics")
@boot_diagnostics.setter
def boot_diagnostics(self, value: Optional[pulumi.Input['ScaleSetBootDiagnosticsArgs']]):
pulumi.set(self, "boot_diagnostics", value)
@property
@pulumi.getter(name="evictionPolicy")
def eviction_policy(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the eviction policy for Virtual Machines in this Scale Set. Possible values are `Deallocate` and `Delete`.
"""
return pulumi.get(self, "eviction_policy")
@eviction_policy.setter
def eviction_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "eviction_policy", value)
@property
@pulumi.getter
def extensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetExtensionArgs']]]]:
"""
Can be specified multiple times to add extension profiles to the scale set. Each `extension` block supports the fields documented below.
"""
return pulumi.get(self, "extensions")
@extensions.setter
def extensions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetExtensionArgs']]]]):
pulumi.set(self, "extensions", value)
@property
@pulumi.getter(name="healthProbeId")
def health_probe_id(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the identifier for the load balancer health probe. Required when using `Rolling` as your `upgrade_policy_mode`.
"""
return pulumi.get(self, "health_probe_id")
@health_probe_id.setter
def health_probe_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "health_probe_id", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['ScaleSetIdentityArgs']]:
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['ScaleSetIdentityArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter(name="licenseType")
def license_type(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the Windows OS license type. If supplied, the only allowed values are `Windows_Client` and `Windows_Server`.
"""
return pulumi.get(self, "license_type")
@license_type.setter
def license_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "license_type", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the virtual machine scale set resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="osProfileLinuxConfig")
def os_profile_linux_config(self) -> Optional[pulumi.Input['ScaleSetOsProfileLinuxConfigArgs']]:
"""
A Linux config block as documented below.
"""
return pulumi.get(self, "os_profile_linux_config")
@os_profile_linux_config.setter
def os_profile_linux_config(self, value: Optional[pulumi.Input['ScaleSetOsProfileLinuxConfigArgs']]):
pulumi.set(self, "os_profile_linux_config", value)
@property
@pulumi.getter(name="osProfileSecrets")
def os_profile_secrets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetOsProfileSecretArgs']]]]:
"""
A collection of Secret blocks as documented below.
"""
return pulumi.get(self, "os_profile_secrets")
@os_profile_secrets.setter
def os_profile_secrets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetOsProfileSecretArgs']]]]):
pulumi.set(self, "os_profile_secrets", value)
@property
@pulumi.getter(name="osProfileWindowsConfig")
def os_profile_windows_config(self) -> Optional[pulumi.Input['ScaleSetOsProfileWindowsConfigArgs']]:
"""
A Windows config block as documented below.
"""
return pulumi.get(self, "os_profile_windows_config")
@os_profile_windows_config.setter
def os_profile_windows_config(self, value: Optional[pulumi.Input['ScaleSetOsProfileWindowsConfigArgs']]):
pulumi.set(self, "os_profile_windows_config", value)
@property
@pulumi.getter
def overprovision(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether the virtual machine scale set should be overprovisioned. Defaults to `true`.
"""
return pulumi.get(self, "overprovision")
@overprovision.setter
def overprovision(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "overprovision", value)
@property
@pulumi.getter
def plan(self) -> Optional[pulumi.Input['ScaleSetPlanArgs']]:
"""
A plan block as documented below.
"""
return pulumi.get(self, "plan")
@plan.setter
def plan(self, value: Optional[pulumi.Input['ScaleSetPlanArgs']]):
pulumi.set(self, "plan", value)
@property
@pulumi.getter
def priority(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the priority for the Virtual Machines in the Scale Set. Defaults to `Regular`. Possible values are `Low` and `Regular`.
"""
return pulumi.get(self, "priority")
@priority.setter
def priority(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "priority", value)
@property
@pulumi.getter(name="proximityPlacementGroupId")
def proximity_placement_group_id(self) -> Optional[pulumi.Input[str]]:
"""
        The ID of the Proximity Placement Group to which this Virtual Machine should be assigned. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "proximity_placement_group_id")
@proximity_placement_group_id.setter
def proximity_placement_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "proximity_placement_group_id", value)
@property
@pulumi.getter(name="rollingUpgradePolicy")
def rolling_upgrade_policy(self) -> Optional[pulumi.Input['ScaleSetRollingUpgradePolicyArgs']]:
"""
A `rolling_upgrade_policy` block as defined below. This is only applicable when the `upgrade_policy_mode` is `Rolling`.
"""
return pulumi.get(self, "rolling_upgrade_policy")
@rolling_upgrade_policy.setter
def rolling_upgrade_policy(self, value: Optional[pulumi.Input['ScaleSetRollingUpgradePolicyArgs']]):
pulumi.set(self, "rolling_upgrade_policy", value)
@property
@pulumi.getter(name="singlePlacementGroup")
def single_placement_group(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether the scale set is limited to a single placement group with a maximum size of 100 virtual machines. If set to false, managed disks must be used. Default is true. Changing this forces a new resource to be created. See [documentation](http://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups) for more information.
"""
return pulumi.get(self, "single_placement_group")
@single_placement_group.setter
def single_placement_group(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "single_placement_group", value)
@property
@pulumi.getter(name="storageProfileDataDisks")
def storage_profile_data_disks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetStorageProfileDataDiskArgs']]]]:
"""
        A storage profile data disk block as documented below.
"""
return pulumi.get(self, "storage_profile_data_disks")
@storage_profile_data_disks.setter
def storage_profile_data_disks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetStorageProfileDataDiskArgs']]]]):
pulumi.set(self, "storage_profile_data_disks", value)
@property
@pulumi.getter(name="storageProfileImageReference")
def storage_profile_image_reference(self) -> Optional[pulumi.Input['ScaleSetStorageProfileImageReferenceArgs']]:
        | """
        A storage profile image reference block as documented below.
        """
        return pulumi.get(self, "storage_profile_image_reference")
@storage_profile_image_reference.setter
def storage_profile_image_reference(self, value: Optional[pulumi.Input['ScaleSetStorageProfileImageReferenceArgs']]):
pulumi.set(self, "storage_profile_image_reference", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A collection of availability zones to spread the Virtual Machines over.
"""
return pulumi.get(self, "zones")
@zones.setter
def zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "zones", value)
@pulumi.input_type
class _ScaleSetState:
def __init__(__self__, *,
automatic_os_upgrade: Optional[pulumi.Input[bool]] = None,
boot_diagnostics: Optional[pulumi.Input['ScaleSetBootDiagnosticsArgs']] = None,
eviction_policy: Optional[pulumi.Input[str]] = None,
extensions: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetExtensionArgs']]]] = None,
health_probe_id: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input['ScaleSetIdentityArgs']] = None,
license_type: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network_profiles: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetNetworkProfileArgs']]]] = None,
os_profile: Optional[pulumi.Input['ScaleSetOsProfileArgs']] = None,
os_profile_linux_config: Optional[pulumi.Input['ScaleSetOsProfileLinuxConfigArgs']] = None,
os_profile_secrets: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetOsProfileSecretArgs']]]] = None,
os_profile_windows_config: Optional[pulumi.Input['ScaleSetOsProfileWindowsConfigArgs']] = None,
overprovision: Optional[pulumi.Input[bool]] = None,
plan: Optional[pulumi.Input['ScaleSetPlanArgs']] = None,
priority: Optional[pulumi.Input[str]] = None,
proximity_placement_group_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
rolling_upgrade_policy: Optional[pulumi.Input['ScaleSetRollingUpgradePolicyArgs']] = None,
single_placement_group: Optional[pulumi.Input[bool]] = None,
sku: Optional[pulumi.Input['ScaleSetSkuArgs']] = None,
storage_profile_data_disks: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetStorageProfileDataDiskArgs']]]] = None,
storage_profile_image_reference: Optional[pulumi.Input['ScaleSetStorageProfileImageReferenceArgs']] = None,
storage_profile_os_disk: Optional[pulumi.Input['ScaleSetStorageProfileOsDiskArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
upgrade_policy_mode: Optional[pulumi.Input[str]] = None,
zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering ScaleSet resources.
        :param pulumi.Input[bool] automatic_os_upgrade: Automatic OS patches can be applied by Azure to your scale set. This is particularly useful when `upgrade_policy_mode` is set to `Rolling`. Defaults to `false`.
:param pulumi.Input['ScaleSetBootDiagnosticsArgs'] boot_diagnostics: A boot diagnostics profile block as referenced below.
:param pulumi.Input[str] eviction_policy: Specifies the eviction policy for Virtual Machines in this Scale Set. Possible values are `Deallocate` and `Delete`.
:param pulumi.Input[Sequence[pulumi.Input['ScaleSetExtensionArgs']]] extensions: Can be specified multiple times to add extension profiles to the scale set. Each `extension` block supports the fields documented below.
:param pulumi.Input[str] health_probe_id: Specifies the identifier for the load balancer health probe. Required when using `Rolling` as your `upgrade_policy_mode`.
:param pulumi.Input[str] license_type: Specifies the Windows OS license type. If supplied, the only allowed values are `Windows_Client` and `Windows_Server`.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the virtual machine scale set resource. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input['ScaleSetNetworkProfileArgs']]] network_profiles: A collection of network profile blocks as documented below.
:param pulumi.Input['ScaleSetOsProfileArgs'] os_profile: A Virtual Machine OS Profile block as documented below.
:param pulumi.Input['ScaleSetOsProfileLinuxConfigArgs'] os_profile_linux_config: A Linux config block as documented below.
:param pulumi.Input[Sequence[pulumi.Input['ScaleSetOsProfileSecretArgs']]] os_profile_secrets: A collection of Secret blocks as documented below.
:param pulumi.Input['ScaleSetOsProfileWindowsConfigArgs'] os_profile_windows_config: A Windows config block as documented below.
:param pulumi.Input[bool] overprovision: Specifies whether the virtual machine scale set should be overprovisioned. Defaults to `true`.
:param pulumi.Input['ScaleSetPlanArgs'] plan: A plan block as documented below.
:param pulumi.Input[str] priority: Specifies the priority for the Virtual Machines in the Scale Set. Defaults to `Regular`. Possible values are `Low` and `Regular`.
        :param pulumi.Input[str] proximity_placement_group_id: The ID of the Proximity Placement Group to which this Virtual Machine should be assigned. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the virtual machine scale set. Changing this forces a new resource to be created.
:param pulumi.Input['ScaleSetRollingUpgradePolicyArgs'] rolling_upgrade_policy: A `rolling_upgrade_policy` block as defined below. This is only applicable when the `upgrade_policy_mode` is `Rolling`.
:param pulumi.Input[bool] single_placement_group: Specifies whether the scale set is limited to a single placement group with a maximum size of 100 virtual machines. If set to false, managed disks must be used. Default is true. Changing this forces a new resource to be created. See [documentation](http://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups) for more information.
:param pulumi.Input['ScaleSetSkuArgs'] sku: A sku block as documented below.
        :param pulumi.Input[Sequence[pulumi.Input['ScaleSetStorageProfileDataDiskArgs']]] storage_profile_data_disks: A storage profile data disk block as documented below.
:param pulumi.Input['ScaleSetStorageProfileImageReferenceArgs'] storage_profile_image_reference: A storage profile image reference block as documented below.
        :param pulumi.Input['ScaleSetStorageProfileOsDiskArgs'] storage_profile_os_disk: A storage profile os disk block as documented below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
        :param pulumi.Input[str] upgrade_policy_mode: Specifies the mode of an upgrade to virtual machines in the scale set. Possible values are `Rolling`, `Manual`, or `Automatic`. When choosing `Rolling`, you will need to set a health probe.
:param pulumi.Input[Sequence[pulumi.Input[str]]] zones: A collection of availability zones to spread the Virtual Machines over.
"""
if automatic_os_upgrade is not None:
pulumi.set(__self__, "automatic_os_upgrade", automatic_os_upgrade)
if boot_diagnostics is not None:
pulumi.set(__self__, "boot_diagnostics", boot_diagnostics)
if eviction_policy is not None:
pulumi.set(__self__, "eviction_policy", eviction_policy)
if extensions is not None:
pulumi.set(__self__, "extensions", extensions)
if health_probe_id is not None:
pulumi.set(__self__, "health_probe_id", health_probe_id)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if license_type is not None:
pulumi.set(__self__, "license_type", license_type)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if network_profiles is not None:
pulumi.set(__self__, "network_profiles", network_profiles)
if os_profile is not None:
pulumi.set(__self__, "os_profile", os_profile)
if os_profile_linux_config is not None:
pulumi.set(__self__, "os_profile_linux_config", os_profile_linux_config)
if os_profile_secrets is not None:
pulumi.set(__self__, "os_profile_secrets", os_profile_secrets)
if os_profile_windows_config is not None:
pulumi.set(__self__, "os_profile_windows_config", os_profile_windows_config)
if overprovision is not None:
pulumi.set(__self__, "overprovision", overprovision)
if plan is not None:
pulumi.set(__self__, "plan", plan)
if priority is not None:
pulumi.set(__self__, "priority", priority)
if proximity_placement_group_id is not None:
pulumi.set(__self__, "proximity_placement_group_id", proximity_placement_group_id)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if rolling_upgrade_policy is not None:
pulumi.set(__self__, "rolling_upgrade_policy", rolling_upgrade_policy)
if single_placement_group is not None:
pulumi.set(__self__, "single_placement_group", single_placement_group)
if sku is not None:
pulumi.set(__self__, "sku", sku)
if storage_profile_data_disks is not None:
pulumi.set(__self__, "storage_profile_data_disks", storage_profile_data_disks)
if storage_profile_image_reference is not None:
pulumi.set(__self__, "storage_profile_image_reference", storage_profile_image_reference)
if storage_profile_os_disk is not None:
pulumi.set(__self__, "storage_profile_os_disk", storage_profile_os_disk)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if upgrade_policy_mode is not None:
pulumi.set(__self__, "upgrade_policy_mode", upgrade_policy_mode)
if zones is not None:
pulumi.set(__self__, "zones", zones)
@property
@pulumi.getter(name="automaticOsUpgrade")
def automatic_os_upgrade(self) -> Optional[pulumi.Input[bool]]:
"""
        Automatic OS patches can be applied by Azure to your scale set. This is particularly useful when `upgrade_policy_mode` is set to `Rolling`. Defaults to `false`.
"""
return pulumi.get(self, "automatic_os_upgrade")
@automatic_os_upgrade.setter
def automatic_os_upgrade(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "automatic_os_upgrade", value)
@property
@pulumi.getter(name="bootDiagnostics")
def boot_diagnostics(self) -> Optional[pulumi.Input['ScaleSetBootDiagnosticsArgs']]:
"""
A boot diagnostics profile block as referenced below.
"""
return pulumi.get(self, "boot_diagnostics")
@boot_diagnostics.setter
def boot_diagnostics(self, value: Optional[pulumi.Input['ScaleSetBootDiagnosticsArgs']]):
pulumi.set(self, "boot_diagnostics", value)
@property
@pulumi.getter(name="evictionPolicy")
def eviction_policy(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the eviction policy for Virtual Machines in this Scale Set. Possible values are `Deallocate` and `Delete`.
"""
return pulumi.get(self, "eviction_policy")
@eviction_policy.setter
def eviction_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "eviction_policy", value)
@property
@pulumi.getter
def extensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetExtensionArgs']]]]:
"""
Can be specified multiple times to add extension profiles to the scale set. Each `extension` block supports the fields documented below.
"""
return pulumi.get(self, "extensions")
@extensions.setter
def extensions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetExtensionArgs']]]]):
pulumi.set(self, "extensions", value)
@property
@pulumi.getter(name="healthProbeId")
def health_probe_id(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the identifier for the load balancer health probe. Required when using `Rolling` as your `upgrade_policy_mode`.
"""
return pulumi.get(self, "health_probe_id")
@health_probe_id.setter
def health_probe_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "health_probe_id", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['ScaleSetIdentityArgs']]:
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['ScaleSetIdentityArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter(name="licenseType")
def license_type(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the Windows OS license type. If supplied, the only allowed values are `Windows_Client` and `Windows_Server`.
"""
return pulumi.get(self, "license_type")
@license_type.setter
def license_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "license_type", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the virtual machine scale set resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="networkProfiles")
def network_profiles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetNetworkProfileArgs']]]]:
"""
        A collection of network profile blocks as documented below.
"""
return pulumi.get(self, "network_profiles")
@network_profiles.setter
def network_profiles(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetNetworkProfileArgs']]]]):
pulumi.set(self, "network_profiles", value)
@property
@pulumi.getter(name="osProfile")
def os_profile(self) -> Optional[pulumi.Input['ScaleSetOsProfileArgs']]:
"""
A Virtual Machine OS Profile block as documented below.
"""
return pulumi.get(self, "os_profile")
@os_profile.setter
def os_profile(self, value: Optional[pulumi.Input['ScaleSetOsProfileArgs']]):
pulumi.set(self, "os_profile", value)
@property
@pulumi.getter(name="osProfileLinuxConfig")
def os_profile_linux_config(self) -> Optional[pulumi.Input['ScaleSetOsProfileLinuxConfigArgs']]:
"""
A Linux config block as documented below.
"""
return pulumi.get(self, "os_profile_linux_config")
@os_profile_linux_config.setter
def os_profile_linux_config(self, value: Optional[pulumi.Input['ScaleSetOsProfileLinuxConfigArgs']]):
pulumi.set(self, "os_profile_linux_config", value)
@property
@pulumi.getter(name="osProfileSecrets")
def os_profile_secrets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetOsProfileSecretArgs']]]]:
"""
A collection of Secret blocks as documented below.
"""
return pulumi.get(self, "os_profile_secrets")
@os_profile_secrets.setter
def os_profile_secrets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetOsProfileSecretArgs']]]]):
pulumi.set(self, "os_profile_secrets", value)
@property
@pulumi.getter(name="osProfileWindowsConfig")
def os_profile_windows_config(self) -> Optional[pulumi.Input['ScaleSetOsProfileWindowsConfigArgs']]:
"""
A Windows config block as documented below.
"""
return pulumi.get(self, "os_profile_windows_config")
@os_profile_windows_config.setter
def os_profile_windows_config(self, value: Optional[pulumi.Input['ScaleSetOsProfileWindowsConfigArgs']]):
pulumi.set(self, "os_profile_windows_config", value)
@property
@pulumi.getter
def overprovision(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether the virtual machine scale set should be overprovisioned. Defaults to `true`.
"""
return pulumi.get(self, "overprovision")
@overprovision.setter
def overprovision(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "overprovision", value)
@property
@pulumi.getter
def plan(self) -> Optional[pulumi.Input['ScaleSetPlanArgs']]:
"""
A plan block as documented below.
"""
return pulumi.get(self, "plan")
@plan.setter
def plan(self, value: Optional[pulumi.Input['ScaleSetPlanArgs']]):
pulumi.set(self, "plan", value)
@property
@pulumi.getter
def priority(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the priority for the Virtual Machines in the Scale Set. Defaults to `Regular`. Possible values are `Low` and `Regular`.
"""
return pulumi.get(self, "priority")
@priority.setter
def priority(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "priority", value)
@property
@pulumi.getter(name="proximityPlacementGroupId")
def proximity_placement_group_id(self) -> Optional[pulumi.Input[str]]:
"""
        The ID of the Proximity Placement Group to which this Virtual Machine should be assigned. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "proximity_placement_group_id")
@proximity_placement_group_id.setter
def proximity_placement_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "proximity_placement_group_id", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource group in which to create the virtual machine scale set. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="rollingUpgradePolicy")
def rolling_upgrade_policy(self) -> Optional[pulumi.Input['ScaleSetRollingUpgradePolicyArgs']]:
"""
A `rolling_upgrade_policy` block as defined below. This is only applicable when the `upgrade_policy_mode` is `Rolling`.
"""
return pulumi.get(self, "rolling_upgrade_policy")
@rolling_upgrade_policy.setter
def rolling_upgrade_policy(self, value: Optional[pulumi.Input['ScaleSetRollingUpgradePolicyArgs']]):
pulumi.set(self, "rolling_upgrade_policy", value)
@property
@pulumi.getter(name="singlePlacementGroup")
def single_placement_group(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether the scale set is limited to a single placement group with a maximum size of 100 virtual machines. If set to false, managed disks must be used. Default is true. Changing this forces a new resource to be created. See [documentation](http://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups) for more information.
"""
return pulumi.get(self, "single_placement_group")
@single_placement_group.setter
def single_placement_group(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "single_placement_group", value)
@property
@pulumi.getter
def sku(self) -> Optional[pulumi.Input['ScaleSetSkuArgs']]:
"""
A sku block as documented below.
"""
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: Optional[pulumi.Input['ScaleSetSkuArgs']]):
pulumi.set(self, "sku", value)
@property
@pulumi.getter(name="storageProfileDataDisks")
def storage_profile_data_disks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetStorageProfileDataDiskArgs']]]]:
"""
A storage profile data disk block as documented below.
"""
return pulumi.get(self, "storage_profile_data_disks")
@storage_profile_data_disks.setter
def storage_profile_data_disks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetStorageProfileDataDiskArgs']]]]):
pulumi.set(self, "storage_profile_data_disks", value)
@property
@pulumi.getter(name="storageProfileImageReference")
def storage_profile_image_reference(self) -> Optional[pulumi.Input['ScaleSetStorageProfileImageReferenceArgs']]:
"""
A storage profile image reference block as documented below.
"""
return pulumi.get(self, "storage_profile_image_reference")
@storage_profile_image_reference.setter
def storage_profile_image_reference(self, value: Optional[pulumi.Input['ScaleSetStorageProfileImageReferenceArgs']]):
pulumi.set(self, "storage_profile_image_reference", value)
@property
@pulumi.getter(name="storageProfileOsDisk")
def storage_profile_os_disk(self) -> Optional[pulumi.Input['ScaleSetStorageProfileOsDiskArgs']]:
"""
A storage profile os disk block as documented below.
"""
return pulumi.get(self, "storage_profile_os_disk")
@storage_profile_os_disk.setter
def storage_profile_os_disk(self, value: Optional[pulumi.Input['ScaleSetStorageProfileOsDiskArgs']]):
pulumi.set(self, "storage_profile_os_disk", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="upgradePolicyMode")
def upgrade_policy_mode(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the mode of an upgrade to virtual machines in the scale set. Possible values are `Rolling`, `Manual`, or `Automatic`. When choosing `Rolling`, you will need to set a health probe.
"""
return pulumi.get(self, "upgrade_policy_mode")
@upgrade_policy_mode.setter
def upgrade_policy_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "upgrade_policy_mode", value)
@property
@pulumi.getter
def zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A collection of availability zones to spread the Virtual Machines over.
"""
return pulumi.get(self, "zones")
@zones.setter
def zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "zones", value)
class ScaleSet(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
automatic_os_upgrade: Optional[pulumi.Input[bool]] = None,
boot_diagnostics: Optional[pulumi.Input[pulumi.InputType['ScaleSetBootDiagnosticsArgs']]] = None,
eviction_policy: Optional[pulumi.Input[str]] = None,
extensions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetExtensionArgs']]]]] = None,
health_probe_id: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ScaleSetIdentityArgs']]] = None,
license_type: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network_profiles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetNetworkProfileArgs']]]]] = None,
os_profile: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileArgs']]] = None,
os_profile_linux_config: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileLinuxConfigArgs']]] = None,
os_profile_secrets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetOsProfileSecretArgs']]]]] = None,
os_profile_windows_config: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileWindowsConfigArgs']]] = None,
overprovision: Optional[pulumi.Input[bool]] = None,
plan: Optional[pulumi.Input[pulumi.InputType['ScaleSetPlanArgs']]] = None,
priority: Optional[pulumi.Input[str]] = None,
proximity_placement_group_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
rolling_upgrade_policy: Optional[pulumi.Input[pulumi.InputType['ScaleSetRollingUpgradePolicyArgs']]] = None,
single_placement_group: Optional[pulumi.Input[bool]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['ScaleSetSkuArgs']]] = None,
storage_profile_data_disks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileDataDiskArgs']]]]] = None,
storage_profile_image_reference: Optional[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileImageReferenceArgs']]] = None,
storage_profile_os_disk: Optional[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileOsDiskArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
upgrade_policy_mode: Optional[pulumi.Input[str]] = None,
zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
"""
Manages a virtual machine scale set.
## Disclaimers
> **Note:** The `compute.ScaleSet` resource has been superseded by the [`compute.LinuxVirtualMachineScaleSet`](linux_virtual_machine_scale_set.html) and `compute.WindowsVirtualMachineScaleSet` resources. The existing `compute.ScaleSet` resource will continue to be available throughout the 2.x releases; however, it is in a feature-frozen state to maintain compatibility - new functionality will instead be added to the `compute.LinuxVirtualMachineScaleSet` and `compute.WindowsVirtualMachineScaleSet` resources.
## Example Usage
### With Managed Disks (Recommended)
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_virtual_network = azure.network.VirtualNetwork("exampleVirtualNetwork",
address_spaces=["10.0.0.0/16"],
location=example_resource_group.location,
resource_group_name=example_resource_group.name)
example_subnet = azure.network.Subnet("exampleSubnet",
resource_group_name=example_resource_group.name,
virtual_network_name=example_virtual_network.name,
address_prefixes=["10.0.2.0/24"])
example_public_ip = azure.network.PublicIp("examplePublicIp",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
allocation_method="Static",
domain_name_label=example_resource_group.name,
tags={
"environment": "staging",
})
example_load_balancer = azure.lb.LoadBalancer("exampleLoadBalancer",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
frontend_ip_configurations=[azure.lb.LoadBalancerFrontendIpConfigurationArgs(
name="PublicIPAddress",
public_ip_address_id=example_public_ip.id,
)])
bpepool = azure.lb.BackendAddressPool("bpepool",
resource_group_name=example_resource_group.name,
loadbalancer_id=example_load_balancer.id)
lbnatpool = azure.lb.NatPool("lbnatpool",
resource_group_name=example_resource_group.name,
loadbalancer_id=example_load_balancer.id,
protocol="Tcp",
frontend_port_start=50000,
frontend_port_end=50119,
backend_port=22,
frontend_ip_configuration_name="PublicIPAddress")
example_probe = azure.lb.Probe("exampleProbe",
resource_group_name=example_resource_group.name,
loadbalancer_id=example_load_balancer.id,
protocol="Http",
request_path="/health",
port=8080)
example_scale_set = azure.compute.ScaleSet("exampleScaleSet",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
automatic_os_upgrade=True,
upgrade_policy_mode="Rolling",
rolling_upgrade_policy=azure.compute.ScaleSetRollingUpgradePolicyArgs(
max_batch_instance_percent=20,
max_unhealthy_instance_percent=20,
max_unhealthy_upgraded_instance_percent=5,
pause_time_between_batches="PT0S",
),
health_probe_id=example_probe.id,
sku=azure.compute.ScaleSetSkuArgs(
name="Standard_F2",
tier="Standard",
capacity=2,
),
storage_profile_image_reference=azure.compute.ScaleSetStorageProfileImageReferenceArgs(
publisher="Canonical",
offer="UbuntuServer",
sku="16.04-LTS",
version="latest",
),
storage_profile_os_disk=azure.compute.ScaleSetStorageProfileOsDiskArgs(
name="",
caching="ReadWrite",
create_option="FromImage",
managed_disk_type="Standard_LRS",
),
storage_profile_data_disks=[azure.compute.ScaleSetStorageProfileDataDiskArgs(
lun=0,
caching="ReadWrite",
create_option="Empty",
disk_size_gb=10,
)],
os_profile=azure.compute.ScaleSetOsProfileArgs(
computer_name_prefix="testvm",
admin_username="myadmin",
),
os_profile_linux_config=azure.compute.ScaleSetOsProfileLinuxConfigArgs(
disable_password_authentication=True,
ssh_keys=[azure.compute.ScaleSetOsProfileLinuxConfigSshKeyArgs(
path="/home/myadmin/.ssh/authorized_keys",
key_data=(lambda path: open(path).read())("~/.ssh/demo_key.pub"),
)],
),
network_profiles=[azure.compute.ScaleSetNetworkProfileArgs(
name="mynetworkprofile",
primary=True,
ip_configurations=[azure.compute.ScaleSetNetworkProfileIpConfigurationArgs(
name="TestIPConfiguration",
primary=True,
subnet_id=example_subnet.id,
load_balancer_backend_address_pool_ids=[bpepool.id],
load_balancer_inbound_nat_rules_ids=[lbnatpool.id],
)],
)],
tags={
"environment": "staging",
})
```
### With Unmanaged Disks
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_virtual_network = azure.network.VirtualNetwork("exampleVirtualNetwork",
address_spaces=["10.0.0.0/16"],
location="West US",
resource_group_name=example_resource_group.name)
example_subnet = azure.network.Subnet("exampleSubnet",
resource_group_name=example_resource_group.name,
virtual_network_name=example_virtual_network.name,
address_prefixes=["10.0.2.0/24"])
example_account = azure.storage.Account("exampleAccount",
resource_group_name=example_resource_group.name,
location="westus",
account_tier="Standard",
account_replication_type="LRS",
tags={
"environment": "staging",
})
example_container = azure.storage.Container("exampleContainer",
storage_account_name=example_account.name,
container_access_type="private")
example_scale_set = azure.compute.ScaleSet("exampleScaleSet",
location="West US",
resource_group_name=example_resource_group.name,
upgrade_policy_mode="Manual",
sku=azure.compute.ScaleSetSkuArgs(
name="Standard_F2",
tier="Standard",
capacity=2,
),
os_profile=azure.compute.ScaleSetOsProfileArgs(
computer_name_prefix="testvm",
admin_username="myadmin",
),
os_profile_linux_config=azure.compute.ScaleSetOsProfileLinuxConfigArgs(
disable_password_authentication=True,
ssh_keys=[azure.compute.ScaleSetOsProfileLinuxConfigSshKeyArgs(
path="/home/myadmin/.ssh/authorized_keys",
key_data=(lambda path: open(path).read())("~/.ssh/demo_key.pub"),
)],
),
network_profiles=[azure.compute.ScaleSetNetworkProfileArgs(
name="TestNetworkProfile",
primary=True,
ip_configurations=[azure.compute.ScaleSetNetworkProfileIpConfigurationArgs(
name="TestIPConfiguration",
primary=True,
subnet_id=example_subnet.id,
)],
)],
storage_profile_os_disk=azure.compute.ScaleSetStorageProfileOsDiskArgs(
name="osDiskProfile",
caching="ReadWrite",
create_option="FromImage",
vhd_containers=[pulumi.Output.all(example_account.primary_blob_endpoint, example_container.name).apply(lambda primary_blob_endpoint, name: f"{primary_blob_endpoint}{name}")],
),
storage_profile_image_reference=azure.compute.ScaleSetStorageProfileImageReferenceArgs(
publisher="Canonical",
offer="UbuntuServer",
sku="16.04-LTS",
version="latest",
))
```
## Example of storage_profile_image_reference with id
```python
import pulumi
import pulumi_azure as azure
example_image = azure.compute.Image("exampleImage")
# ...
example_scale_set = azure.compute.ScaleSet("exampleScaleSet", storage_profile_image_reference=azure.compute.ScaleSetStorageProfileImageReferenceArgs(
id=example_image.id,
))
# ...
```
## Import
Virtual Machine Scale Sets can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:compute/scaleSet:ScaleSet scaleset1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Compute/virtualMachineScaleSets/scaleset1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] automatic_os_upgrade: Automatic OS patches can be applied by Azure to your scale set. This is particularly useful when `upgrade_policy_mode` is set to `Rolling`. Defaults to `false`.
:param pulumi.Input[pulumi.InputType['ScaleSetBootDiagnosticsArgs']] boot_diagnostics: A boot diagnostics profile block as referenced below.
:param pulumi.Input[str] eviction_policy: Specifies the eviction policy for Virtual Machines in this Scale Set. Possible values are `Deallocate` and `Delete`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetExtensionArgs']]]] extensions: Can be specified multiple times to add extension profiles to the scale set. Each `extension` block supports the fields documented below.
:param pulumi.Input[str] health_probe_id: Specifies the identifier for the load balancer health probe. Required when using `Rolling` as your `upgrade_policy_mode`.
:param pulumi.Input[str] license_type: Specifies the Windows OS license type. If supplied, the only allowed values are `Windows_Client` and `Windows_Server`.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the virtual machine scale set resource. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetNetworkProfileArgs']]]] network_profiles: A collection of network profile blocks as documented below.
:param pulumi.Input[pulumi.InputType['ScaleSetOsProfileArgs']] os_profile: A Virtual Machine OS Profile block as documented below.
:param pulumi.Input[pulumi.InputType['ScaleSetOsProfileLinuxConfigArgs']] os_profile_linux_config: A Linux config block as documented below.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetOsProfileSecretArgs']]]] os_profile_secrets: A collection of Secret blocks as documented below.
:param pulumi.Input[pulumi.InputType['ScaleSetOsProfileWindowsConfigArgs']] os_profile_windows_config: A Windows config block as documented below.
:param pulumi.Input[bool] overprovision: Specifies whether the virtual machine scale set should be overprovisioned. Defaults to `true`.
:param pulumi.Input[pulumi.InputType['ScaleSetPlanArgs']] plan: A plan block as documented below.
:param pulumi.Input[str] priority: Specifies the priority for the Virtual Machines in the Scale Set. Defaults to `Regular`. Possible values are `Low` and `Regular`.
:param pulumi.Input[str] proximity_placement_group_id: The ID of the Proximity Placement Group to which this Virtual Machine should be assigned. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the virtual machine scale set. Changing this forces a new resource to be created.
:param pulumi.Input[pulumi.InputType['ScaleSetRollingUpgradePolicyArgs']] rolling_upgrade_policy: A `rolling_upgrade_policy` block as defined below. This is only applicable when the `upgrade_policy_mode` is `Rolling`.
:param pulumi.Input[bool] single_placement_group: Specifies whether the scale set is limited to a single placement group with a maximum size of 100 virtual machines. If set to false, managed disks must be used. Default is true. Changing this forces a new resource to be created. See [documentation](http://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups) for more information.
:param pulumi.Input[pulumi.InputType['ScaleSetSkuArgs']] sku: A sku block as documented below.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileDataDiskArgs']]]] storage_profile_data_disks: A storage profile data disk block as documented below.
:param pulumi.Input[pulumi.InputType['ScaleSetStorageProfileImageReferenceArgs']] storage_profile_image_reference: A storage profile image reference block as documented below.
:param pulumi.Input[pulumi.InputType['ScaleSetStorageProfileOsDiskArgs']] storage_profile_os_disk: A storage profile os disk block as documented below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] upgrade_policy_mode: Specifies the mode of an upgrade to virtual machines in the scale set. Possible values are `Rolling`, `Manual`, or `Automatic`. When choosing `Rolling`, you will need to set a health probe.
:param pulumi.Input[Sequence[pulumi.Input[str]]] zones: A collection of availability zones to spread the Virtual Machines over.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ScaleSetArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a virtual machine scale set.
## Disclaimers
> **Note:** The `compute.ScaleSet` resource has been superseded by the [`compute.LinuxVirtualMachineScaleSet`](linux_virtual_machine_scale_set.html) and `compute.WindowsVirtualMachineScaleSet` resources. The existing `compute.ScaleSet` resource will continue to be available throughout the 2.x releases; however, it is in a feature-frozen state to maintain compatibility - new functionality will instead be added to the `compute.LinuxVirtualMachineScaleSet` and `compute.WindowsVirtualMachineScaleSet` resources.
## Example Usage
### With Managed Disks (Recommended)
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_virtual_network = azure.network.VirtualNetwork("exampleVirtualNetwork",
address_spaces=["10.0.0.0/16"],
location=example_resource_group.location,
resource_group_name=example_resource_group.name)
example_subnet = azure.network.Subnet("exampleSubnet",
resource_group_name=example_resource_group.name,
virtual_network_name=example_virtual_network.name,
address_prefixes=["10.0.2.0/24"])
example_public_ip = azure.network.PublicIp("examplePublicIp",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
allocation_method="Static",
domain_name_label=example_resource_group.name,
tags={
"environment": "staging",
})
example_load_balancer = azure.lb.LoadBalancer("exampleLoadBalancer",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
frontend_ip_configurations=[azure.lb.LoadBalancerFrontendIpConfigurationArgs(
name="PublicIPAddress",
public_ip_address_id=example_public_ip.id,
)])
bpepool = azure.lb.BackendAddressPool("bpepool",
resource_group_name=example_resource_group.name,
loadbalancer_id=example_load_balancer.id)
lbnatpool = azure.lb.NatPool("lbnatpool",
resource_group_name=example_resource_group.name,
loadbalancer_id=example_load_balancer.id,
protocol="Tcp",
frontend_port_start=50000,
frontend_port_end=50119,
backend_port=22,
frontend_ip_configuration_name="PublicIPAddress")
example_probe = azure.lb.Probe("exampleProbe",
resource_group_name=example_resource_group.name,
loadbalancer_id=example_load_balancer.id,
protocol="Http",
request_path="/health",
port=8080)
example_scale_set = azure.compute.ScaleSet("exampleScaleSet",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
automatic_os_upgrade=True,
upgrade_policy_mode="Rolling",
rolling_upgrade_policy=azure.compute.ScaleSetRollingUpgradePolicyArgs(
max_batch_instance_percent=20,
max_unhealthy_instance_percent=20,
max_unhealthy_upgraded_instance_percent=5,
pause_time_between_batches="PT0S",
),
health_probe_id=example_probe.id,
sku=azure.compute.ScaleSetSkuArgs(
name="Standard_F2",
tier="Standard",
capacity=2,
),
storage_profile_image_reference=azure.compute.ScaleSetStorageProfileImageReferenceArgs(
publisher="Canonical",
offer="UbuntuServer",
sku="16.04-LTS",
version="latest",
),
storage_profile_os_disk=azure.compute.ScaleSetStorageProfileOsDiskArgs(
name="",
caching="ReadWrite",
create_option="FromImage",
managed_disk_type="Standard_LRS",
),
storage_profile_data_disks=[azure.compute.ScaleSetStorageProfileDataDiskArgs(
lun=0,
caching="ReadWrite",
create_option="Empty",
disk_size_gb=10,
)],
os_profile=azure.compute.ScaleSetOsProfileArgs(
computer_name_prefix="testvm",
admin_username="myadmin",
),
os_profile_linux_config=azure.compute.ScaleSetOsProfileLinuxConfigArgs(
disable_password_authentication=True,
ssh_keys=[azure.compute.ScaleSetOsProfileLinuxConfigSshKeyArgs(
path="/home/myadmin/.ssh/authorized_keys",
key_data=(lambda path: open(path).read())("~/.ssh/demo_key.pub"),
)],
),
network_profiles=[azure.compute.ScaleSetNetworkProfileArgs(
name="mynetworkprofile",
primary=True,
ip_configurations=[azure.compute.ScaleSetNetworkProfileIpConfigurationArgs(
name="TestIPConfiguration",
primary=True,
subnet_id=example_subnet.id,
load_balancer_backend_address_pool_ids=[bpepool.id],
load_balancer_inbound_nat_rules_ids=[lbnatpool.id],
)],
)],
tags={
"environment": "staging",
})
```
### With Unmanaged Disks
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_virtual_network = azure.network.VirtualNetwork("exampleVirtualNetwork",
address_spaces=["10.0.0.0/16"],
location="West US",
resource_group_name=example_resource_group.name)
example_subnet = azure.network.Subnet("exampleSubnet",
resource_group_name=example_resource_group.name,
virtual_network_name=example_virtual_network.name,
address_prefixes=["10.0.2.0/24"])
example_account = azure.storage.Account("exampleAccount",
resource_group_name=example_resource_group.name,
location="westus",
account_tier="Standard",
account_replication_type="LRS",
tags={
"environment": "staging",
})
example_container = azure.storage.Container("exampleContainer",
storage_account_name=example_account.name,
container_access_type="private")
example_scale_set = azure.compute.ScaleSet("exampleScaleSet",
location="West US",
resource_group_name=example_resource_group.name,
upgrade_policy_mode="Manual",
sku=azure.compute.ScaleSetSkuArgs(
name="Standard_F2",
tier="Standard",
capacity=2,
),
os_profile=azure.compute.ScaleSetOsProfileArgs(
computer_name_prefix="testvm",
admin_username="myadmin",
),
os_profile_linux_config=azure.compute.ScaleSetOsProfileLinuxConfigArgs(
disable_password_authentication=True,
ssh_keys=[azure.compute.ScaleSetOsProfileLinuxConfigSshKeyArgs(
path="/home/myadmin/.ssh/authorized_keys",
key_data=(lambda path: open(path).read())("~/.ssh/demo_key.pub"),
)],
),
network_profiles=[azure.compute.ScaleSetNetworkProfileArgs(
name="TestNetworkProfile",
primary=True,
ip_configurations=[azure.compute.ScaleSetNetworkProfileIpConfigurationArgs(
name="TestIPConfiguration",
primary=True,
subnet_id=example_subnet.id,
)],
)],
storage_profile_os_disk=azure.compute.ScaleSetStorageProfileOsDiskArgs(
name="osDiskProfile",
caching="ReadWrite",
create_option="FromImage",
vhd_containers=[pulumi.Output.all(example_account.primary_blob_endpoint, example_container.name).apply(lambda primary_blob_endpoint, name: f"{primary_blob_endpoint}{name}")],
),
storage_profile_image_reference=azure.compute.ScaleSetStorageProfileImageReferenceArgs(
publisher="Canonical",
offer="UbuntuServer",
sku="16.04-LTS",
version="latest",
))
```
## Example of storage_profile_image_reference with id
```python
import pulumi
import pulumi_azure as azure
example_image = azure.compute.Image("exampleImage")
# ...
example_scale_set = azure.compute.ScaleSet("exampleScaleSet", storage_profile_image_reference=azure.compute.ScaleSetStorageProfileImageReferenceArgs(
id=example_image.id,
))
# ...
```
## Import
Virtual Machine Scale Sets can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:compute/scaleSet:ScaleSet scaleset1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Compute/virtualMachineScaleSets/scaleset1
```
:param str resource_name: The name of the resource.
:param ScaleSetArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ScaleSetArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
automatic_os_upgrade: Optional[pulumi.Input[bool]] = None,
boot_diagnostics: Optional[pulumi.Input[pulumi.InputType['ScaleSetBootDiagnosticsArgs']]] = None,
eviction_policy: Optional[pulumi.Input[str]] = None,
extensions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetExtensionArgs']]]]] = None,
health_probe_id: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ScaleSetIdentityArgs']]] = None,
license_type: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network_profiles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetNetworkProfileArgs']]]]] = None,
os_profile: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileArgs']]] = None,
os_profile_linux_config: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileLinuxConfigArgs']]] = None,
os_profile_secrets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetOsProfileSecretArgs']]]]] = None,
os_profile_windows_config: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileWindowsConfigArgs']]] = None,
overprovision: Optional[pulumi.Input[bool]] = None,
plan: Optional[pulumi.Input[pulumi.InputType['ScaleSetPlanArgs']]] = None,
priority: Optional[pulumi.Input[str]] = None,
proximity_placement_group_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
rolling_upgrade_policy: Optional[pulumi.Input[pulumi.InputType['ScaleSetRollingUpgradePolicyArgs']]] = None,
single_placement_group: Optional[pulumi.Input[bool]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['ScaleSetSkuArgs']]] = None,
storage_profile_data_disks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileDataDiskArgs']]]]] = None,
storage_profile_image_reference: Optional[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileImageReferenceArgs']]] = None,
storage_profile_os_disk: Optional[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileOsDiskArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
upgrade_policy_mode: Optional[pulumi.Input[str]] = None,
zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ScaleSetArgs.__new__(ScaleSetArgs)
__props__.__dict__["automatic_os_upgrade"] = automatic_os_upgrade
__props__.__dict__["boot_diagnostics"] = boot_diagnostics
__props__.__dict__["eviction_policy"] = eviction_policy
__props__.__dict__["extensions"] = extensions
__props__.__dict__["health_probe_id"] = health_probe_id
__props__.__dict__["identity"] = identity
__props__.__dict__["license_type"] = license_type
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
if network_profiles is None and not opts.urn:
raise TypeError("Missing required property 'network_profiles'")
__props__.__dict__["network_profiles"] = network_profiles
if os_profile is None and not opts.urn:
raise TypeError("Missing required property 'os_profile'")
__props__.__dict__["os_profile"] = os_profile
__props__.__dict__["os_profile_linux_config"] = os_profile_linux_config
__props__.__dict__["os_profile_secrets"] = os_profile_secrets
__props__.__dict__["os_profile_windows_config"] = os_profile_windows_config
__props__.__dict__["overprovision"] = overprovision
__props__.__dict__["plan"] = plan
__props__.__dict__["priority"] = priority
__props__.__dict__["proximity_placement_group_id"] = proximity_placement_group_id
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["rolling_upgrade_policy"] = rolling_upgrade_policy
__props__.__dict__["single_placement_group"] = single_placement_group
if sku is None and not opts.urn:
raise TypeError("Missing required property 'sku'")
__props__.__dict__["sku"] = sku
__props__.__dict__["storage_profile_data_disks"] = storage_profile_data_disks
__props__.__dict__["storage_profile_image_reference"] = storage_profile_image_reference
if storage_profile_os_disk is None and not opts.urn:
raise TypeError("Missing required property 'storage_profile_os_disk'")
__props__.__dict__["storage_profile_os_disk"] = storage_profile_os_disk
__props__.__dict__["tags"] = tags
if upgrade_policy_mode is None and not opts.urn:
raise TypeError("Missing required property 'upgrade_policy_mode'")
__props__.__dict__["upgrade_policy_mode"] = upgrade_policy_mode
__props__.__dict__["zones"] = zones
super(ScaleSet, __self__).__init__(
'azure:compute/scaleSet:ScaleSet',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
automatic_os_upgrade: Optional[pulumi.Input[bool]] = None,
boot_diagnostics: Optional[pulumi.Input[pulumi.InputType['ScaleSetBootDiagnosticsArgs']]] = None,
eviction_policy: Optional[pulumi.Input[str]] = None,
extensions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetExtensionArgs']]]]] = None,
health_probe_id: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ScaleSetIdentityArgs']]] = None,
license_type: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network_profiles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetNetworkProfileArgs']]]]] = None,
os_profile: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileArgs']]] = None,
os_profile_linux_config: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileLinuxConfigArgs']]] = None,
os_profile_secrets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetOsProfileSecretArgs']]]]] = None,
os_profile_windows_config: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileWindowsConfigArgs']]] = None,
overprovision: Optional[pulumi.Input[bool]] = None,
plan: Optional[pulumi.Input[pulumi.InputType['ScaleSetPlanArgs']]] = None,
priority: Optional[pulumi.Input[str]] = None,
proximity_placement_group_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
rolling_upgrade_policy: Optional[pulumi.Input[pulumi.InputType['ScaleSetRollingUpgradePolicyArgs']]] = None,
single_placement_group: Optional[pulumi.Input[bool]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['ScaleSetSkuArgs']]] = None,
storage_profile_data_disks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileDataDiskArgs']]]]] = None,
storage_profile_image_reference: Optional[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileImageReferenceArgs']]] = None,
storage_profile_os_disk: Optional[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileOsDiskArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
upgrade_policy_mode: Optional[pulumi.Input[str]] = None,
zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'ScaleSet':
"""
Get an existing ScaleSet resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] automatic_os_upgrade: Automatic OS patches can be applied by Azure to your scale set. This is particularly useful when `upgrade_policy_mode` is set to `Rolling`. Defaults to `false`.
:param pulumi.Input[pulumi.InputType['ScaleSetBootDiagnosticsArgs']] boot_diagnostics: A boot diagnostics profile block as referenced below.
:param pulumi.Input[str] eviction_policy: Specifies the eviction policy for Virtual Machines in this Scale Set. Possible values are `Deallocate` and `Delete`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetExtensionArgs']]]] extensions: Can be specified multiple times to add extension profiles to the scale set. Each `extension` block supports the fields documented below.
:param pulumi.Input[str] health_probe_id: Specifies the identifier for the load balancer health probe. Required when using `Rolling` as your `upgrade_policy_mode`.
:param pulumi.Input[str] license_type: Specifies the Windows OS license type. If supplied, the only allowed values are `Windows_Client` and `Windows_Server`.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the virtual machine scale set resource. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetNetworkProfileArgs']]]] network_profiles: A collection of network profile blocks as documented below.
:param pulumi.Input[pulumi.InputType['ScaleSetOsProfileArgs']] os_profile: A Virtual Machine OS Profile block as documented below.
:param pulumi.Input[pulumi.InputType['ScaleSetOsProfileLinuxConfigArgs']] os_profile_linux_config: A Linux config block as documented below.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetOsProfileSecretArgs']]]] os_profile_secrets: A collection of Secret blocks as documented below.
:param pulumi.Input[pulumi.InputType['ScaleSetOsProfileWindowsConfigArgs']] os_profile_windows_config: A Windows config block as documented below.
:param pulumi.Input[bool] overprovision: Specifies whether the virtual machine scale set should be overprovisioned. Defaults to `true`.
:param pulumi.Input[pulumi.InputType['ScaleSetPlanArgs']] plan: A plan block as documented below.
:param pulumi.Input[str] priority: Specifies the priority for the Virtual Machines in the Scale Set. Defaults to `Regular`. Possible values are `Low` and `Regular`.
:param pulumi.Input[str] proximity_placement_group_id: The ID of the Proximity Placement Group to which this Virtual Machine should be assigned. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the virtual machine scale set. Changing this forces a new resource to be created.
:param pulumi.Input[pulumi.InputType['ScaleSetRollingUpgradePolicyArgs']] rolling_upgrade_policy: A `rolling_upgrade_policy` block as defined below. This is only applicable when the `upgrade_policy_mode` is `Rolling`.
:param pulumi.Input[bool] single_placement_group: Specifies whether the scale set is limited to a single placement group with a maximum size of 100 virtual machines. If set to false, managed disks must be used. Default is true. Changing this forces a new resource to be created. See [documentation](http://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups) for more information.
:param pulumi.Input[pulumi.InputType['ScaleSetSkuArgs']] sku: A sku block as documented below.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileDataDiskArgs']]]] storage_profile_data_disks: A storage profile data disk block as documented below.
:param pulumi.Input[pulumi.InputType['ScaleSetStorageProfileImageReferenceArgs']] storage_profile_image_reference: A storage profile image reference block as documented below.
:param pulumi.Input[pulumi.InputType['ScaleSetStorageProfileOsDiskArgs']] storage_profile_os_disk: A storage profile os disk block as documented below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] upgrade_policy_mode: Specifies the mode of an upgrade to virtual machines in the scale set. Possible values are `Rolling`, `Manual`, or `Automatic`. When choosing `Rolling`, you will need to set a health probe.
:param pulumi.Input[Sequence[pulumi.Input[str]]] zones: A collection of availability zones to spread the Virtual Machines over.
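## Example of looking up an existing Scale Set
A minimal sketch of using `get`; the logical name and resource ID below are placeholder assumptions:
```python
import pulumi
import pulumi_azure as azure
# Look up an existing scale set by its Azure resource ID (hypothetical values).
existing = azure.compute.ScaleSet.get("existingScaleSet",
id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Compute/virtualMachineScaleSets/scaleset1")
# The resulting resource exposes the same output properties as a created one.
pulumi.export("scaleSetSku", existing.sku)
```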
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ScaleSetState.__new__(_ScaleSetState)
__props__.__dict__["automatic_os_upgrade"] = automatic_os_upgrade
__props__.__dict__["boot_diagnostics"] = boot_diagnostics
__props__.__dict__["eviction_policy"] = eviction_policy
__props__.__dict__["extensions"] = extensions
__props__.__dict__["health_probe_id"] = health_probe_id
__props__.__dict__["identity"] = identity
__props__.__dict__["license_type"] = license_type
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["network_profiles"] = network_profiles
__props__.__dict__["os_profile"] = os_profile
__props__.__dict__["os_profile_linux_config"] = os_profile_linux_config
__props__.__dict__["os_profile_secrets"] = os_profile_secrets
__props__.__dict__["os_profile_windows_config"] = os_profile_windows_config
__props__.__dict__["overprovision"] = overprovision
__props__.__dict__["plan"] = plan
__props__.__dict__["priority"] = priority
__props__.__dict__["proximity_placement_group_id"] = proximity_placement_group_id
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["rolling_upgrade_policy"] = rolling_upgrade_policy
__props__.__dict__["single_placement_group"] = single_placement_group
__props__.__dict__["sku"] = sku
__props__.__dict__["storage_profile_data_disks"] = storage_profile_data_disks
__props__.__dict__["storage_profile_image_reference"] = storage_profile_image_reference
__props__.__dict__["storage_profile_os_disk"] = storage_profile_os_disk
__props__.__dict__["tags"] = tags
__props__.__dict__["upgrade_policy_mode"] = upgrade_policy_mode
__props__.__dict__["zones"] = zones
return ScaleSet(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="automaticOsUpgrade")
def automatic_os_upgrade(self) -> pulumi.Output[Optional[bool]]:
"""
Automatic OS patches can be applied by Azure to your scale set. This is particularly useful when `upgrade_policy_mode` is set to `Rolling`. Defaults to `false`.
"""
return pulumi.get(self, "automatic_os_upgrade")
@property
@pulumi.getter(name="bootDiagnostics")
def boot_diagnostics(self) -> pulumi.Output[Optional['outputs.ScaleSetBootDiagnostics']]:
"""
A boot diagnostics profile block as referenced below.
"""
return pulumi.get(self, "boot_diagnostics")
@property
@pulumi.getter(name="evictionPolicy")
def eviction_policy(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the eviction policy for Virtual Machines in this Scale Set. Possible values are `Deallocate` and `Delete`.
"""
return pulumi.get(self, "eviction_policy")
@property
@pulumi.getter
def extensions(self) -> pulumi.Output[Optional[Sequence['outputs.ScaleSetExtension']]]:
"""
Can be specified multiple times to add extension profiles to the scale set. Each `extension` block supports the fields documented below.
"""
return pulumi.get(self, "extensions")
@property
@pulumi.getter(name="healthProbeId")
def health_probe_id(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the identifier for the load balancer health probe. Required when using `Rolling` as your `upgrade_policy_mode`.
"""
return pulumi.get(self, "health_probe_id")
@property
@pulumi.getter
def identity(self) -> pulumi.Output['outputs.ScaleSetIdentity']:
return pulumi.get(self, "identity")
@property
@pulumi.getter(name="licenseType")
def license_type(self) -> pulumi.Output[str]:
"""
Specifies the Windows OS license type. If supplied, the only allowed values are `Windows_Client` and `Windows_Server`.
"""
return pulumi.get(self, "license_type")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the virtual machine scale set resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkProfiles")
def network_profiles(self) -> pulumi.Output[Sequence['outputs.ScaleSetNetworkProfile']]:
"""
A collection of network profile blocks as documented below.
"""
return pulumi.get(self, "network_profiles")
@property
@pulumi.getter(name="osProfile")
def os_profile(self) -> pulumi.Output['outputs.ScaleSetOsProfile']:
"""
A Virtual Machine OS Profile block as documented below.
"""
return pulumi.get(self, "os_profile")
@property
@pulumi.getter(name="osProfileLinuxConfig")
def os_profile_linux_config(self) -> pulumi.Output['outputs.ScaleSetOsProfileLinuxConfig']:
"""
A Linux config block as documented below.
"""
return pulumi.get(self, "os_profile_linux_config")
@property
@pulumi.getter(name="osProfileSecrets")
def os_profile_secrets(self) -> pulumi.Output[Optional[Sequence['outputs.ScaleSetOsProfileSecret']]]:
"""
A collection of Secret blocks as documented below.
"""
return pulumi.get(self, "os_profile_secrets")
@property
@pulumi.getter(name="osProfileWindowsConfig")
def os_profile_windows_config(self) -> pulumi.Output[Optional['outputs.ScaleSetOsProfileWindowsConfig']]:
"""
A Windows config block as documented below.
"""
return pulumi.get(self, "os_profile_windows_config")
@property
@pulumi.getter
def overprovision(self) -> pulumi.Output[Optional[bool]]:
"""
Specifies whether the virtual machine scale set should be overprovisioned. Defaults to `true`.
"""
return pulumi.get(self, "overprovision")
@property
@pulumi.getter
def plan(self) -> pulumi.Output[Optional['outputs.ScaleSetPlan']]:
"""
A plan block as documented below.
"""
return pulumi.get(self, "plan")
@property
@pulumi.getter
def priority(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the priority for the Virtual Machines in the Scale Set. Defaults to `Regular`. Possible values are `Low` and `Regular`.
"""
return pulumi.get(self, "priority")
@property
@pulumi.getter(name="proximityPlacementGroupId")
def proximity_placement_group_id(self) -> pulumi.Output[Optional[str]]:
"""
The ID of the Proximity Placement Group to which this Virtual Machine should be assigned. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "proximity_placement_group_id")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group in which to create the virtual machine scale set. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="rollingUpgradePolicy")
def rolling_upgrade_policy(self) -> pulumi.Output[Optional['outputs.ScaleSetRollingUpgradePolicy']]:
"""
A `rolling_upgrade_policy` block as defined below. This is only applicable when the `upgrade_policy_mode` is `Rolling`.
"""
return pulumi.get(self, "rolling_upgrade_policy")
@property
@pulumi.getter(name="singlePlacementGroup")
def single_placement_group(self) -> pulumi.Output[Optional[bool]]:
"""
Specifies whether the scale set is limited to a single placement group with a maximum size of 100 virtual machines. If set to false, managed disks must be used. Default is true. Changing this forces a new resource to be created. See [documentation](http://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups) for more information.
"""
return pulumi.get(self, "single_placement_group")
@property
@pulumi.getter
def sku(self) -> pulumi.Output['outputs.ScaleSetSku']:
"""
A sku block as documented below.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="storageProfileDataDisks")
def storage_profile_data_disks(self) -> pulumi.Output[Optional[Sequence['outputs.ScaleSetStorageProfileDataDisk']]]:
"""
A storage profile data disk block as documented below.
"""
return pulumi.get(self, "storage_profile_data_disks")
@property
@pulumi.getter(name="storageProfileImageReference")
def storage_profile_image_reference(self) -> pulumi.Output['outputs.ScaleSetStorageProfileImageReference']:
"""
A storage profile image reference block as documented below.
"""
return pulumi.get(self, "storage_profile_image_reference")
@property
@pulumi.getter(name="storageProfileOsDisk")
def storage_profile_os_disk(self) -> pulumi.Output['outputs.ScaleSetStorageProfileOsDisk']:
"""
A storage profile os disk block as documented below.
"""
return pulumi.get(self, "storage_profile_os_disk")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="upgradePolicyMode")
def upgrade_policy_mode(self) -> pulumi.Output[str]:
"""
Specifies the mode of an upgrade to virtual machines in the scale set. Possible values are `Rolling`, `Manual`, or `Automatic`. When choosing `Rolling`, you will need to set a health probe.
"""
return pulumi.get(self, "upgrade_policy_mode")
@property
@pulumi.getter
def zones(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A collection of availability zones to spread the Virtual Machines over.
"""
return pulumi.get(self, "zones")
| """
A storage profile image reference block as documented below.
"""
return pulumi.get(self, "storage_profile_image_reference") |
prover.rs
// External imports
use anyhow::format_err;
use once_cell::sync::Lazy;
use tokio::sync::Mutex;
// Workspace imports
use zksync_types::{
prover::{ProverJob, ProverJobType},
BlockNumber,
};
// Local imports
use crate::test_data::{gen_sample_block, get_sample_aggregated_proof, get_sample_single_proof};
use crate::tests::db_test;
use crate::{prover::ProverSchema, QueryResult, StorageProcessor};
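// Serializes tests that touch the `prover_job_queue` table: the table is locked
// when accessed, so concurrent access from parallel tests can deadlock.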
static MUTEX: Lazy<Mutex<()>> = Lazy::new(|| Mutex::new(()));
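/// Fetches the next idle prover job from the job queue, failing if none is available.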
async fn get_idle_job_from_queue(mut storage: &mut StorageProcessor<'_>) -> QueryResult<ProverJob> {
let job = ProverSchema(&mut storage)
.get_idle_prover_job_from_job_queue()
.await?;
job.ok_or_else(|| format_err!("expect idle job from job queue"))
}
/// Checks that the `prover_job_queue` correctly processes requests to it.
/// `prover_job_queue` table is locked when accessed, so it cannot be accessed simultaneously.
#[db_test]
async fn test_prover_job_queue(mut storage: StorageProcessor<'_>) -> QueryResult<()> {
// Lock to prevent database deadlock
let _lock = MUTEX.lock().await;
test_store_proof(&mut storage).await?;
pending_jobs_count(&mut storage).await?;
Ok(())
}
/// Checks that the single and aggregated proof can be stored and loaded.
async fn test_store_proof(mut storage: &mut StorageProcessor<'_>) -> QueryResult<()> {
// Attempting to load a proof that was not stored should result in `None`.
let loaded_proof = ProverSchema(&mut storage)
.load_proof(BlockNumber(1))
.await
.expect("Error while obtaining proof");
let loaded_aggregated_proof = ProverSchema(&mut storage)
.load_aggregated_proof(BlockNumber(1), BlockNumber(1))
.await
.expect("Error while obtaining proof");
assert!(loaded_proof.is_none());
assert!(loaded_aggregated_proof.is_none());
// Attempt to store a proof for which there is no associated job in `prover_job_queue`.
let proof = get_sample_single_proof();
let aggregated_proof = get_sample_aggregated_proof();
let stored_proof = ProverSchema(&mut storage)
.store_proof(1, BlockNumber(1), &proof)
.await;
let stored_aggregated_proof = ProverSchema(&mut storage)
.store_aggregated_proof(1, BlockNumber(1), BlockNumber(1), &aggregated_proof)
.await;
assert!(stored_proof
.err()
.unwrap()
.to_string()
.contains("Missing job for stored proof"));
assert!(stored_aggregated_proof
.err()
.unwrap()
.to_string()
.contains("Missing job for stored aggregated proof"));
// Add jobs to `prover_job_queue`.
let job_data = serde_json::Value::default();
let stored_job = ProverSchema(&mut storage)
.add_prover_job_to_job_queue(
BlockNumber(1),
BlockNumber(1),
job_data.clone(),
0,
ProverJobType::SingleProof,
)
.await;
let stored_aggregated_job = ProverSchema(&mut storage)
.add_prover_job_to_job_queue(
BlockNumber(1),
BlockNumber(1),
job_data,
1,
ProverJobType::AggregatedProof,
)
.await;
assert!(stored_job.is_ok());
assert!(stored_aggregated_job.is_ok());
// Get job id.
let stored_job_id = get_idle_job_from_queue(&mut storage).await?.job_id;
let stored_aggregated_job_id = get_idle_job_from_queue(&mut storage).await?.job_id;
// Store proofs.
let stored_proof = ProverSchema(&mut storage)
.store_proof(stored_job_id, BlockNumber(1), &proof)
.await;
let stored_aggregated_proof = ProverSchema(&mut storage)
.store_aggregated_proof(
stored_aggregated_job_id,
BlockNumber(1),
BlockNumber(1),
&aggregated_proof,
)
.await;
assert!(stored_proof.is_ok());
assert!(stored_aggregated_proof.is_ok());
// Now load it.
let loaded_proof = ProverSchema(&mut storage)
.load_proof(BlockNumber(1))
.await?;
let loaded_aggregated_proof = ProverSchema(&mut storage)
.load_aggregated_proof(BlockNumber(1), BlockNumber(1))
.await?;
assert!(loaded_proof.is_some());
assert!(loaded_aggregated_proof.is_some());
Ok(())
}
/// Checks that the `pending_jobs_count` method of the schema returns the number
/// of jobs for which a proof has not been produced yet.
async fn pending_jobs_count(mut storage: &mut StorageProcessor<'_>) -> QueryResult<()> {
// Initially there are no jobs.
let jobs_count = ProverSchema(&mut storage).pending_jobs_count().await?;
assert_eq!(jobs_count, 0);
// Create some jobs.
ProverSchema(&mut storage)
.add_prover_job_to_job_queue(
BlockNumber(2),
BlockNumber(2),
Default::default(),
1,
ProverJobType::SingleProof,
)
.await?;
ProverSchema(&mut storage)
.add_prover_job_to_job_queue(
BlockNumber(3),
BlockNumber(3),
Default::default(),
1,
ProverJobType::SingleProof,
)
.await?;
ProverSchema(&mut storage)
.add_prover_job_to_job_queue(
BlockNumber(2),
BlockNumber(3),
Default::default(),
0,
ProverJobType::AggregatedProof,
)
.await?;
// We've created 3 jobs and no jobs were assigned yet.
let jobs_count = ProverSchema(&mut storage).pending_jobs_count().await?;
assert_eq!(jobs_count, 3);
let first_job = get_idle_job_from_queue(&mut storage).await?;
let jobs_count = ProverSchema(&mut storage).pending_jobs_count().await?;
assert_eq!(jobs_count, 3);
// Take the next job & repeat the checks.
let second_job = get_idle_job_from_queue(&mut storage).await?;
let jobs_count = ProverSchema(&mut storage).pending_jobs_count().await?;
assert_eq!(jobs_count, 3);
let third_job = get_idle_job_from_queue(&mut storage).await?;
let jobs_count = ProverSchema(&mut storage).pending_jobs_count().await?;
assert_eq!(jobs_count, 3);
// Record that the prover is working on these jobs; it will be stopped below.
ProverSchema(&mut storage)
.record_prover_is_working(first_job.job_id, "test_prover")
.await?;
ProverSchema(&mut storage)
.record_prover_is_working(second_job.job_id, "test_prover")
.await?;
ProverSchema(&mut storage)
.record_prover_is_working(third_job.job_id, "test_prover")
.await?;
// Store one proof and then turn off the prover.
ProverSchema(&mut storage)
.store_proof(
third_job.job_id,
third_job.first_block,
&get_sample_single_proof(),
)
.await?;
let jobs_count = ProverSchema(&mut storage).pending_jobs_count().await?;
assert_eq!(jobs_count, 2);
ProverSchema(&mut storage)
.record_prover_stop("test_prover")
.await?;
let jobs_count = ProverSchema(&mut storage).pending_jobs_count().await?;
assert_eq!(jobs_count, 2);
Ok(())
}
/// Checks that the witness can be stored and loaded.
#[db_test]
async fn test_store_witness(mut storage: StorageProcessor<'_>) -> QueryResult<()> {
const BLOCK_NUMBER: BlockNumber = BlockNumber(1);
const BLOCK_SIZE: usize = 100;
// No witness stored for the block.
assert!(storage
.prover_schema()
.get_witness(BLOCK_NUMBER)
.await?
.is_none());
// FK constraint.
storage
.chain()
.block_schema()
.save_block(gen_sample_block(
BLOCK_NUMBER,
BLOCK_SIZE,
Default::default(),
))
.await?;
// Store the witness.
let expected = String::from("test");
let witness = serde_json::to_value(expected.clone()).unwrap();
storage
.prover_schema()
.store_witness(BLOCK_NUMBER, witness)
.await?;
// Now load it.
let loaded = storage
.prover_schema()
.get_witness(BLOCK_NUMBER)
.await?
.map(|value| serde_json::from_value(value).unwrap());
assert_eq!(loaded.as_ref(), Some(&expected));
// Do nothing on conflict.
let not_expected = String::from("__test");
let witness = serde_json::to_value(not_expected.clone()).unwrap();
storage
.prover_schema()
.store_witness(BLOCK_NUMBER, witness)
.await?;
let loaded = storage
.prover_schema()
.get_witness(BLOCK_NUMBER)
.await?
.map(|value| serde_json::from_value(value).unwrap());
assert_ne!(loaded, Some(not_expected));
assert_eq!(loaded, Some(expected));
Ok(())
}
/// Checks that block witnesses are removed correctly.
#[db_test]
async fn test_remove_witnesses(mut storage: StorageProcessor<'_>) -> QueryResult<()> {
// Insert 5 blocks and witnesses for them.
for block_number in 1..=5 {
storage
.chain()
.block_schema()
.save_block(gen_sample_block(
BlockNumber(block_number),
100,
Default::default(),
))
.await?;
let witness = serde_json::to_value(String::from("test")).unwrap();
storage
.prover_schema()
.store_witness(BlockNumber(block_number), witness)
.await?;
}
// Remove witnesses for the 4th and 5th blocks.
storage
.prover_schema()
.remove_witnesses(BlockNumber(3))
.await?;
// Check that there is a witness for the 3rd block and no witness for the 4th.
assert!(storage
.prover_schema()
.get_witness(BlockNumber(3))
.await?
.is_some());
assert!(storage
.prover_schema()
.get_witness(BlockNumber(4))
.await?
.is_none());
Ok(())
}
/// Checks that block proofs are removed correctly.
#[db_test]
async fn test_remove_proofs(mut storage: StorageProcessor<'_>) -> QueryResult<()> {
// Lock to prevent database deadlock
let _lock = MUTEX.lock().await;
let proof = get_sample_single_proof();
let job_data = serde_json::Value::default();
// Insert proofs for 5 blocks.
for block_number in 1..=5 {
ProverSchema(&mut storage)
.add_prover_job_to_job_queue(
BlockNumber(block_number),
BlockNumber(block_number),
job_data.clone(),
0,
ProverJobType::SingleProof,
)
.await?;
let job_id = get_idle_job_from_queue(&mut storage).await?.job_id;
ProverSchema(&mut storage)
.store_proof(job_id, BlockNumber(block_number), &proof)
.await?;
}
// Remove proofs for the 4th and 5th blocks.
ProverSchema(&mut storage)
.remove_proofs(BlockNumber(3))
.await?;
// Check that there is a proof for the 3rd block and no proof for the 4th.
assert!(ProverSchema(&mut storage)
.load_proof(BlockNumber(3))
.await?
.is_some());
assert!(ProverSchema(&mut storage)
.load_proof(BlockNumber(4))
.await?
.is_none());
let aggregated_proof = get_sample_aggregated_proof();
// Insert aggregated proofs for blocks 1-2 and 3-5.
ProverSchema(&mut storage)
.add_prover_job_to_job_queue(
BlockNumber(1),
BlockNumber(2),
job_data.clone(),
1,
ProverJobType::AggregatedProof,
)
.await?;
let job_id = get_idle_job_from_queue(&mut storage).await?.job_id;
ProverSchema(&mut storage)
.store_aggregated_proof(job_id, BlockNumber(1), BlockNumber(2), &aggregated_proof)
.await?;
ProverSchema(&mut storage)
.add_prover_job_to_job_queue(
BlockNumber(3),
BlockNumber(5),
job_data.clone(),
1,
ProverJobType::AggregatedProof,
)
.await?;
let job_id = get_idle_job_from_queue(&mut storage).await?.job_id;
ProverSchema(&mut storage)
.store_aggregated_proof(job_id, BlockNumber(3), BlockNumber(5), &aggregated_proof)
.await?;
// Remove aggregated proofs for blocks with numbers greater than 3; this means the proof for blocks 3-5 should be deleted.
ProverSchema(&mut storage)
.remove_aggregated_proofs(BlockNumber(3))
.await?;
// Check that the proof for blocks 1-2 is present and the proof for blocks 3-5 is not.
assert!(ProverSchema(&mut storage)
.load_aggregated_proof(BlockNumber(1), BlockNumber(2))
.await?
.is_some());
assert!(ProverSchema(&mut storage)
.load_aggregated_proof(BlockNumber(3), BlockNumber(5))
.await?
.is_none());
Ok(())
}
/// Checks that prover jobs are removed correctly.
#[db_test]
async fn test_remove_prover_jobs(mut storage: StorageProcessor<'_>) -> QueryResult<()> {
let job_data = serde_json::Value::default();
// Insert jobs for blocks 1-3 and 4-5.
ProverSchema(&mut storage)
.add_prover_job_to_job_queue(
BlockNumber(1),
BlockNumber(3),
job_data.clone(),
1,
ProverJobType::AggregatedProof,
)
.await?;
ProverSchema(&mut storage)
.add_prover_job_to_job_queue(
BlockNumber(4),
BlockNumber(5),
job_data.clone(),
1,
ProverJobType::AggregatedProof,
)
.await?;
// Remove prover jobs for blocks with numbers greater than 2. After that, only the job covering blocks 1-2 should be left.
ProverSchema(&mut storage)
.remove_prover_jobs(BlockNumber(2))
.await?;
assert_eq!(
ProverSchema(&mut storage)
.get_last_block_prover_job_queue(ProverJobType::AggregatedProof)
.await?,
BlockNumber(2)
);
assert_eq!(ProverSchema(&mut storage).pending_jobs_count().await?, 1);
Ok(())
}
workbench.editor.module.ts | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {NgModule} from '@angular/core';
import {EditorComponent} from './component/detail-workbench/datail-workbench-editor/editor.component';
import {CommonModule} from '@common/common.module';
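// Feature module that bundles the workbench editor component so other
// workbench modules can import it as a single unit.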
@NgModule({
imports: [CommonModule],
declarations: [EditorComponent],
exports: [EditorComponent]
})
export class WorkbenchEditorModule {
}
pricesheet.go | package billing
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// PriceSheetClient is the billing client that provides access to billing resources for Azure subscriptions.
type PriceSheetClient struct {
BaseClient
}
// NewPriceSheetClient creates an instance of the PriceSheetClient client.
func NewPriceSheetClient(subscriptionID string) PriceSheetClient {
return NewPriceSheetClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewPriceSheetClientWithBaseURI creates an instance of the PriceSheetClient client using a custom endpoint. Use this
// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewPriceSheetClientWithBaseURI(baseURI string, subscriptionID string) PriceSheetClient {
return PriceSheetClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// Download downloads the price sheet for an invoice.
// Parameters:
// billingAccountName - Azure Billing Account ID.
// invoiceName - the name of an invoice resource.
func (client PriceSheetClient) Download(ctx context.Context, billingAccountName string, invoiceName string) (result PriceSheetDownloadFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/PriceSheetClient.Download")
defer func() {
sc := -1
if result.Response() != nil {
sc = result.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.DownloadPreparer(ctx, billingAccountName, invoiceName)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.PriceSheetClient", "Download", nil, "Failure preparing request")
return
}
result, err = client.DownloadSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.PriceSheetClient", "Download", nil, "Failure sending request")
return
}
return
}
// DownloadPreparer prepares the Download request.
func (client PriceSheetClient) DownloadPreparer(ctx context.Context, billingAccountName string, invoiceName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"billingAccountName": autorest.Encode("path", billingAccountName),
"invoiceName": autorest.Encode("path", invoiceName),
}
const APIVersion = "2018-11-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/invoices/{invoiceName}/pricesheet/default/download", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DownloadSender sends the Download request. The method will close the
// http.Response Body if it receives an error.
func (client PriceSheetClient) DownloadSender(req *http.Request) (future PriceSheetDownloadFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
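// Result is evaluated lazily: once polling reports the long-running
// operation as done, it re-fetches the terminal response and unmarshals
// the download URL from it.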
future.Result = func(client PriceSheetClient) (du DownloadURL, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.PriceSheetDownloadFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("billing.PriceSheetDownloadFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if du.Response.Response, err = future.GetResult(sender); err == nil && du.Response.Response.StatusCode != http.StatusNoContent {
du, err = client.DownloadResponder(du.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.PriceSheetDownloadFuture", "Result", du.Response.Response, "Failure responding to request")
}
}
return
}
return
}
// DownloadResponder handles the response to the Download request. The method always
// closes the http.Response Body.
func (client PriceSheetClient) DownloadResponder(resp *http.Response) (result DownloadURL, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
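// Illustrative usage sketch (not part of the generated client; the
// subscription, account, and invoice names below are placeholders and
// error handling is elided):
//
//	client := NewPriceSheetClient("<subscription id>")
//	future, err := client.Download(context.Background(), "myBillingAccount", "myInvoice")
//	if err == nil {
//		err = future.WaitForCompletionRef(context.Background(), client.Client)
//	}
//	if err == nil {
//		du, _ := future.Result(client)
//		// du.DownloadURL, when set, carries the link to the generated sheet.
//		_ = du
//	}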