identify.go
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package distros
import (
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"k8s.io/klog"
)
// FindDistribution identifies the distribution on which we are running
// We will likely remove this when everything is containerized
func FindDistribution(rootfs string) (Distribution, error) {
// Ubuntu has /etc/lsb-release (and /etc/debian_version)
lsbRelease, err := ioutil.ReadFile(path.Join(rootfs, "etc/lsb-release"))
if err == nil {
for _, line := range strings.Split(string(lsbRelease), "\n") {
line = strings.TrimSpace(line)
if line == "DISTRIB_CODENAME=xenial" {
return DistributionXenial, nil
} else if line == "DISTRIB_CODENAME=bionic" {
klog.Warningf("bionic is not fully supported nor tested for Kops and Kubernetes")
klog.Warningf("this should only be used for testing purposes.")
return DistributionBionic, nil
}
}
} else if !os.IsNotExist(err) {
klog.Warningf("error reading /etc/lsb-release: %v", err)
}
// Debian has /etc/debian_version
debianVersionBytes, err := ioutil.ReadFile(path.Join(rootfs, "etc/debian_version"))
if err == nil {
debianVersion := strings.TrimSpace(string(debianVersionBytes))
if strings.HasPrefix(debianVersion, "8.") {
return DistributionJessie, nil
} else if strings.HasPrefix(debianVersion, "9.") {
return DistributionDebian9, nil
} else if strings.HasPrefix(debianVersion, "10.") {
return DistributionDebian10, nil
} else {
return "", fmt.Errorf("unhandled debian version %q", debianVersion)
}
} else if !os.IsNotExist(err) {
klog.Warningf("error reading /etc/debian_version: %v", err)
}
// Redhat has /etc/redhat-release
// Centos has /etc/centos-release
redhatRelease, err := ioutil.ReadFile(path.Join(rootfs, "etc/redhat-release"))
if err == nil {
for _, line := range strings.Split(string(redhatRelease), "\n") {
line = strings.TrimSpace(line)
if strings.HasPrefix(line, "Red Hat Enterprise Linux Server release 7.") {
return DistributionRhel7, nil
}
if strings.HasPrefix(line, "CentOS Linux release 7.") {
return DistributionCentos7, nil
}
if strings.HasPrefix(line, "Red Hat Enterprise Linux release 8.") {
return DistributionRhel8, nil
}
if strings.HasPrefix(line, "CentOS Linux release 8.") {
return DistributionCentos8, nil
}
}
klog.Warningf("unhandled redhat-release info %q", string(lsbRelease))
} else if !os.IsNotExist(err) {
klog.Warningf("error reading /etc/redhat-release: %v", err)
}
// CoreOS uses /usr/lib/os-release
// Flatcar uses /usr/lib/os-release
usrLibOsRelease, err := ioutil.ReadFile(path.Join(rootfs, "usr/lib/os-release"))
if err == nil {
for _, line := range strings.Split(string(usrLibOsRelease), "\n") {
line = strings.TrimSpace(line)
if line == "ID=coreos" {
return DistributionCoreOS, nil
} else if line == "ID=flatcar" {
return DistributionFlatcar, nil
}
}
klog.Warningf("unhandled os-release info %q", string(usrLibOsRelease))
} else if !os.IsNotExist(err) {
klog.Warningf("error reading /usr/lib/os-release: %v", err)
}
// ContainerOS, Amazon Linux 2 uses /etc/os-release
osRelease, err := ioutil.ReadFile(path.Join(rootfs, "etc/os-release"))
if err == nil {
for _, line := range strings.Split(string(osRelease), "\n") {
line = strings.TrimSpace(line)
if line == "ID=cos" {
return DistributionContainerOS, nil
}
if strings.HasPrefix(line, "PRETTY_NAME=\"Amazon Linux 2") {
// TODO: This is a hack. Amazon Linux is "special" and should get its own distro entry
return DistributionRhel7, nil
}
}
klog.Warningf("unhandled /etc/os-release info %q", string(osRelease))
} else if !os.IsNotExist(err) {
klog.Warningf("error reading /etc/os-release: %v", err)
}
klog.Warningf("could not determine distro")
klog.Warningf(" /etc/lsb-release: %q", string(lsbRelease))
klog.Warningf(" /etc/debian_version: %q", string(debianVersionBytes))
klog.Warningf(" /etc/redhat-release: %q", string(redhatRelease))
klog.Warningf(" /usr/lib/os-release: %q", string(usrLibOsRelease))
klog.Warningf(" /etc/os-release: %q", string(osRelease))
return "", fmt.Errorf("cannot identify distro")
}
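// A minimal usage sketch (hypothetical caller; nodeup would typically pass the
// host root filesystem as rootfs):
//
//	distro, err := FindDistribution("/")
//	if err != nil {
//		klog.Fatalf("cannot identify distro: %v", err)
//	}
//	klog.Infof("detected distribution: %s", distro)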
dataconnections.go
package kusto
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// DataConnectionsClient is a client for the Azure Kusto management API, which provides a RESTful set of web
// services that interact with Azure Kusto services to manage your clusters and databases. The API enables you
// to create, update, and delete clusters and databases.
type DataConnectionsClient struct {
BaseClient
}
// NewDataConnectionsClient creates an instance of the DataConnectionsClient client.
func NewDataConnectionsClient(subscriptionID string) DataConnectionsClient {
return NewDataConnectionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewDataConnectionsClientWithBaseURI creates an instance of the DataConnectionsClient client using a custom endpoint.
// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewDataConnectionsClientWithBaseURI(baseURI string, subscriptionID string) DataConnectionsClient {
return DataConnectionsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
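// A minimal usage sketch from a consumer's perspective (assumes a valid
// subscription ID and an autorest authorizer obtained elsewhere):
//
//	client := kusto.NewDataConnectionsClient("00000000-0000-0000-0000-000000000000")
//	client.Authorizer = authorizer
//	dc, err := client.Get(ctx, "my-rg", "my-cluster", "my-db", "my-connection")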
// CheckNameAvailability checks that the data connection name is valid and is not already in use.
// Parameters:
// resourceGroupName - the name of the resource group containing the Kusto cluster.
// clusterName - the name of the Kusto cluster.
// databaseName - the name of the database in the Kusto cluster.
// dataConnectionName - the name of the data connection.
func (client DataConnectionsClient) CheckNameAvailability(ctx context.Context, resourceGroupName string, clusterName string, databaseName string, dataConnectionName DataConnectionCheckNameRequest) (result CheckNameResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DataConnectionsClient.CheckNameAvailability")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: dataConnectionName,
Constraints: []validation.Constraint{{Target: "dataConnectionName.Name", Name: validation.Null, Rule: true, Chain: nil},
{Target: "dataConnectionName.Type", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
return result, validation.NewError("kusto.DataConnectionsClient", "CheckNameAvailability", err.Error())
}
req, err := client.CheckNameAvailabilityPreparer(ctx, resourceGroupName, clusterName, databaseName, dataConnectionName)
if err != nil {
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsClient", "CheckNameAvailability", nil, "Failure preparing request")
return
}
resp, err := client.CheckNameAvailabilitySender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsClient", "CheckNameAvailability", resp, "Failure sending request")
return
}
result, err = client.CheckNameAvailabilityResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsClient", "CheckNameAvailability", resp, "Failure responding to request")
return
}
return
}
// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request.
func (client DataConnectionsClient) CheckNameAvailabilityPreparer(ctx context.Context, resourceGroupName string, clusterName string, databaseName string, dataConnectionName DataConnectionCheckNameRequest) (*http.Request, error) {
pathParameters := map[string]interface{}{
"clusterName": autorest.Encode("path", clusterName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2020-09-18"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/checkNameAvailability", pathParameters),
autorest.WithJSON(dataConnectionName),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the
// http.Response Body if it receives an error.
func (client DataConnectionsClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always
// closes the http.Response Body.
func (client DataConnectionsClient) CheckNameAvailabilityResponder(resp *http.Response) (result CheckNameResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
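// A hedged sketch of calling CheckNameAvailability (field names inferred from
// the validation rules above; all values hypothetical):
//
//	name := "my-connection"
//	typ := "Microsoft.Kusto/clusters/databases/dataConnections"
//	res, err := client.CheckNameAvailability(ctx, "my-rg", "my-cluster", "my-db",
//		kusto.DataConnectionCheckNameRequest{Name: &name, Type: &typ})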
// CreateOrUpdate creates or updates a data connection.
// Parameters:
// resourceGroupName - the name of the resource group containing the Kusto cluster.
// clusterName - the name of the Kusto cluster.
// databaseName - the name of the database in the Kusto cluster.
// dataConnectionName - the name of the data connection.
// parameters - the data connection parameters supplied to the CreateOrUpdate operation.
func (client DataConnectionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, clusterName string, databaseName string, dataConnectionName string, parameters BasicDataConnection) (result DataConnectionsCreateOrUpdateFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DataConnectionsClient.CreateOrUpdate")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, clusterName, databaseName, dataConnectionName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
result, err = client.CreateOrUpdateSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client DataConnectionsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, clusterName string, databaseName string, dataConnectionName string, parameters BasicDataConnection) (*http.Request, error) {
pathParameters := map[string]interface{}{
"clusterName": autorest.Encode("path", clusterName),
"databaseName": autorest.Encode("path", databaseName),
"dataConnectionName": autorest.Encode("path", dataConnectionName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2020-09-18"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/dataConnections/{dataConnectionName}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client DataConnectionsClient) CreateOrUpdateSender(req *http.Request) (future DataConnectionsCreateOrUpdateFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = func(client DataConnectionsClient) (dcm DataConnectionModel, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("kusto.DataConnectionsCreateOrUpdateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
dcm.Response.Response, err = future.GetResult(sender)
if dcm.Response.Response == nil && err == nil {
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsCreateOrUpdateFuture", "Result", nil, "received nil response and error")
}
if err == nil && dcm.Response.Response.StatusCode != http.StatusNoContent {
dcm, err = client.CreateOrUpdateResponder(dcm.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsCreateOrUpdateFuture", "Result", dcm.Response.Response, "Failure responding to request")
}
}
return
}
return
}
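// CreateOrUpdate is a long-running operation. A hedged sketch of consuming the
// returned future (per the usual autorest pattern; error handling abbreviated):
//
//	future, err := client.CreateOrUpdate(ctx, rg, cluster, db, name, params)
//	if err != nil {
//		return err
//	}
//	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
//		return err
//	}
//	dcm, err := future.Result(client)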
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client DataConnectionsClient) CreateOrUpdateResponder(resp *http.Response) (result DataConnectionModel, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// DataConnectionValidationMethod checks that the data connection parameters are valid.
// Parameters:
// resourceGroupName - the name of the resource group containing the Kusto cluster.
// clusterName - the name of the Kusto cluster.
// databaseName - the name of the database in the Kusto cluster.
// parameters - the data connection parameters to validate.
func (client DataConnectionsClient) DataConnectionValidationMethod(ctx context.Context, resourceGroupName string, clusterName string, databaseName string, parameters DataConnectionValidation) (result DataConnectionsDataConnectionValidationMethodFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DataConnectionsClient.DataConnectionValidationMethod")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.DataConnectionValidationMethodPreparer(ctx, resourceGroupName, clusterName, databaseName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsClient", "DataConnectionValidationMethod", nil, "Failure preparing request")
return
}
result, err = client.DataConnectionValidationMethodSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsClient", "DataConnectionValidationMethod", nil, "Failure sending request")
return
}
return
}
// DataConnectionValidationMethodPreparer prepares the DataConnectionValidationMethod request.
func (client DataConnectionsClient) DataConnectionValidationMethodPreparer(ctx context.Context, resourceGroupName string, clusterName string, databaseName string, parameters DataConnectionValidation) (*http.Request, error) {
pathParameters := map[string]interface{}{
"clusterName": autorest.Encode("path", clusterName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2020-09-18"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/dataConnectionValidation", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DataConnectionValidationMethodSender sends the DataConnectionValidationMethod request. The method will close the
// http.Response Body if it receives an error.
func (client DataConnectionsClient) DataConnectionValidationMethodSender(req *http.Request) (future DataConnectionsDataConnectionValidationMethodFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = func(client DataConnectionsClient) (dcvlr DataConnectionValidationListResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsDataConnectionValidationMethodFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("kusto.DataConnectionsDataConnectionValidationMethodFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
dcvlr.Response.Response, err = future.GetResult(sender)
if dcvlr.Response.Response == nil && err == nil {
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsDataConnectionValidationMethodFuture", "Result", nil, "received nil response and error")
}
if err == nil && dcvlr.Response.Response.StatusCode != http.StatusNoContent {
dcvlr, err = client.DataConnectionValidationMethodResponder(dcvlr.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsDataConnectionValidationMethodFuture", "Result", dcvlr.Response.Response, "Failure responding to request")
}
}
return
}
return
}
// DataConnectionValidationMethodResponder handles the response to the DataConnectionValidationMethod request. The method always
// closes the http.Response Body.
func (client DataConnectionsClient) DataConnectionValidationMethodResponder(resp *http.Response) (result DataConnectionValidationListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Delete deletes the data connection with the given name.
// Parameters:
// resourceGroupName - the name of the resource group containing the Kusto cluster.
// clusterName - the name of the Kusto cluster.
// databaseName - the name of the database in the Kusto cluster.
// dataConnectionName - the name of the data connection.
func (client DataConnectionsClient) Delete(ctx context.Context, resourceGroupName string, clusterName string, databaseName string, dataConnectionName string) (result DataConnectionsDeleteFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DataConnectionsClient.Delete")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.DeletePreparer(ctx, resourceGroupName, clusterName, databaseName, dataConnectionName)
if err != nil {
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsClient", "Delete", nil, "Failure preparing request")
return
}
result, err = client.DeleteSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsClient", "Delete", nil, "Failure sending request")
return
}
return
}
// DeletePreparer prepares the Delete request.
func (client DataConnectionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, clusterName string, databaseName string, dataConnectionName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"clusterName": autorest.Encode("path", clusterName),
"databaseName": autorest.Encode("path", databaseName),
"dataConnectionName": autorest.Encode("path", dataConnectionName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2020-09-18"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/dataConnections/{dataConnectionName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client DataConnectionsClient) DeleteSender(req *http.Request) (future DataConnectionsDeleteFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = func(client DataConnectionsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsDeleteFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("kusto.DataConnectionsDeleteFuture")
return
}
ar.Response = future.Response()
return
}
return
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client DataConnectionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
// Get returns a data connection.
// Parameters:
// resourceGroupName - the name of the resource group containing the Kusto cluster.
// clusterName - the name of the Kusto cluster.
// databaseName - the name of the database in the Kusto cluster.
// dataConnectionName - the name of the data connection.
func (client DataConnectionsClient) Get(ctx context.Context, resourceGroupName string, clusterName string, databaseName string, dataConnectionName string) (result DataConnectionModel, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DataConnectionsClient.Get")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.GetPreparer(ctx, resourceGroupName, clusterName, databaseName, dataConnectionName)
if err != nil {
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsClient", "Get", resp, "Failure responding to request")
return
}
return
}
// GetPreparer prepares the Get request.
func (client DataConnectionsClient) GetPreparer(ctx context.Context, resourceGroupName string, clusterName string, databaseName string, dataConnectionName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"clusterName": autorest.Encode("path", clusterName),
"databaseName": autorest.Encode("path", databaseName),
"dataConnectionName": autorest.Encode("path", dataConnectionName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2020-09-18"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/dataConnections/{dataConnectionName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client DataConnectionsClient) GetSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client DataConnectionsClient) GetResponder(resp *http.Response) (result DataConnectionModel, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListByDatabase returns the list of data connections of the given Kusto database.
// Parameters:
// resourceGroupName - the name of the resource group containing the Kusto cluster.
// clusterName - the name of the Kusto cluster.
// databaseName - the name of the database in the Kusto cluster.
func (client DataConnectionsClient) ListByDatabase(ctx context.Context, resourceGroupName string, clusterName string, databaseName string) (result DataConnectionListResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DataConnectionsClient.ListByDatabase")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.ListByDatabasePreparer(ctx, resourceGroupName, clusterName, databaseName)
if err != nil {
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsClient", "ListByDatabase", nil, "Failure preparing request")
return
}
resp, err := client.ListByDatabaseSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsClient", "ListByDatabase", resp, "Failure sending request")
return
}
result, err = client.ListByDatabaseResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsClient", "ListByDatabase", resp, "Failure responding to request")
return
}
return
}
// ListByDatabasePreparer prepares the ListByDatabase request.
func (client DataConnectionsClient) ListByDatabasePreparer(ctx context.Context, resourceGroupName string, clusterName string, databaseName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"clusterName": autorest.Encode("path", clusterName),
"databaseName": autorest.Encode("path", databaseName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2020-09-18"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/dataConnections", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListByDatabaseSender sends the ListByDatabase request. The method will close the
// http.Response Body if it receives an error.
func (client DataConnectionsClient) ListByDatabaseSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListByDatabaseResponder handles the response to the ListByDatabase request. The method always
// closes the http.Response Body.
func (client DataConnectionsClient) ListByDatabaseResponder(resp *http.Response) (result DataConnectionListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Update updates a data connection.
// Parameters:
// resourceGroupName - the name of the resource group containing the Kusto cluster.
// clusterName - the name of the Kusto cluster.
// databaseName - the name of the database in the Kusto cluster.
// dataConnectionName - the name of the data connection.
// parameters - the data connection parameters supplied to the Update operation.
func (client DataConnectionsClient) Update(ctx context.Context, resourceGroupName string, clusterName string, databaseName string, dataConnectionName string, parameters BasicDataConnection) (result DataConnectionsUpdateFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DataConnectionsClient.Update")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.UpdatePreparer(ctx, resourceGroupName, clusterName, databaseName, dataConnectionName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsClient", "Update", nil, "Failure preparing request")
return
}
result, err = client.UpdateSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsClient", "Update", nil, "Failure sending request")
return
}
return
}
// UpdatePreparer prepares the Update request.
func (client DataConnectionsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, clusterName string, databaseName string, dataConnectionName string, parameters BasicDataConnection) (*http.Request, error) {
pathParameters := map[string]interface{}{
"clusterName": autorest.Encode("path", clusterName),
"databaseName": autorest.Encode("path", databaseName),
"dataConnectionName": autorest.Encode("path", dataConnectionName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2020-09-18"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPatch(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Kusto/clusters/{clusterName}/databases/{databaseName}/dataConnections/{dataConnectionName}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client DataConnectionsClient) UpdateSender(req *http.Request) (future DataConnectionsUpdateFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = func(client DataConnectionsClient) (dcm DataConnectionModel, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsUpdateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("kusto.DataConnectionsUpdateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
dcm.Response.Response, err = future.GetResult(sender)
if dcm.Response.Response == nil && err == nil {
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsUpdateFuture", "Result", nil, "received nil response and error")
}
if err == nil && dcm.Response.Response.StatusCode != http.StatusNoContent {
dcm, err = client.UpdateResponder(dcm.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "kusto.DataConnectionsUpdateFuture", "Result", dcm.Response.Response, "Failure responding to request")
}
}
return
}
return
}
// UpdateResponder handles the response to the Update request. The method always
// closes the http.Response Body.
func (client DataConnectionsClient) UpdateResponder(resp *http.Response) (result DataConnectionModel, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
main.go
package main
import (
"log"
"time"
"math/rand"
"github.com/go-telegram-bot-api/telegram-bot-api"
"effects"
)
func NewInlineQueryResultArticleDesc(id string, title string, messageText string, description string) tgbotapi.InlineQueryResultArticle {
newArticle := tgbotapi.NewInlineQueryResultArticle(id, title, messageText)
newArticle.Description = description
return newArticle
}
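// For illustration (hypothetical values): a result that shows "preview text"
// under its title in the result list and sends "message text" when selected:
//
//	article := NewInlineQueryResultArticleDesc("21", "Some title", "message text", "preview text")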
func main() {
// Initialize global pseudo random generator
rand.Seed(time.Now().Unix())
// botToken is assumed to be defined elsewhere in this package (e.g. in a config file)
bot, err := tgbotapi.NewBotAPI(botToken)
if err != nil {
log.Panic(err)
}
log.Printf("Authorized on account %s", bot.Self.UserName)
u := tgbotapi.NewUpdate(0)
u.Timeout = 60
updates, err := bot.GetUpdatesChan(u)
if err != nil {
log.Panic(err)
}
for update := range updates {
if update.InlineQuery == nil || len(update.InlineQuery.Query) == 0 { // if no inline query, ignore it
continue
}
vaporwaveText,
zalgoText,
ideographsText,
whiteCirclesText,
blackCirclesText,
whiteSquaresText,
blackSquaresText,
parenthesisText,
frakturText,
scriptText,
doubleStruckText,
typewriterText,
underlineText,
overlineText,
strikethroughText,
dottedText,
triangledText,
deniedText,
risText,
brailleText :=
effects.Vaporwave(update.InlineQuery.Query),
effects.Zalgo(update.InlineQuery.Query),
effects.Ideographs(update.InlineQuery.Query),
effects.WhiteCircles(update.InlineQuery.Query),
effects.BlackCircles(update.InlineQuery.Query),
effects.WhiteSquares(update.InlineQuery.Query),
effects.BlackSquares(update.InlineQuery.Query),
effects.Parenthesis(update.InlineQuery.Query),
effects.Fraktur(update.InlineQuery.Query),
effects.Script(update.InlineQuery.Query),
effects.DoubleStruck(update.InlineQuery.Query),
effects.Typewriter(update.InlineQuery.Query),
effects.Underline(update.InlineQuery.Query),
effects.Overline(update.InlineQuery.Query),
effects.Strikethrough(update.InlineQuery.Query),
effects.Dotted(update.InlineQuery.Query),
effects.Triangled(update.InlineQuery.Query),
effects.Denied(update.InlineQuery.Query),
effects.RIS(update.InlineQuery.Query),
effects.Braille(update.InlineQuery.Query)
inlineConf := tgbotapi.InlineConfig{
InlineQueryID: update.InlineQuery.ID,
IsPersonal: true,
CacheTime: 0,
Results: []interface{}{
NewInlineQueryResultArticleDesc( "1", "VAPORWAVE", vaporwaveText, vaporwaveText),
NewInlineQueryResultArticleDesc( "2", "Z͉̩̖̝͗aͩl̵͖̯̰̠͎̘ͣͭͥ͋ͅḡ̒̏ő̫ͣ͋ͅ", zalgoText, zalgoText),
NewInlineQueryResultArticleDesc( "3", "工刀モ口ム尺丹ㄗ卄ち", ideographsText, ideographsText),
NewInlineQueryResultArticleDesc( "4", "Ⓦⓗⓘⓣⓔ ⓒⓘⓡⓒⓛⓔⓢ", whiteCirclesText, whiteCirclesText),
NewInlineQueryResultArticleDesc( "5", "🅑🅛🅐🅒🅚 🅒🅘🅡🅒🅛🅔🅢", blackCirclesText, blackCirclesText),
NewInlineQueryResultArticleDesc( "6", "🅆🄷🄸🅃🄴 🅂🅀🅄🄰🅁🄴🅂", whiteSquaresText, whiteSquaresText),
NewInlineQueryResultArticleDesc( "7", "🆆🅷🅸🆃🅴 🆂🆀🆄🅰🆁🅴🆂", blackSquaresText, blackSquaresText),
NewInlineQueryResultArticleDesc( "8", "🄟⒜⒭⒠⒩⒯⒣⒠⒮⒤⒮", parenthesisText, parenthesisText),
NewInlineQueryResultArticleDesc( "9", "𝕱𝖗𝖆𝖐𝖙𝖚𝖗", frakturText, frakturText),
NewInlineQueryResultArticleDesc("10", "𝓢𝓬𝓻𝓲𝓹𝓽", scriptText, scriptText),
NewInlineQueryResultArticleDesc("11", "𝔻𝕠𝕦𝕓𝕝𝕖-𝕤𝕥𝕣𝕦𝕔𝕜", doubleStruckText, doubleStruckText),
NewInlineQueryResultArticleDesc("12", "𝚃𝚢𝚙𝚎𝚠𝚛𝚒𝚝𝚎𝚛", typewriterText, typewriterText),
NewInlineQueryResultArticleDesc("13", "U̲n̲d̲e̲r̲l̲i̲n̲e̲", underlineText, underlineText),
NewInlineQueryResultArticleDesc("14", "O̅v̅e̅r̅l̅i̅n̅e̅", overlineText, overlineText),
NewInlineQueryResultArticleDesc("15", "S̶t̶r̶i̶k̶e̶t̶h̶r̶o̶u̶g̶h̶", strikethroughText, strikethroughText),
NewInlineQueryResultArticleDesc("16", "Ḋȯṫṫėḋ", dottedText, dottedText),
NewInlineQueryResultArticleDesc("17", "T⃤r⃤i⃤a⃤n⃤g⃤l⃤e⃤d⃤", triangledText, triangledText),
NewInlineQueryResultArticleDesc("18", "D⃠e⃠n⃠i⃠e⃠d⃠", deniedText, deniedText),
NewInlineQueryResultArticleDesc("19", "🇷 🇮 🇸", risText, risText),
NewInlineQueryResultArticleDesc("20", "⠠⠃⠗⠁⠊⠇⠇⠑", brailleText, brailleText),
},
}
if _, err := bot.AnswerInlineQuery(inlineConf); err != nil {
log.Println(err)
}
}
}
AssetList.tsx
import range from "lodash/range";
import PropTypes from "prop-types";
import React from "react";
import { DataTable, PlayerNameLabels } from "../../components";
import { getCols, helpers } from "../../util";
import type { View } from "../../../common/types";
import { Dropdown } from "react-bootstrap";
type HandleToggle = (
userOrOther: "other" | "user",
playerOrPick: "pick" | "player",
includeOrExclude: "include" | "exclude",
id: number,
) => Promise<void>;
type HandleBulk = (
type: "check" | "clear",
userOrOther: "other" | "user",
playerOrPick: "pick" | "player",
draftRoundOnly?: number,
) => Promise<void>;
type UserOrOther = "user" | "other";
type TradeProps = View<"trade">;
type Stats = TradeProps["stats"];
type Picks = TradeProps["userRoster"];
type Roster = TradeProps["otherRoster"];
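// For illustration (hypothetical values), the parent view supplies these handlers.
// Including the user's player with pid 42 in the current offer:
//
//   handleToggle("user", "player", "include", 42);
//
// Excluding all of the other team's 2nd-round picks from counter offers:
//
//   handleBulk("check", "other", "pick", 2);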
const genPlayerRows = (
players: Roster,
handleToggle: HandleToggle,
userOrOther: UserOrOther,
stats: Stats,
challengeNoRatings: boolean,
) => {
return players.map(p => {
return {
key: p.pid,
data: [
<input
type="checkbox"
title={p.untradableMsg}
checked={p.included}
disabled={p.untradable}
onChange={() => {
handleToggle(userOrOther, "player", "include", p.pid);
}}
/>,
<input
type="checkbox"
title="Exclude this player from counter offers"
checked={p.excluded}
disabled={p.untradable}
onChange={() => {
handleToggle(userOrOther, "player", "exclude", p.pid);
}}
/>,
<PlayerNameLabels | jerseyNumber={p.jerseyNumber}
pid={p.pid}
skills={p.ratings.skills}
watch={p.watch}
>
{p.name}
</PlayerNameLabels>,
p.ratings.pos,
p.age,
!challengeNoRatings ? p.ratings.ovr : null,
!challengeNoRatings ? p.ratings.pot : null,
helpers.formatCurrency(p.contract.amount, "M"),
p.contract.exp,
...stats.map(stat => helpers.roundStat(p.stats[stat], stat)),
],
classNames: {
"table-danger": p.excluded && !p.included,
"table-success": p.included,
},
};
});
};
const genPickRows = (
picks: Picks,
handleToggle: HandleToggle,
userOrOther: UserOrOther,
) => {
return picks.map(pick => {
return {
key: pick.dpid,
data: [
<input
name="other-dpids"
type="checkbox"
checked={pick.included}
onChange={() => {
handleToggle(userOrOther, "pick", "include", pick.dpid);
}}
/>,
<input
type="checkbox"
title="Exclude this pick from counter offers"
checked={pick.excluded}
onChange={() => {
handleToggle(userOrOther, "pick", "exclude", pick.dpid);
}}
/>,
pick.desc,
],
classNames: {
"table-danger": pick.excluded && !pick.included,
"table-success": pick.included,
},
};
});
};
const pickCols = getCols("", "X", "Draft Picks");
pickCols[0].sortSequence = [];
pickCols[2].width = "100%";
const AssetList = ({
challengeNoRatings,
handleBulk,
handleToggle,
numDraftRounds,
picks,
roster,
stats,
userOrOther,
}: {
challengeNoRatings: boolean;
handleBulk: HandleBulk;
handleToggle: HandleToggle;
numDraftRounds: number;
picks: Picks;
roster: Roster;
stats: Stats;
userOrOther: UserOrOther;
}) => {
const playerCols = getCols(
"",
"X",
"Name",
"Pos",
"Age",
"Ovr",
"Pot",
"Contract",
"Exp",
...stats.map(stat => `stat:${stat}`),
);
playerCols[0].sortSequence = [];
playerCols[2].width = "100%";
const playerRows = genPlayerRows(
roster,
handleToggle,
userOrOther,
stats,
challengeNoRatings,
);
const pickRows = genPickRows(picks, handleToggle, userOrOther);
const userOrOtherKey = `${userOrOther[0].toUpperCase()}${userOrOther.slice(
1,
)}`;
return (
<div className="row">
<div className="col-xl-9">
<Dropdown className="d-inline-block">
<Dropdown.Toggle
variant="secondary"
id={`trade-players-bulk-${userOrOtherKey}`}
className="btn-sm"
>
Bulk exclude
</Dropdown.Toggle>
<Dropdown.Menu>
<Dropdown.Item
onClick={() => {
handleBulk("check", userOrOther, "player");
}}
>
Make all untradeable
</Dropdown.Item>
<Dropdown.Item
onClick={() => {
handleBulk("clear", userOrOther, "player");
}}
>
Clear all untradeable
</Dropdown.Item>
</Dropdown.Menu>
</Dropdown>
<DataTable
className="datatable-negative-margin-top"
cols={playerCols}
defaultSort={[5, "desc"]}
name={`Trade:${userOrOtherKey}`}
rows={playerRows}
/>
</div>
<div className="col-xl-3">
<Dropdown className="d-inline-block">
<Dropdown.Toggle
variant="secondary"
id={`trade-picks-bulk-${userOrOtherKey}`}
className="btn-sm mb-2"
>
Bulk exclude
</Dropdown.Toggle>
<Dropdown.Menu>
<Dropdown.Item
onClick={() => {
handleBulk("check", userOrOther, "pick");
}}
>
Make all untradeable
</Dropdown.Item>
{range(numDraftRounds).map(i => (
<Dropdown.Item
key={i}
onClick={() => {
handleBulk("check", userOrOther, "pick", i + 1);
}}
>
Make all {helpers.ordinal(i + 1)} round picks untradeable
</Dropdown.Item>
))}
<Dropdown.Item
onClick={() => {
handleBulk("clear", userOrOther, "pick");
}}
>
Clear all untradeable
</Dropdown.Item>
</Dropdown.Menu>
</Dropdown>
<DataTable
cols={pickCols}
defaultSort={[1, "asc"]}
hideAllControls
name={`Trade:Picks:${userOrOtherKey}`}
rows={pickRows}
/>
</div>
</div>
);
};
AssetList.propTypes = {
handleToggle: PropTypes.func.isRequired,
picks: PropTypes.array.isRequired,
roster: PropTypes.array.isRequired,
stats: PropTypes.arrayOf(PropTypes.string).isRequired,
userOrOther: PropTypes.oneOf(["other", "user"]).isRequired,
};
export default AssetList;
diff_test.py
# Copyright (C) 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the diff and difffull modules."""
import tempfile
from fire import testutils
from examples.diff import diff
from examples.diff import difffull
class DiffTest(testutils.BaseTestCase):
"""The purpose of these tests is to ensure the difflib wrappers works.
It is not the goal of these tests to exhaustively test difflib functionality.
"""
def setUp(self):
self.file1 = file1 = tempfile.NamedTemporaryFile()
self.file2 = file2 = tempfile.NamedTemporaryFile()
file1.write(b'test\ntest1\n')
file2.write(b'test\ntest2\nextraline\n')
file1.flush()
file2.flush()
self.diff = diff.DiffLibWrapper(file1.name, file2.name)
def testSetUp(self):
self.assertEqual(self.diff.fromlines, ['test\n', 'test1\n'])
self.assertEqual(self.diff.tolines, ['test\n', 'test2\n', 'extraline\n'])
def testUnifiedDiff(self):
results = list(self.diff.unified_diff())
self.assertTrue(results[0].startswith('--- ' + self.file1.name))
self.assertTrue(results[1].startswith('+++ ' + self.file2.name))
self.assertEqual(
results[2:],
[
'@@ -1,2 +1,3 @@\n',
' test\n',
'-test1\n',
'+test2\n',
'+extraline\n',
]
)
def testContextDiff(self):
expected_lines = [
'***************\n',
'*** 1,2 ****\n',
' test\n',
'! test1\n',
'--- 1,3 ----\n',
' test\n',
'! test2\n',
'! extraline\n']
results = list(self.diff.context_diff())
self.assertEqual(results[2:], expected_lines)
def testNDiff(self):
expected_lines = [
' test\n',
'- test1\n',
'? ^\n',
'+ test2\n',
'? ^\n',
'+ extraline\n']
results = list(self.diff.ndiff())
self.assertEqual(results, expected_lines)
def testMakeDiff(self):
self.assertTrue(''.join(self.diff.make_file()).startswith('\n<!DOC'))
def testDiffFull(self):
self.assertIsNotNone(difffull)
self.assertIsNotNone(difffull.difflib)
if __name__ == '__main__':
testutils.main()
l337x_search.rs
use crate::torrent::Torrent;
use crate::SearchProvider;
use async_trait::async_trait;
use log::info;
use std::convert::TryInto;
use std::error::Error;
use torrent_search::*;
// The empty struct is kept for compatibility with the SearchProvider interface
pub struct L337xSearch {}
impl L337xSearch {
pub fn new() -> L337xSearch {
L337xSearch {}
}
}
#[async_trait]
impl SearchProvider for L337xSearch {
async fn search(&self, term: &str) -> Result<Vec<Torrent>, Box<dyn Error + Send + Sync>> {
info!("Searching on L337X");
let res = search_l337x(term.to_string()).await.unwrap_or_default();
//info!("Status: {}", res.status());
Ok(parse_l337x(res))
}
fn get_name(&self) -> &'static str {
"L337X"
}
}
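// A minimal usage sketch (run from an async context; error handling elided):
//
//     let provider = L337xSearch::new();
//     let torrents = provider.search("debian").await?;
//     for t in &torrents {
//         println!("{} ({:?} seeders)", t.name, t.seeders);
//     }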
fn parse_l337x(results: Vec<TorrentSearchResult>) -> Vec<Torrent> {
let mut results_output: Vec<Torrent> = Vec::new();
for result in results.iter() {
results_output.push(Torrent {
name: result.name.clone(),
magnet_link: result.magnet.as_ref().unwrap().to_string(),
seeders: match result.seeders {
Ok(s) => Some(s.try_into().unwrap()),
_ => None,
},
leechers: match result.leeches {
Ok(l) => Some(l.try_into().unwrap()),
_ => None,
},
});
}
results_output
}
#[cfg(test)]
mod test {
use torrent_search::search_l337x;
#[tokio::test]
async fn test_parse_l337x() {
let torrents = search_l337x("Debian".to_string()).await.unwrap_or_default();
for torrent in torrents.iter() {
assert!(&torrent.magnet.as_ref().unwrap().starts_with("magnet:?"));
assert!(torrent.seeders.is_ok());
assert!(torrent.leeches.is_ok());
}
}
}
cycles_minting_test.rs
use crate::nns::{
get_governance_canister, set_authorized_subnetwork_list, submit_external_proposal_with_test_id,
update_xdr_per_icp, NnsExt,
};
use crate::util::{
assert_all_ready, get_random_application_node_endpoint, get_random_nns_node_endpoint,
runtime_from_url,
};
use canister_test::{Canister, Project, Wasm};
use cycles_minting_canister::{
IcpXdrConversionRateCertifiedResponse, TokensToCycles, CREATE_CANISTER_REFUND_FEE,
DEFAULT_CYCLES_PER_XDR,
};
use dfn_candid::{candid_one, CandidOne};
use dfn_protobuf::ProtoBuf;
use ic_canister_client::{Agent, HttpClient, Sender};
use ic_certified_vars::verify_certificate;
use ic_config::subnet_config::CyclesAccountManagerConfig;
use ic_crypto::threshold_sig_public_key_from_der;
use ic_crypto_tree_hash::MixedHashTree;
use ic_fondue::{ic_instance::InternetComputer, ic_manager::IcHandle};
use ic_nns_common::types::{NeuronId, UpdateIcpXdrConversionRatePayload};
use ic_nns_constants::{
ids::{TEST_NEURON_1_OWNER_KEYPAIR, TEST_USER1_KEYPAIR, TEST_USER1_PRINCIPAL},
CYCLES_MINTING_CANISTER_ID, GOVERNANCE_CANISTER_ID, LEDGER_CANISTER_ID, ROOT_CANISTER_ID,
};
use ic_nns_governance::pb::v1::NnsFunction;
use ic_nns_test_utils::{
governance::{
submit_external_update_proposal_allowing_error, upgrade_nns_canister_by_proposal,
},
ids::TEST_NEURON_1_ID,
};
use ic_registry_subnet_type::SubnetType;
use ic_rosetta_test_utils::make_user;
use ic_types::{
ic00::{CanisterIdRecord, CanisterStatusResult},
Cycles,
};
use ledger_canister::{
self, Block, BlockArg, BlockHeight, BlockRes, Operation, Tokens, DEFAULT_TRANSFER_FEE,
};
use on_wire::{FromWire, IntoWire};
use slog::info;
use url::Url;
pub fn config() -> InternetComputer {
InternetComputer::new()
.add_fast_single_node_subnet(SubnetType::System)
.add_fast_single_node_subnet(SubnetType::Application)
}
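// The resulting topology for this test: one single-node system subnet that
// hosts the NNS canisters and one single-node application subnet where the
// user canisters below are created.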
pub fn test(handle: IcHandle, ctx: &ic_fondue::pot::Context) {
// Install NNS canisters
ctx.install_nns_canisters(&handle, true);
let rt = tokio::runtime::Runtime::new().expect("Could not create tokio runtime.");
rt.block_on(async move {
let mut rng = ctx.rng.clone();
let nns_endpoint = get_random_nns_node_endpoint(&handle, &mut rng);
nns_endpoint.assert_ready(ctx).await;
let nns = runtime_from_url(nns_endpoint.url.clone());
let agent_client = HttpClient::new();
let (
_controller_user_id,
controller_user_keypair,
_controller_user_public_key,
controller_pid,
) = make_user(7);
let xdr_permyriad_per_icp = 5_000; // = 0.5 XDR/ICP
let icpts_to_cycles = TokensToCycles {
xdr_permyriad_per_icp,
cycles_per_xdr: DEFAULT_CYCLES_PER_XDR.into(),
};
let timestamp = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_secs();
// Set the XDR-to-cycles conversion rate.
info!(ctx.logger, "setting CYCLES_PER_XDR");
update_xdr_per_icp(&nns, timestamp, xdr_permyriad_per_icp)
.await
.unwrap();
// Set the XDR-to-cycles conversion rate, but expect it to fail
info!(ctx.logger, "setting conversion rate to 0, failure expected");
let governance_canister = get_governance_canister(&nns);
let proposal_payload = UpdateIcpXdrConversionRatePayload {
timestamp_seconds: std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_secs(),
xdr_permyriad_per_icp: 0,
..Default::default()
};
submit_external_update_proposal_allowing_error(
&governance_canister,
Sender::from_keypair(&TEST_NEURON_1_OWNER_KEYPAIR),
NeuronId(TEST_NEURON_1_ID),
NnsFunction::IcpXdrConversionRate,
proposal_payload,
"Test Title".to_string(),
"Test Summary".to_string(),
)
.await
.unwrap_err();
let canister = Canister::new(&nns, CYCLES_MINTING_CANISTER_ID);
/* Test getting the conversion rate */
let mut conversion_rate_response = canister
.query_(
"get_icp_xdr_conversion_rate",
candid_one::<IcpXdrConversionRateCertifiedResponse, ()>,
(),
)
.await
.unwrap();
let icp_xdr_conversion_rate = conversion_rate_response.data;
// Check that the first call changed the value but not the second one
assert_eq!(
icp_xdr_conversion_rate.xdr_permyriad_per_icp,
xdr_permyriad_per_icp
);
let pk_bytes = handle
.ic_prep_working_dir
.as_ref()
.unwrap()
.root_public_key()
.expect("failed to read threshold sig PK bytes");
let pk = threshold_sig_public_key_from_der(&pk_bytes[..])
.expect("failed to decode threshold sig PK");
let mixed_hash_tree: MixedHashTree =
serde_cbor::from_slice(&conversion_rate_response.hash_tree).unwrap();
// Verify the authenticity of the root hash stored by the canister in the
// certified_data field
verify_certificate(
&conversion_rate_response.certificate[..],
&CYCLES_MINTING_CANISTER_ID,
&pk,
mixed_hash_tree.digest().as_bytes(),
)
.unwrap();
let proposal_payload = UpdateIcpXdrConversionRatePayload {
timestamp_seconds: timestamp,
xdr_permyriad_per_icp: xdr_permyriad_per_icp + 1234,
..Default::default()
};
// Set the XDR-to-cycles conversion rate again but with the same timestamp.
// No change expected.
info!(ctx.logger, "setting CYCLES_PER_XDR");
submit_external_proposal_with_test_id(
&governance_canister,
NnsFunction::IcpXdrConversionRate,
proposal_payload,
)
.await;
conversion_rate_response = canister
.query_(
"get_icp_xdr_conversion_rate",
candid_one::<IcpXdrConversionRateCertifiedResponse, ()>,
(),
)
.await
.unwrap();
let icp_xdr_conversion_rate = conversion_rate_response.data;
// Check rate hasn't changed
assert_eq!(
icp_xdr_conversion_rate.xdr_permyriad_per_icp,
xdr_permyriad_per_icp
);
/* The first attempt to create a canister should fail because we
* haven't registered subnets with the cycles minting canister. */
info!(ctx.logger, "creating canister (no subnets)");
let send_amount = Tokens::new(2, 0).unwrap();
let (err, refund_block) = cycles_minting_client::CreateCanister {
client: agent_client.clone(),
ic_url: nns_endpoint.url.clone(),
ledger_canister_id: &LEDGER_CANISTER_ID,
cycles_canister_id: &CYCLES_MINTING_CANISTER_ID,
sender_keypair: &TEST_USER1_KEYPAIR,
sender_subaccount: None,
amount: send_amount,
controller_id: &controller_pid,
}
.execute()
.await
.unwrap_err();
info!(ctx.logger, "error: {}", err);
assert!(err.contains("No subnets in which to create a canister"));
/* Check that the funds for the failed creation attempt are returned to us
* (minus the fees). */
let refund_block = refund_block.unwrap();
check_refund(
&nns_endpoint.url,
&agent_client,
refund_block,
send_amount,
CREATE_CANISTER_REFUND_FEE,
)
.await;
/* Register a subnet. */
info!(ctx.logger, "registering subnets");
let app_subnets: Vec<_> = handle
.as_permutation(&mut rng)
.filter(|ep| ep.subnet.as_ref().map(|s| s.type_of) == Some(SubnetType::Application))
.collect();
assert_all_ready(app_subnets.as_slice(), ctx).await;
let app_subnet_ids: Vec<_> = app_subnets
.into_iter()
.map(|e| e.subnet.as_ref().expect("unassigned node not permitted").id)
.collect();
set_authorized_subnetwork_list(&nns, None, app_subnet_ids.clone())
.await
.unwrap();
/* Create with funds < the canister creation fee. */
info!(ctx.logger, "creating canister (not enough funds)");
let insufficient_amount1 = Tokens::new(0, 10_000_000).unwrap();
let (err, refund_block) = cycles_minting_client::CreateCanister {
client: agent_client.clone(),
ic_url: nns_endpoint.url.clone(),
ledger_canister_id: &LEDGER_CANISTER_ID,
cycles_canister_id: &CYCLES_MINTING_CANISTER_ID,
sender_keypair: &TEST_USER1_KEYPAIR,
sender_subaccount: None,
amount: insufficient_amount1,
controller_id: &controller_pid,
}
.execute()
.await
.unwrap_err();
info!(ctx.logger, "error: {}", err);
assert!(err.contains("Creating a canister requires a fee of"));
let refund_block = refund_block.unwrap();
check_refund(
&nns_endpoint.url,
&agent_client,
refund_block,
insufficient_amount1,
CREATE_CANISTER_REFUND_FEE,
)
.await;
/* Create with funds < the refund fee. */
info!(ctx.logger, "creating canister (not enough funds)");
let insufficient_amount2 = (DEFAULT_TRANSFER_FEE + Tokens::from_e8s(10_000)).unwrap();
let (err, no_refund_block) = cycles_minting_client::CreateCanister {
client: agent_client.clone(),
ic_url: nns_endpoint.url.clone(),
ledger_canister_id: &LEDGER_CANISTER_ID,
cycles_canister_id: &CYCLES_MINTING_CANISTER_ID,
sender_keypair: &TEST_USER1_KEYPAIR,
sender_subaccount: None,
amount: insufficient_amount2,
controller_id: &controller_pid,
}
.execute()
.await
.unwrap_err();
info!(ctx.logger, "error: {}", err);
assert!(err.contains("Creating a canister requires a fee of"));
/* There should be no refund, all the funds will be burned. */
assert!(no_refund_block.is_none());
let block = get_block(&nns_endpoint.url, &agent_client, refund_block + 4)
.await
.unwrap()
.unwrap();
let txn = block.transaction();
match txn.operation {
Operation::Burn { amount, .. } => {
assert_eq!(
(insufficient_amount2 - DEFAULT_TRANSFER_FEE).unwrap(),
amount
);
}
_ => panic!("unexpected block {:?}", txn),
}
/* Create with sufficient funds. */
info!(ctx.logger, "creating canister");
let initial_amount = Tokens::new(10_000, 0).unwrap();
let new_canister_id = cycles_minting_client::CreateCanister {
client: agent_client.clone(),
ic_url: nns_endpoint.url.clone(),
ledger_canister_id: &LEDGER_CANISTER_ID,
cycles_canister_id: &CYCLES_MINTING_CANISTER_ID,
sender_keypair: &TEST_USER1_KEYPAIR,
sender_subaccount: None,
amount: initial_amount,
controller_id: &controller_pid,
}
.execute()
.await
.unwrap();
/* Check that the funds for the canister creation attempt are burned. */
let block = get_block(&nns_endpoint.url, &agent_client, refund_block + 7)
.await
.unwrap()
.unwrap();
let txn = block.transaction();
match txn.operation {
Operation::Burn { amount, .. } => {
assert_eq!((amount + DEFAULT_TRANSFER_FEE).unwrap(), initial_amount);
}
_ => panic!("unexpected block {:?}", txn),
}
info!(ctx.logger, "topping up");
let top_up_amount = Tokens::new(5_000, 0).unwrap();
cycles_minting_client::TopUpCanister {
client: agent_client.clone(),
ic_url: nns_endpoint.url.clone(),
ledger_canister_id: &LEDGER_CANISTER_ID,
cycles_canister_id: &CYCLES_MINTING_CANISTER_ID,
sender_keypair: &TEST_USER1_KEYPAIR,
sender_subaccount: None,
amount: top_up_amount,
target_canister_id: &new_canister_id,
}
.execute()
.await
.unwrap();
/* Check the controller / cycles balance. */
let msg_size = CandidOne(CanisterIdRecord::from(new_canister_id))
.into_bytes()
.unwrap()
.len();
let nonce_size = 8; // see RemoteTestRuntime::get_nonce_vec
let application_endpoint = get_random_application_node_endpoint(&handle, &mut rng);
application_endpoint.assert_ready(ctx).await;
let new_canister_status: CanisterStatusResult =
runtime_from_url(application_endpoint.url.clone())
.get_management_canister()
.update_from_sender(
"canister_status",
candid_one,
CanisterIdRecord::from(new_canister_id),
&Sender::from_keypair(&controller_user_keypair),
)
.await
.unwrap();
assert_eq!(new_canister_status.controller(), controller_pid);
let config = CyclesAccountManagerConfig::application_subnet();
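    // Expected balance: the minted cycles minus the canister creation fee and
    // the ingress reception fees for this `canister_status` call (message
    // bytes + method name + nonce), mirroring the expression below.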
assert_eq!(
new_canister_status.cycles(),
(icpts_to_cycles.to_cycles((initial_amount + top_up_amount).unwrap())
- config.canister_creation_fee
- config.ingress_message_reception_fee
- config.ingress_byte_reception_fee
* (msg_size + "canister_status".len() + nonce_size))
.get()
);
/* Check that the funds for the canister top up attempt are burned. */
let block = get_block(&nns_endpoint.url, &agent_client, refund_block + 10)
.await
.unwrap()
.unwrap();
let txn = block.transaction();
match txn.operation {
Operation::Burn { amount, .. } => {
assert_eq!((amount + DEFAULT_TRANSFER_FEE).unwrap(), top_up_amount);
}
_ => panic!("unexpected block {:?}", txn),
}
/* Override the list of subnets for a specific controller. */
info!(ctx.logger, "registering subnets override");
let system_subnets: Vec<_> = handle
.as_permutation(&mut rng)
.filter(|ep| ep.subnet.as_ref().map(|s| s.type_of) == Some(SubnetType::System))
.collect();
assert_all_ready(system_subnets.as_slice(), ctx).await;
let system_subnet_ids = system_subnets
.iter()
.map(|x| x.subnet.clone().expect("unassigned node not permitted").id)
.collect();
set_authorized_subnetwork_list(&nns, Some(controller_pid), system_subnet_ids)
.await
.unwrap();
info!(ctx.logger, "creating NNS canister");
let nns_amount = Tokens::new(2, 0).unwrap();
let new_canister_id = cycles_minting_client::CreateCanister {
client: agent_client.clone(),
ic_url: nns_endpoint.url.clone(),
ledger_canister_id: &LEDGER_CANISTER_ID,
cycles_canister_id: &CYCLES_MINTING_CANISTER_ID,
sender_keypair: &TEST_USER1_KEYPAIR,
sender_subaccount: None,
amount: nns_amount,
controller_id: &controller_pid,
}
.execute()
.await
.unwrap();
/* Check the controller / cycles balance. */
let new_canister_status: CanisterStatusResult = nns
.get_management_canister()
.update_from_sender(
"canister_status",
candid_one,
CanisterIdRecord::from(new_canister_id),
&Sender::from_keypair(&controller_user_keypair),
)
.await
.unwrap();
assert_eq!(new_canister_status.controller(), controller_pid);
assert_eq!(
new_canister_status.cycles(),
icpts_to_cycles.to_cycles(nns_amount).get()
);
    /* Try upgrading the cycles minting canister. This should
     * preserve its state (such as the principal -> subnets
     * mappings). Note: we first upgrade to a dummy module
     * because upgrade_nns_canister_by_proposal() refuses to
     * upgrade to the same version of the canister. */
info!(
ctx.logger,
"upgrading cycles minting canister to empty module"
);
let wasm = wabt::wat2wasm("(module)").unwrap();
upgrade_nns_canister_by_proposal(
&Canister::new(&nns, CYCLES_MINTING_CANISTER_ID),
&Canister::new(&nns, GOVERNANCE_CANISTER_ID),
&Canister::new(&nns, ROOT_CANISTER_ID),
true,
Wasm::from_bytes(wasm),
)
.await;
info!(ctx.logger, "creating NNS canister (will fail)");
let err = cycles_minting_client::CreateCanister {
client: agent_client.clone(),
ic_url: nns_endpoint.url.clone(),
ledger_canister_id: &LEDGER_CANISTER_ID,
cycles_canister_id: &CYCLES_MINTING_CANISTER_ID,
sender_keypair: &TEST_USER1_KEYPAIR,
sender_subaccount: None,
amount: nns_amount,
controller_id: &controller_pid,
}
.execute()
.await
.unwrap_err();
assert!(
err.0
.contains("has no update method 'transaction_notification_pb'"),
"Error message was: {}",
err.0
);
info!(ctx.logger, "upgrading cycles minting canister");
let wasm = Project::cargo_bin_maybe_use_path_relative_to_rs(
"rosetta-api/cycles_minting_canister",
"cycles-minting-canister",
&[],
);
upgrade_nns_canister_by_proposal(
&Canister::new(&nns, CYCLES_MINTING_CANISTER_ID),
&Canister::new(&nns, GOVERNANCE_CANISTER_ID),
&Canister::new(&nns, ROOT_CANISTER_ID),
true,
wasm,
)
.await;
info!(ctx.logger, "creating NNS canister");
cycles_minting_client::CreateCanister {
client: agent_client.clone(),
ic_url: nns_endpoint.url.clone(),
ledger_canister_id: &LEDGER_CANISTER_ID,
cycles_canister_id: &CYCLES_MINTING_CANISTER_ID,
sender_keypair: &TEST_USER1_KEYPAIR,
sender_subaccount: None,
amount: nns_amount,
controller_id: &controller_pid,
}
.execute()
.await
.unwrap();
/* Exceed the daily cycles minting limit. */
info!(ctx.logger, "creating canister (exceeding daily limit)");
let amount = Tokens::new(100_000, 0).unwrap();
let (err, refund_block) = cycles_minting_client::CreateCanister {
client: agent_client.clone(),
ic_url: nns_endpoint.url.clone(),
ledger_canister_id: &LEDGER_CANISTER_ID,
cycles_canister_id: &CYCLES_MINTING_CANISTER_ID,
sender_keypair: &TEST_USER1_KEYPAIR,
sender_subaccount: None,
amount,
controller_id: &controller_pid,
}
.execute()
.await
.unwrap_err();
info!(ctx.logger, "error: {}", err);
assert!(err
.contains("cycles have been minted in the last 3600 seconds, please try again later"));
let refund_block = refund_block.unwrap();
check_refund(
&nns_endpoint.url,
&agent_client,
refund_block,
amount,
CREATE_CANISTER_REFUND_FEE,
)
.await;
/* Test getting the total number of cycles minted. */
let bytes = Agent::new_with_client(
agent_client.clone(),
nns_endpoint.url.clone(),
Sender::Anonymous,
)
.execute_query(
&CYCLES_MINTING_CANISTER_ID,
"total_cycles_minted",
ProtoBuf(()).into_bytes().unwrap(),
)
.await
.unwrap()
.unwrap();
let cycles_minted: u64 = ProtoBuf::from_bytes(bytes).map(|c| c.0).unwrap();
let total_icpts = (((((insufficient_amount1 + insufficient_amount2).unwrap()
+ initial_amount)
.unwrap()
+ top_up_amount)
.unwrap()
+ nns_amount)
.unwrap()
+ nns_amount)
.unwrap();
assert_eq!(
Cycles::from(cycles_minted),
icpts_to_cycles.to_cycles(total_icpts)
);
});
}
async fn get_block(
ic_url: &Url,
agent_client: &HttpClient,
block_index: BlockHeight,
) -> Result<Option<Block>, String> {
let ledger_agent =
Agent::new_with_client(agent_client.clone(), ic_url.clone(), Sender::Anonymous);
let bytes = ledger_agent
.execute_query(
&LEDGER_CANISTER_ID,
"block_pb",
ProtoBuf(BlockArg(block_index)).into_bytes()?,
)
.await?
.unwrap();
let resp: Result<BlockRes, String> = ProtoBuf::from_bytes(bytes).map(|c| c.0);
match resp? {
BlockRes(None) => Ok(None),
BlockRes(Some(Ok(block))) => Ok(Some(block.decode().unwrap())),
        BlockRes(Some(Err(canister_id))) => unimplemented!("FIXME: {}", canister_id),
}
}
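/// Verifies the refund accounting (restating the assertions in the body):
/// the refund block transfers `send_amount - DEFAULT_TRANSFER_FEE - refund_fee`
/// back to TEST_USER1, and the following block burns `refund_fee` itself.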
async fn check_refund(
ic_url: &Url,
agent_client: &HttpClient,
refund_block: BlockHeight,
send_amount: Tokens,
refund_fee: Tokens,
) {
let block = get_block(ic_url, agent_client, refund_block)
.await
.unwrap()
.unwrap();
let txn = block.transaction();
match txn.operation {
Operation::Transfer { amount, to, .. } => {
assert_eq!(
((amount + DEFAULT_TRANSFER_FEE).unwrap() + refund_fee).unwrap(),
send_amount
);
assert_eq!(to, (*TEST_USER1_PRINCIPAL).into());
}
_ => panic!("unexpected block {:?}", txn),
}
let block = get_block(ic_url, agent_client, refund_block + 1)
.await
.unwrap()
.unwrap();
let txn = block.transaction();
match txn.operation {
Operation::Burn { amount, .. } => {
assert_eq!(refund_fee, amount);
}
_ => panic!("unexpected block {:?}", txn),
}
}
remove_txn_receipt_by_index.rs
/*
* Copyright 2021 Cargill Incorporated
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ------------------------------------------------------------------------------
*/
//! Provides the `remove_txn_receipt_by_index` operation for `DieselReceiptStore`
// `Connection::transaction` must be in scope for the call below.
use diesel::prelude::*;
use diesel::sql_types::Text;
use transact::protocol::receipt::TransactionReceipt;
use crate::error::InvalidStateError;
use super::{
get_txn_receipt_by_index::ReceiptStoreGetTxnReceiptByIndexOperation,
remove_txn_receipt_by_id::ReceiptStoreRemoveTxnReceiptByIdOperation, ReceiptStoreOperations,
};
use crate::receipt::store::ReceiptStoreError;
pub(in crate::receipt::store::diesel) trait ReceiptStoreRemoveTxnReceiptByIndexOperation {
fn remove_txn_receipt_by_index(
&self,
index: u64,
) -> Result<Option<TransactionReceipt>, ReceiptStoreError>;
}
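// Design note: the index lookup and the delete below run inside a single
// diesel transaction, so a concurrent writer cannot remove or replace the
// receipt between resolving the index to an id and deleting by that id.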
impl<'a, C> ReceiptStoreRemoveTxnReceiptByIndexOperation for ReceiptStoreOperations<'a, C>
where
C: diesel::Connection,
String: diesel::deserialize::FromSql<Text, C::Backend>,
i64: diesel::deserialize::FromSql<diesel::sql_types::BigInt, C::Backend>,
i32: diesel::deserialize::FromSql<diesel::sql_types::Integer, C::Backend>,
i16: diesel::deserialize::FromSql<diesel::sql_types::SmallInt, C::Backend>,
Vec<u8>: diesel::deserialize::FromSql<diesel::sql_types::Binary, C::Backend>,
{
fn remove_txn_receipt_by_index(
&self,
index: u64,
    ) -> Result<Option<TransactionReceipt>, ReceiptStoreError> {
        self.conn
            .transaction::<Option<TransactionReceipt>, _, _>(|| {
                let id = self
                    .get_txn_receipt_by_index(index)?
                    .ok_or_else(|| {
                        ReceiptStoreError::InvalidStateError(InvalidStateError::with_message(
                            format!("A transaction receipt with index {} does not exist", index),
                        ))
                    })?
                    .transaction_id;
                self.remove_txn_receipt_by_id(id)
            })
    }
}
shader.rs
use std::ptr;
use gl;
use gl::types::*;
use super::object::*;
fn set_source(shader: GLuint, src: &[u8]) {
unsafe {
let mut len = src.len() as GLint;
if src[len as usize - 1] == 0 {
len -= 1;
}
let glchars = src.as_ptr() as *const GLchar;
gl::ShaderSource(shader, 1, &glchars, &len);
}
}
fn info_log_length(shader: GLuint) -> GLint {
let mut len = 0;
unsafe {
gl::GetShaderiv(shader, gl::INFO_LOG_LENGTH, &mut len);
}
len
}
fn info_log(shader: GLuint) -> String {
    unsafe {
        let len = info_log_length(shader);
        if len <= 0 {
            return String::new(); // no log; also avoids the underflow below
        }
        // Allocate the full reported length so GL has room for the trailing
        // NUL, then truncate it away instead of exposing uninitialized memory.
        let mut buf = vec![0u8; len as usize];
        gl::GetShaderInfoLog(shader, len, ptr::null_mut(), buf.as_mut_ptr() as *mut GLchar);
        buf.truncate(len as usize - 1); // drop trailing NUL
        String::from_utf8(buf).unwrap_or("<UTF-8 error>".to_owned())
    }
}
macro_rules! shader {
($Self:ident $ty:ident) => {
impl $Self {
pub fn try_from_source(src: &[u8]) -> Result<Self, String> {
let s = Self::new();
s.set_source(src);
s.compile();
match s.compile_status() {
Ok(()) => Ok(s),
Err(()) => Err(s.info_log()),
}
}
pub fn compile_checked(&self) -> Result<(), String> {
self.compile();
match self.compile_status() {
Ok(()) => Ok(()),
Err(()) => Err(self.info_log()),
}
}
pub fn set_source(&self, src: &[u8]) {
set_source(self.0, src)
}
pub fn compile(&self) {
unsafe {
gl::CompileShader(self.0);
}
}
pub fn compile_status(&self) -> Result<(), ()> {
let mut status = gl::FALSE as GLint;
unsafe {
gl::GetShaderiv(self.gl_id(), gl::COMPILE_STATUS, &mut status);
}
                if status == gl::TRUE as _ { Ok(()) } else { Err(()) }
            }
            pub fn info_log_length(&self) -> GLint {
                info_log_length(self.0)
            }
            pub fn info_log(&self) -> String {
                info_log(self.0)
            }
        }
    };
}
shader!{ ComputeShader COMPUTE_SHADER }
shader!{ VertexShader VERTEX_SHADER }
shader!{ TessControlShader TESS_CONTROL_SHADER }
shader!{ TessEvaluationShader TESS_EVALUATION_SHADER }
shader!{ GeometryShader GEOMETRY_SHADER }
shader!{ FragmentShader FRAGMENT_SHADER }
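// Usage sketch (assumes a current GL context; the GLSL source is illustrative):
//     let src = b"#version 330 core\nvoid main() { gl_Position = vec4(0.0); }\0";
//     let vs = VertexShader::try_from_source(src).expect("compile log on failure");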
view.tsx
import { css, FC, jsx } from 'alumina';
import { languageKey } from '~/ui/base';
import { ClosableOverlay } from '~/ui/components';
import { boardResetInstructionPanelAssets } from '~/ui/fabrics/StandardFirmwareFlashPart/BoardResetInstructionPanel/assets';
type Props = {
  isOpen: boolean;
  close(): void;
};
const textSourceEn = {
panelTitle: 'How to reset the board',
summary:
'To flash firmware, put the MCU into bootloader mode by the following instruction.',
forAvr: 'For AVR',
forRp: 'For RP2040',
avrReset1:
'Press the reset button on the board twice quickly. \n (e.g. homemade keyboard with ProMicro)',
rpReset1:
'If there is a boot button and a reset button, press and release the reset button while holding down the boot button. \n (e.g. ProMicro RP2040)',
rpReset2:
'If there is only a boot button, connect the board to the PC while holding down the boot button. \n(e.g. Raspberry Pi Pico)',
rpReset3:
'If there is a reset button, you can also reset the board by pressing the reset button twice quickly for the second and subsequent writes.',
};
const textSourceJa = {
panelTitle: 'ボードのリセット方法',
summary:
'ファームウェアを書き込むために、以下の方法でMCUをブートローダモードにしてください。',
forAvr: 'AVRの場合',
forRp: 'RP2040の場合',
avrReset1:
'ボード上のリセットボタンを素早く2回押します。\n(ProMicroを使用した自作キーボードなど)',
rpReset1:
'bootボタンとリセットボタンがある場合、bootボタンを押しながらリセットボタンを押して離します。\n(ProMicro RP2040など)',
rpReset2:
'bootボタンだけしかない場合は、bootボタンを押しながらボードをPCに接続します。\n(Raspberry Pi Picoなど)',
rpReset3:
'リセットボタンがある場合、2回目以降の書込みではリセットボタンを素早く2回押すことでもリセットできます。',
};
export const BoardResetInstructionPanel: FC<Props> = ({ isOpen, close }) => {
const { IllustReset1, IllustReset2, IllustReset3 } =
boardResetInstructionPanelAssets;
const texts = languageKey === 'japanese' ? textSourceJa : textSourceEn;
return (
<ClosableOverlay close={close} if={isOpen}>
<div class={panelStyle}>
<div class="top-row">
<h2>{texts.panelTitle}</h2>
<div onClick={close} class="close-button">
<i class="fa fa-times" />
</div>
</div>
<div class="summary">{texts.summary}</div>
<h3>{texts.forAvr}</h3>
<div class="row">
<div class="frame">
<IllustReset1 />
</div>
<div class="inst">{texts.avrReset1}</div>
</div>
<h3>{texts.forRp}</h3>
<div class="row">
<div class="frame">
<IllustReset2 />
</div>
<div class="inst">{texts.rpReset1}</div>
</div>
<div class="row">
<div class="frame">
<IllustReset3 />
</div>
<div class="inst">{texts.rpReset2}</div>
</div>
<div class="row">
<div class="frame">
<IllustReset1 />
</div>
<div class="inst">{texts.rpReset3}</div>
</div>
</div>
</ClosableOverlay>
);
};
const panelStyle = css`
width: 600px;
background: #fff;
border-radius: 6px;
padding: 20px;
line-height: 1.4;
> .top-row {
display: flex;
align-items: center;
justify-content: space-between;
> .close-button {
cursor: pointer;
}
}
> h3 {
margin-top: 10px;
}
> .summary {
margin-top: 10px;
}
> .row {
& + .row {
margin-top: 8px;
}
display: flex;
> .frame {
flex-shrink: 0;
width: 140px;
height: 100px;
border: solid 1px #888;
padding: 5px;
margin-right: 10px;
}
> .inst {
white-space: pre-wrap;
}
}
> .table {
display: grid;
grid-template-columns: 100px auto;
}
`;
service.go
// Package mono provides embedded microservice.
package mono
import (
"context"
"net/http"
"regexp"
"strconv"
"github.com/powerman/appcfg"
"github.com/powerman/structlog" | "github.com/prometheus/client_golang/prometheus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/powerman/go-monolith-example/internal/config"
"github.com/powerman/go-monolith-example/pkg/concurrent"
"github.com/powerman/go-monolith-example/pkg/def"
"github.com/powerman/go-monolith-example/pkg/netx"
"github.com/powerman/go-monolith-example/pkg/serve"
)
// Ctx is a synonym for convenience.
type Ctx = context.Context
//nolint:gochecknoglobals // Config, flags and metrics are global anyway.
var (
fs *pflag.FlagSet
shared *config.Shared
own = &struct {
Port appcfg.Port `env:"MONO_ADDR_PORT"`
}{
Port: appcfg.MustPort(strconv.Itoa(config.MonoPort)),
}
reg = prometheus.NewPedanticRegistry()
)
// Service implements main.embeddedService interface.
type Service struct {
cfg struct {
BindAddr netx.Addr
}
mux *http.ServeMux
}
// Name implements main.embeddedService interface.
func (s *Service) Name() string { return "mono" }
// Init implements main.embeddedService interface.
func (s *Service) Init(sharedCfg *config.Shared, _, serveCmd *cobra.Command) error {
namespace := regexp.MustCompile(`[^a-zA-Z0-9]+`).ReplaceAllString(def.ProgName, "_")
initMetrics(reg, namespace)
fs, shared = serveCmd.Flags(), sharedCfg
fromEnv := appcfg.NewFromEnv(config.EnvPrefix)
err := appcfg.ProvideStruct(own, fromEnv)
pfx := s.Name() + "."
appcfg.AddPFlag(fs, &shared.AddrHostInt, "host-int", "internal host to serve")
appcfg.AddPFlag(fs, &own.Port, pfx+"port", "port to serve monolith introspection")
return err
}
// RunServe implements main.embeddedService interface.
func (s *Service) RunServe(_, ctxShutdown Ctx, shutdown func()) (err error) {
log := structlog.FromContext(ctxShutdown, nil)
if s.cfg.BindAddr.Host() == "" {
s.cfg.BindAddr = netx.NewAddr(shared.AddrHostInt.Value(&err), own.Port.Value(&err))
if err != nil {
return log.Err("failed to get config", "err", appcfg.WrapPErr(err, fs, shared, own))
}
}
s.mux = http.NewServeMux()
serve.HandleMetrics(s.mux, reg)
s.mux.Handle("/health-check", http.HandlerFunc(s.serveHealthCheck))
err = concurrent.Serve(ctxShutdown, shutdown,
s.serveHTTP,
)
if err != nil {
return log.Err("failed to serve", "err", err)
}
return nil
}
func (s *Service) serveHTTP(ctx Ctx) error {
return serve.HTTP(ctx, s.cfg.BindAddr, nil, s.mux, "monolith introspection")
}
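// Usage sketch (hypothetical wiring in package main, where the
// embeddedService interface referenced above is defined):
//	svc := &mono.Service{}
//	if err := svc.Init(sharedCfg, rootCmd, serveCmd); err != nil {
//		log.Fatal(err)
//	}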
options.ts
/*
* Copyright 2014 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { assert, release } from './utilities/Debug';
import { isObject } from './utilities';
/**
* Option and Argument Management
*
* Options are configuration settings sprinkled throughout the code. They can be grouped into sets of
* options called |OptionSets| which can form a hierarchy of options. For instance:
*
* var set = new OptionSet();
* var opt = set.register(new Option("v", "verbose", "boolean", false, "Enables verbose logging."));
*
* creates an option set with one option in it. The option can be changed directly using |opt.value = true| or
* automatically using the |ArgumentParser|:
*
* var parser = new ArgumentParser();
* parser.addBoundOptionSet(set);
* parser.parse(["-v"]);
*
* The |ArgumentParser| can also be used directly:
*
* var parser = new ArgumentParser();
 *   parser.addArgument("h", "help", "boolean", {parse: function (x) {
* printUsage();
* }});
*/
export class Argument {
shortName: string;
longName: string;
type: any;
options: any;
positional: boolean;
parseFn: any;
value: any;
constructor(shortName, longName, type, options) {
this.shortName = shortName;
this.longName = longName;
this.type = type;
options = options || {};
this.positional = options.positional;
this.parseFn = options.parse;
this.value = options.defaultValue;
}
public parse(value) {
if (this.type === 'boolean') {
release || assert(typeof value === 'boolean');
this.value = value;
} else if (this.type === 'number') {
release || assert(!isNaN(value), value + ' is not a number');
this.value = parseInt(value, 10);
} else {
this.value = value;
}
if (this.parseFn) {
this.parseFn(this.value);
}
}
}
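// e.g. new Argument('n', 'count', 'number', {}).parse('42') stores the number 42;
// 'boolean' arguments assert they are handed an actual boolean by the parser.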
export class ArgumentParser {
args: any [];
constructor() {
this.args = [];
}
public addArgument(shortName, longName, type, options) {
const argument = new Argument(shortName, longName, type, options);
this.args.push(argument);
return argument;
}
public addBoundOption(option) {
const options = { parse: function (x) {
option.value = x;
} };
this.args.push(new Argument(option.shortName, option.longName, option.type, options));
}
public addBoundOptionSet(optionSet) {
const self = this;
optionSet.options.forEach(function (x) {
if (OptionSet.isOptionSet(x)) {
self.addBoundOptionSet(x);
} else {
release || assert(x);
self.addBoundOption(x);
}
});
}
public getUsage () {
let str = '';
this.args.forEach(function (x) {
if (!x.positional) {
str += '[-' + x.shortName + '|--' + x.longName + (x.type === 'boolean' ? '' : ' ' + x.type[0].toUpperCase()) + ']';
} else {
str += x.longName;
}
str += ' ';
});
return str;
}
public parse (args) {
const nonPositionalArgumentMap = {};
const positionalArgumentList = [];
this.args.forEach(function (x) {
if (x.positional) {
positionalArgumentList.push(x);
} else {
nonPositionalArgumentMap['-' + x.shortName] = x;
nonPositionalArgumentMap['--' + x.longName] = x;
}
});
let leftoverArguments = [];
while (args.length) {
const argString = args.shift();
let argument = null, value = argString;
if (argString == '--') {
leftoverArguments = leftoverArguments.concat(args);
break;
} else if (argString.slice(0, 1) == '-' || argString.slice(0, 2) == '--') {
argument = nonPositionalArgumentMap[argString];
// release || assert(argument, "Argument " + argString + " is unknown.");
if (!argument) {
continue;
}
if (argument.type !== 'boolean') {
value = args.shift();
release || assert(value !== '-' && value !== '--', 'Argument ' + argString + ' must have a value.');
} else {
if (args.length && ['yes', 'no', 'true', 'false', 't', 'f'].indexOf(args[0]) >= 0) {
value = ['yes', 'true', 't'].indexOf(args.shift()) >= 0;
} else {
value = true;
}
}
} else if (positionalArgumentList.length) {
argument = positionalArgumentList.shift();
} else {
leftoverArguments.push(value);
}
if (argument) {
argument.parse(value);
}
}
release || assert(positionalArgumentList.length === 0, 'Missing positional arguments.');
return leftoverArguments;
}
}
export class OptionSet {
name: string;
settings: any;
options: any;
open: boolean = false;
public static isOptionSet(obj: any): boolean {
// We will be getting options from different iframe, so this function will
// check if the obj somewhat like OptionSet.
if (obj instanceof OptionSet) {
return true;
}
if (typeof obj !== 'object' || obj === null ||
obj instanceof Option) {
return false;
}
return ('options' in obj) && ('name' in obj) && ('settings' in obj);
}
constructor(name: string, settings: any = null) {
this.name = name;
this.settings = settings || {};
this.options = [];
}
public register(option) {
if (OptionSet.isOptionSet(option)) {
// check for duplicate option sets (bail if found)
for (let i = 0; i < this.options.length; i++) {
const optionSet = this.options[i];
if (OptionSet.isOptionSet(optionSet) && optionSet.name === option.name) {
return optionSet;
}
}
}
this.options.push(option);
if (this.settings) {
if (OptionSet.isOptionSet(option)) {
const optionSettings = this.settings[option.name];
if (isObject(optionSettings)) {
option.settings = optionSettings.settings;
option.open = optionSettings.open;
}
} else {
// build_bundle chokes on this:
// if (!isNullOrUndefined(this.settings[option.longName])) {
if (typeof this.settings[option.longName] !== 'undefined') {
switch (option.type) {
case 'boolean':
option.value = !!this.settings[option.longName];
break;
case 'number':
option.value = +this.settings[option.longName];
break;
default:
option.value = this.settings[option.longName];
break;
}
}
}
}
return option;
}
public trace(writer) {
writer.enter(this.name + ' {');
this.options.forEach(function (option) {
option.trace(writer);
});
writer.leave('}');
}
public getSettings() {
const settings = {};
this.options.forEach(function(option) {
if (OptionSet.isOptionSet(option)) {
settings[option.name] = {
settings: option.getSettings(),
open: option.open
};
} else {
settings[option.longName] = option.value;
}
});
return settings;
}
public setSettings(settings: any) {
if (!settings) {
return;
}
this.options.forEach(function (option) {
      if (OptionSet.isOptionSet(option)) {
        if (option.name in settings) {
          option.setSettings(settings[option.name].settings);
        }
      } else {
        if (option.longName in settings) {
option.value = settings[option.longName];
}
}
});
}
}
export class Option {
longName: string;
shortName: string;
type: string;
defaultValue: any;
value: any; // during options merge can be changed to accessor
description: string;
config: any;
/**
* Dat GUI control.
*/
// TODO remove, player will not have access to the DOM
ctrl: any;
// config:
// { range: { min: 1, max: 5, step: 1 } }
// { list: [ "item 1", "item 2", "item 3" ] }
// { choices: { "choice 1": 1, "choice 2": 2, "choice 3": 3 } }
constructor(shortName, longName, type, defaultValue, description, config = null) {
this.longName = longName;
this.shortName = shortName;
this.type = type;
this.defaultValue = defaultValue;
this.value = defaultValue;
this.description = description;
this.config = config;
}
public parse (value) {
this.value = value;
}
public trace (writer) {
writer.writeLn(('-' + this.shortName + '|--' + this.longName).padRight(' ', 30) +
' = ' + this.type + ' ' + this.value + ' [' + this.defaultValue + ']' +
' (' + this.description + ')');
}
}
mod.rs
#![allow(missing_docs)]
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Simple time handling.
//! This crate uses the same syntax for format strings as the
//! [`strftime()`](http://man7.org/linux/man-pages/man3/strftime.3.html)
//! function from the C standard library.
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/time/")]
#![allow(trivial_numeric_casts, ellipsis_inclusive_range_patterns)]
mod display;
mod duration;
mod parse;
mod sys;
use std::cmp::Ordering;
use std::error::Error;
use std::fmt;
use std::ops::{Add, Sub};
pub use self::duration::{Duration, OutOfRangeError};
use self::ParseError::{InvalidDay, InvalidDayOfMonth, InvalidDayOfWeek,
InvalidDayOfYear, InvalidFormatSpecifier, InvalidHour,
InvalidMinute, InvalidMonth, InvalidSecond, InvalidTime,
InvalidYear, InvalidZoneOffset, InvalidSecondsSinceEpoch,
MissingFormatConverter, UnexpectedCharacter};
pub use self::parse::strptime;
pub static NSEC_PER_SEC: i32 = 1_000_000_000;
/// A record specifying a time value in seconds and nanoseconds, where
/// nanoseconds represent the offset from the given second.
///
/// For example a timespec of 1.2 seconds after the beginning of the epoch would
/// be represented as {sec: 1, nsec: 200000000}.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
#[cfg_attr(feature = "rustc-serialize", derive(RustcEncodable, RustcDecodable))]
pub struct Timespec { pub sec: i64, pub nsec: i32 }
/*
* Timespec assumes that pre-epoch Timespecs have negative sec and positive
* nsec fields. Darwin's and Linux's struct timespec functions handle pre-
* epoch timestamps using a "two steps back, one step forward" representation,
* though the man pages do not actually document this. For example, the time
* -1.2 seconds before the epoch is represented by `Timespec { sec: -2_i64,
* nsec: 800_000_000 }`.
*/
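// A minimal demonstration of the invariant (sketch, using the Sub impl below):
//     assert_eq!(Timespec::new(0, 0) - Duration::nanoseconds(1_200_000_000),
//                Timespec::new(-2, 800_000_000));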
impl Timespec {
pub fn new(sec: i64, nsec: i32) -> Timespec {
assert!(nsec >= 0 && nsec < NSEC_PER_SEC);
Timespec { sec: sec, nsec: nsec }
}
}
impl Add<Duration> for Timespec {
type Output = Timespec;
fn add(self, other: Duration) -> Timespec {
let d_sec = other.num_seconds();
// It is safe to unwrap the nanoseconds, because there cannot be
// more than one second left, which fits in i64 and in i32.
let d_nsec = (other - Duration::seconds(d_sec))
.num_nanoseconds().unwrap() as i32;
let mut sec = self.sec + d_sec;
let mut nsec = self.nsec + d_nsec;
if nsec >= NSEC_PER_SEC {
nsec -= NSEC_PER_SEC;
sec += 1;
} else if nsec < 0 {
nsec += NSEC_PER_SEC;
sec -= 1;
}
Timespec::new(sec, nsec)
}
}
impl Sub<Duration> for Timespec {
type Output = Timespec;
fn sub(self, other: Duration) -> Timespec {
let d_sec = other.num_seconds();
// It is safe to unwrap the nanoseconds, because there cannot be
// more than one second left, which fits in i64 and in i32.
let d_nsec = (other - Duration::seconds(d_sec))
.num_nanoseconds().unwrap() as i32;
let mut sec = self.sec - d_sec;
let mut nsec = self.nsec - d_nsec;
if nsec >= NSEC_PER_SEC {
nsec -= NSEC_PER_SEC;
sec += 1;
} else if nsec < 0 {
nsec += NSEC_PER_SEC;
sec -= 1;
}
Timespec::new(sec, nsec)
}
}
impl Sub<Timespec> for Timespec {
type Output = Duration;
fn sub(self, other: Timespec) -> Duration {
let sec = self.sec - other.sec;
let nsec = self.nsec - other.nsec;
Duration::seconds(sec) + Duration::nanoseconds(nsec as i64)
}
}
/**
* Returns the current time as a `timespec` containing the seconds and
* nanoseconds since 1970-01-01T00:00:00Z.
*/
pub fn get_time() -> Timespec {
let (sec, nsec) = sys::get_time();
Timespec::new(sec, nsec)
}
/**
* Returns the current value of a high-resolution performance counter
* in nanoseconds since an unspecified epoch.
*/
#[inline]
pub fn precise_time_ns() -> u64 {
sys::get_precise_ns()
}
/**
* Returns the current value of a high-resolution performance counter
* in seconds since an unspecified epoch.
*/
#[allow(unused)]
pub fn precise_time_s() -> f64 {
return (precise_time_ns() as f64) / 1000000000.;
}
/// An opaque structure representing a moment in time.
///
/// The only operation that can be performed on a `PreciseTime` is the
/// calculation of the `Duration` of time that lies between them.
///
/// # Examples
///
/// Repeatedly call a function for 1 second:
///
/// ```rust
/// use chrono::{Duration, PreciseTime};
/// # fn do_some_work() {}
///
/// let start = PreciseTime::now();
///
/// while start.to(PreciseTime::now()) < Duration::seconds(1) {
/// do_some_work();
/// }
/// ```
#[derive(Debug, Copy, Clone)]
pub struct PreciseTime(u64);
impl PreciseTime {
/// Returns a `PreciseTime` representing the current moment in time.
pub fn now() -> PreciseTime {
PreciseTime(precise_time_ns())
}
/// Returns a `Duration` representing the span of time from the value of
/// `self` to the value of `later`.
///
/// # Notes
///
/// If `later` represents a time before `self`, the result of this method
/// is unspecified.
///
/// If `later` represents a time more than 293 years after `self`, the
/// result of this method is unspecified.
#[inline]
pub fn to(&self, later: PreciseTime) -> Duration {
// NB: even if later is less than self due to overflow, this will work
// since the subtraction will underflow properly as well.
//
// We could deal with the overflow when casting to an i64, but all that
// gets us is the ability to handle intervals of up to 584 years, which
// seems not very useful :)
Duration::nanoseconds((later.0 - self.0) as i64)
}
}
/// A structure representing a moment in time.
///
/// `SteadyTime`s are generated by a "steady" clock, that is, a clock which
/// never experiences discontinuous jumps and for which time always flows at
/// the same rate.
///
/// # Examples
///
/// Repeatedly call a function for 1 second:
///
/// ```rust
/// # use chrono::{Duration, SteadyTime};
/// # fn do_some_work() {}
/// let start = SteadyTime::now();
///
/// while SteadyTime::now() - start < Duration::seconds(1) {
/// do_some_work();
/// }
/// ```
#[derive(Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Debug)]
pub struct SteadyTime(sys::SteadyTime);
impl SteadyTime {
/// Returns a `SteadyTime` representing the current moment in time.
pub fn now() -> SteadyTime {
SteadyTime(sys::SteadyTime::now())
}
}
impl fmt::Display for SteadyTime {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
// TODO: needs a display customization
fmt::Debug::fmt(self, fmt)
}
}
impl Sub for SteadyTime {
type Output = Duration;
fn sub(self, other: SteadyTime) -> Duration {
self.0 - other.0
}
}
impl Sub<Duration> for SteadyTime {
type Output = SteadyTime;
fn sub(self, other: Duration) -> SteadyTime {
SteadyTime(self.0 - other)
}
}
impl Add<Duration> for SteadyTime {
type Output = SteadyTime;
fn add(self, other: Duration) -> SteadyTime {
SteadyTime(self.0 + other)
}
}
#[cfg(not(windows))]
#[allow(unused)]
pub fn tzset() {
extern { fn tzset(); }
unsafe { tzset() }
}
#[cfg(windows)]
pub fn tzset() {}
/// Holds a calendar date and time broken down into its components (year, month,
/// day, and so on), also called a broken-down time value.
// FIXME: use c_int instead of i32?
#[repr(C)]
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
#[cfg_attr(feature = "rustc-serialize", derive(RustcEncodable, RustcDecodable))]
pub struct Tm {
/// Seconds after the minute - [0, 60]
pub tm_sec: i32,
/// Minutes after the hour - [0, 59]
pub tm_min: i32,
/// Hours after midnight - [0, 23]
pub tm_hour: i32,
/// Day of the month - [1, 31]
pub tm_mday: i32,
/// Months since January - [0, 11]
pub tm_mon: i32,
/// Years since 1900
pub tm_year: i32,
/// Days since Sunday - [0, 6]. 0 = Sunday, 1 = Monday, ..., 6 = Saturday.
pub tm_wday: i32,
/// Days since January 1 - [0, 365]
pub tm_yday: i32,
/// Daylight Saving Time flag.
///
/// This value is positive if Daylight Saving Time is in effect, zero if
/// Daylight Saving Time is not in effect, and negative if this information
/// is not available.
pub tm_isdst: i32,
/// Identifies the time zone that was used to compute this broken-down time
/// value, including any adjustment for Daylight Saving Time. This is the
/// number of seconds east of UTC. For example, for U.S. Pacific Daylight
/// Time, the value is `-7*60*60 = -25200`.
pub tm_utcoff: i32,
/// Nanoseconds after the second - [0, 10<sup>9</sup> - 1]
pub tm_nsec: i32,
}
impl Add<Duration> for Tm {
type Output = Tm;
/// The resulting Tm is in UTC.
// FIXME: The resulting Tm should have the same timezone as `self`;
// however, we need a function such as `at_tm(clock: Timespec, offset: i32)`
// for this.
fn add(self, other: Duration) -> Tm {
at_utc(self.to_timespec() + other)
}
}
impl Sub<Duration> for Tm {
type Output = Tm;
/// The resulting Tm is in UTC.
// FIXME: The resulting Tm should have the same timezone as `self`;
// however, we need a function such as `at_tm(clock: Timespec, offset: i32)`
// for this.
fn sub(self, other: Duration) -> Tm {
at_utc(self.to_timespec() - other)
}
}
impl Sub<Tm> for Tm {
type Output = Duration;
fn sub(self, other: Tm) -> Duration {
self.to_timespec() - other.to_timespec()
}
}
impl PartialOrd for Tm {
fn partial_cmp(&self, other: &Tm) -> Option<Ordering> {
self.to_timespec().partial_cmp(&other.to_timespec())
}
}
impl Ord for Tm {
fn cmp(&self, other: &Tm) -> Ordering {
self.to_timespec().cmp(&other.to_timespec())
}
}
pub fn empty_tm() -> Tm {
Tm {
tm_sec: 0,
tm_min: 0,
tm_hour: 0,
tm_mday: 0,
tm_mon: 0,
tm_year: 0,
tm_wday: 0,
tm_yday: 0,
tm_isdst: 0,
tm_utcoff: 0,
tm_nsec: 0,
}
}
/// Returns the specified time in UTC
pub fn at_utc(clock: Timespec) -> Tm {
let Timespec { sec, nsec } = clock;
let mut tm = empty_tm();
sys::time_to_utc_tm(sec, &mut tm);
tm.tm_nsec = nsec;
tm
}
/// Returns the current time in UTC
#[allow(unused)]
pub fn now_utc() -> Tm {
at_utc(get_time())
}
/// Returns the specified time in the local timezone
pub fn at(clock: Timespec) -> Tm {
let Timespec { sec, nsec } = clock;
let mut tm = empty_tm();
sys::time_to_local_tm(sec, &mut tm);
tm.tm_nsec = nsec;
tm
}
/// Returns the current time in the local timezone
pub fn now() -> Tm {
at(get_time())
}
impl Tm {
    /// Convert the time to seconds since January 1, 1970
pub fn to_timespec(&self) -> Timespec {
let sec = match self.tm_utcoff {
0 => sys::utc_tm_to_time(self),
_ => sys::local_tm_to_time(self)
};
Timespec::new(sec, self.tm_nsec)
}
/// Convert time to the local timezone
pub fn to_local(&self) -> Tm {
at(self.to_timespec())
}
    /// Convert time to UTC
#[allow(unused)]
pub fn to_utc(&self) -> Tm {
match self.tm_utcoff {
0 => *self,
_ => at_utc(self.to_timespec())
}
}
/**
* Returns a TmFmt that outputs according to the `asctime` format in ISO
* C, in the local timezone.
*
* Example: "Thu Jan 1 00:00:00 1970"
*/
#[allow(unused)]
pub fn ctime(&self) -> TmFmt {
TmFmt {
tm: self,
format: Fmt::Ctime,
}
}
/**
* Returns a TmFmt that outputs according to the `asctime` format in ISO
* C.
*
* Example: "Thu Jan 1 00:00:00 1970"
*/
pub fn asctime(&self) -> TmFmt {
TmFmt {
tm: self,
format: Fmt::Str("%c"),
}
}
/// Formats the time according to the format string.
#[allow(unused)]
pub fn strftime<'a>(&'a self, format: &'a str) -> Result<TmFmt<'a>, ParseError> {
validate_format(TmFmt {
tm: self,
format: Fmt::Str(format),
})
}
/**
* Returns a TmFmt that outputs according to RFC 822.
*
* local: "Thu, 22 Mar 2012 07:53:18 PST"
* utc: "Thu, 22 Mar 2012 14:53:18 GMT"
*/
#[allow(unused)]
pub fn rfc822(&self) -> TmFmt {
let fmt = if self.tm_utcoff == 0 {
"%a, %d %b %Y %T GMT"
} else {
"%a, %d %b %Y %T %Z"
};
TmFmt {
tm: self,
format: Fmt::Str(fmt),
}
}
/**
* Returns a TmFmt that outputs according to RFC 822 with Zulu time.
*
* local: "Thu, 22 Mar 2012 07:53:18 -0700"
* utc: "Thu, 22 Mar 2012 14:53:18 -0000"
*/
#[allow(unused)]
pub fn rfc822z(&self) -> TmFmt {
TmFmt {
tm: self,
format: Fmt::Str("%a, %d %b %Y %T %z"),
}
}
/**
* Returns a TmFmt that outputs according to RFC 3339. RFC 3339 is
* compatible with ISO 8601.
*
* local: "2012-02-22T07:53:18-07:00"
* utc: "2012-02-22T14:53:18Z"
*/
pub fn rfc3339<'a>(&'a self) -> TmFmt {
TmFmt {
tm: self,
format: Fmt::Rfc3339,
}
}
}
#[derive(Copy, PartialEq, Debug, Clone)]
pub enum ParseError {
InvalidSecond,
InvalidMinute,
InvalidHour,
InvalidDay,
InvalidMonth,
InvalidYear,
InvalidDayOfWeek,
InvalidDayOfMonth,
InvalidDayOfYear,
InvalidZoneOffset,
InvalidTime,
InvalidSecondsSinceEpoch,
MissingFormatConverter,
InvalidFormatSpecifier(char),
UnexpectedCharacter(char, char),
}
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
InvalidFormatSpecifier(ch) => {
write!(f, "{}: %{}", self.description(), ch)
}
UnexpectedCharacter(a, b) => {
write!(f, "expected: `{}`, found: `{}`", a, b)
}
_ => write!(f, "{}", self.description())
}
}
}
impl Error for ParseError {
fn description(&self) -> &str {
match *self {
InvalidSecond => "Invalid second.",
InvalidMinute => "Invalid minute.",
InvalidHour => "Invalid hour.",
InvalidDay => "Invalid day.",
InvalidMonth => "Invalid month.",
InvalidYear => "Invalid year.",
InvalidDayOfWeek => "Invalid day of the week.",
InvalidDayOfMonth => "Invalid day of the month.",
InvalidDayOfYear => "Invalid day of the year.",
InvalidZoneOffset => "Invalid zone offset.",
InvalidTime => "Invalid time.",
InvalidSecondsSinceEpoch => "Invalid seconds since epoch.",
MissingFormatConverter => "missing format converter after `%`",
InvalidFormatSpecifier(..) => "invalid format specifier",
UnexpectedCharacter(..) => "Unexpected character.",
}
}
}
/// A wrapper around a `Tm` and format string that implements Display.
#[derive(Debug)]
pub struct TmFmt<'a> {
tm: &'a Tm,
format: Fmt<'a>
}
#[derive(Debug)]
enum Fmt<'a> {
Str(&'a str),
Rfc3339,
Ctime,
}
fn validate_format<'a>(fmt: TmFmt<'a>) -> Result<TmFmt<'a>, ParseError> {
match (fmt.tm.tm_wday, fmt.tm.tm_mon) {
(0...6, 0...11) => (),
(_wday, 0...11) => return Err(InvalidDayOfWeek),
(0...6, _mon) => return Err(InvalidMonth),
_ => return Err(InvalidDay)
}
match fmt.format {
Fmt::Str(ref s) => {
let mut chars = s.chars();
loop {
match chars.next() {
Some('%') => {
match chars.next() {
Some('A') | Some('a') | Some('B') | Some('b') |
Some('C') | Some('c') | Some('D') | Some('d') |
Some('e') | Some('F') | Some('f') | Some('G') |
Some('g') | Some('H') | Some('h') | Some('I') |
Some('j') | Some('k') | Some('l') | Some('M') |
Some('m') | Some('n') | Some('P') | Some('p') |
Some('R') | Some('r') | Some('S') | Some('s') |
Some('T') | Some('t') | Some('U') | Some('u') |
Some('V') | Some('v') | Some('W') | Some('w') |
Some('X') | Some('x') | Some('Y') | Some('y') |
Some('Z') | Some('z') | Some('+') | Some('%') => (),
Some(c) => return Err(InvalidFormatSpecifier(c)),
None => return Err(MissingFormatConverter),
}
},
None => break,
_ => ()
}
}
},
_ => ()
}
Ok(fmt)
}
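// e.g. validate_format rejects "%Q" with InvalidFormatSpecifier('Q') and a
// dangling "%" with MissingFormatConverter; see test_strftime below.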
/// Formats the time according to the format string.
#[allow(unused)]
pub fn strftime(format: &str, tm: &Tm) -> Result<String, ParseError> {
tm.strftime(format).map(|fmt| fmt.to_string())
}
#[cfg(test)]
mod tests {
use ::time::sys;
use super::{Timespec, get_time, precise_time_ns, precise_time_s,
at_utc, at, strptime, PreciseTime, SteadyTime, ParseError, Duration};
use super::ParseError::{InvalidTime, InvalidYear, MissingFormatConverter,
InvalidFormatSpecifier};
use std::sync::{Once, Mutex, MutexGuard, LockResult};
use std::mem;
struct TzReset {
_tzreset: sys::TzReset,
_lock: LockResult<MutexGuard<'static, ()>>,
}
fn set_time_zone_la_or_london(london: bool) -> TzReset {
// Lock manages current timezone because some tests require LA some
// London
static mut LOCK: *mut Mutex<()> = 0 as *mut _;
static INIT: Once = Once::new();
unsafe {
INIT.call_once(|| {
LOCK = mem::transmute(Box::new(Mutex::new(())));
});
let timezone_lock = (*LOCK).lock();
let reset_func = if london {
sys::set_london_with_dst_time_zone()
} else {
sys::set_los_angeles_time_zone()
};
TzReset {
_lock: timezone_lock,
_tzreset: reset_func,
}
}
}
fn set_time_zone() -> TzReset {
set_time_zone_la_or_london(false)
}
fn set_time_zone_london_dst() -> TzReset {
set_time_zone_la_or_london(true)
}
#[test]
fn test_get_time() {
static SOME_RECENT_DATE: i64 = 1325376000i64; // 2012-01-01T00:00:00Z
static SOME_FUTURE_DATE: i64 = 1577836800i64; // 2020-01-01T00:00:00Z
let tv1 = get_time();
debug!("tv1={} sec + {} nsec", tv1.sec, tv1.nsec);
assert!(tv1.sec > SOME_RECENT_DATE);
assert!(tv1.nsec < 1000000000i32);
let tv2 = get_time();
debug!("tv2={} sec + {} nsec", tv2.sec, tv2.nsec);
assert!(tv2.sec >= tv1.sec);
assert!(tv2.sec < SOME_FUTURE_DATE);
assert!(tv2.nsec < 1000000000i32);
if tv2.sec == tv1.sec {
assert!(tv2.nsec >= tv1.nsec);
}
}
#[test]
fn test_precise_time() {
let s0 = precise_time_s();
debug!("s0={} sec", s0);
assert!(s0 > 0.);
let ns0 = precise_time_ns();
let ns1 = precise_time_ns();
debug!("ns0={} ns", ns0);
debug!("ns1={} ns", ns1);
assert!(ns1 >= ns0);
let ns2 = precise_time_ns();
debug!("ns2={} ns", ns2);
assert!(ns2 >= ns1);
}
#[test]
fn test_precise_time_to() {
let t0 = PreciseTime(1000);
let t1 = PreciseTime(1023);
assert_eq!(Duration::nanoseconds(23), t0.to(t1));
}
#[test]
fn test_at_utc() {
let _reset = set_time_zone();
let time = Timespec::new(1234567890, 54321);
let utc = at_utc(time);
assert_eq!(utc.tm_sec, 30);
assert_eq!(utc.tm_min, 31);
assert_eq!(utc.tm_hour, 23);
assert_eq!(utc.tm_mday, 13);
assert_eq!(utc.tm_mon, 1);
assert_eq!(utc.tm_year, 109);
assert_eq!(utc.tm_wday, 5);
assert_eq!(utc.tm_yday, 43);
assert_eq!(utc.tm_isdst, 0);
assert_eq!(utc.tm_utcoff, 0);
assert_eq!(utc.tm_nsec, 54321);
}
#[test]
fn test_at() {
let _reset = set_time_zone();
let time = Timespec::new(1234567890, 54321);
let local = at(time);
debug!("time_at: {:?}", local);
assert_eq!(local.tm_sec, 30);
assert_eq!(local.tm_min, 31);
assert_eq!(local.tm_hour, 15);
assert_eq!(local.tm_mday, 13);
assert_eq!(local.tm_mon, 1);
assert_eq!(local.tm_year, 109);
assert_eq!(local.tm_wday, 5);
assert_eq!(local.tm_yday, 43);
assert_eq!(local.tm_isdst, 0);
assert_eq!(local.tm_utcoff, -28800);
assert_eq!(local.tm_nsec, 54321);
}
#[test]
fn test_to_timespec() {
let _reset = set_time_zone();
let time = Timespec::new(1234567890, 54321);
let utc = at_utc(time);
assert_eq!(utc.to_timespec(), time);
assert_eq!(utc.to_local().to_timespec(), time);
}
#[test]
fn test_conversions() {
let _reset = set_time_zone();
let time = Timespec::new(1234567890, 54321);
let utc = at_utc(time);
let local = at(time);
assert!(local.to_local() == local);
assert!(local.to_utc() == utc);
assert!(local.to_utc().to_local() == local);
assert!(utc.to_utc() == utc);
assert!(utc.to_local() == local);
assert!(utc.to_local().to_utc() == utc);
}
#[test]
fn test_strptime() {
let _reset = set_time_zone();
match strptime("", "") {
Ok(ref tm) => {
assert!(tm.tm_sec == 0);
assert!(tm.tm_min == 0);
assert!(tm.tm_hour == 0);
assert!(tm.tm_mday == 0);
assert!(tm.tm_mon == 0);
assert!(tm.tm_year == 0);
assert!(tm.tm_wday == 0);
assert!(tm.tm_isdst == 0);
assert!(tm.tm_utcoff == 0);
assert!(tm.tm_nsec == 0);
}
Err(_) => ()
}
let format = "%a %b %e %T.%f %Y";
assert_eq!(strptime("", format), Err(ParseError::InvalidDay));
assert_eq!(strptime("Fri Feb 13 15:31:30", format),
Err(InvalidTime));
match strptime("Fri Feb 13 15:31:30.01234 2009", format) {
Err(e) => panic!("{}", e),
Ok(ref tm) => {
assert_eq!(tm.tm_sec, 30);
assert_eq!(tm.tm_min, 31);
assert_eq!(tm.tm_hour, 15);
assert_eq!(tm.tm_mday, 13);
assert_eq!(tm.tm_mon, 1);
assert_eq!(tm.tm_year, 109);
assert_eq!(tm.tm_wday, 5);
assert_eq!(tm.tm_yday, 0);
assert_eq!(tm.tm_isdst, 0);
assert_eq!(tm.tm_utcoff, 0);
assert_eq!(tm.tm_nsec, 12340000);
}
}
fn test(s: &str, format: &str) -> bool {
match strptime(s, format) {
Ok(tm) => {
tm.strftime(format).unwrap().to_string() == s.to_string()
},
Err(e) => panic!("{:?}, s={:?}, format={:?}", e, s, format)
}
}
fn test_oneway(s : &str, format : &str) -> bool {
match strptime(s, format) {
Ok(_) => {
// oneway tests are used when reformatting the parsed Tm
// back into a string can generate a different string
// from the original (i.e. leading zeroes)
true
},
Err(e) => panic!("{:?}, s={:?}, format={:?}", e, s, format)
}
}
let days = [
"Sunday".to_string(),
"Monday".to_string(),
"Tuesday".to_string(),
"Wednesday".to_string(),
"Thursday".to_string(),
"Friday".to_string(),
"Saturday".to_string()
];
for day in days.iter() {
assert!(test(&day, "%A"));
}
let days = [
"Sun".to_string(),
"Mon".to_string(),
"Tue".to_string(),
"Wed".to_string(),
"Thu".to_string(),
"Fri".to_string(),
"Sat".to_string()
];
for day in days.iter() {
assert!(test(&day, "%a"));
}
let months = [
"January".to_string(),
"February".to_string(),
"March".to_string(),
"April".to_string(),
"May".to_string(),
"June".to_string(),
"July".to_string(),
"August".to_string(),
"September".to_string(),
"October".to_string(),
"November".to_string(),
"December".to_string()
];
for day in months.iter() {
assert!(test(&day, "%B"));
}
let months = [
"Jan".to_string(),
"Feb".to_string(),
"Mar".to_string(),
"Apr".to_string(),
"May".to_string(),
"Jun".to_string(),
"Jul".to_string(),
"Aug".to_string(),
"Sep".to_string(),
"Oct".to_string(),
"Nov".to_string(),
"Dec".to_string()
];
for day in months.iter() {
assert!(test(&day, "%b"));
}
assert!(test("19", "%C"));
assert!(test("Fri Feb 3 23:31:30 2009", "%c"));
assert!(test("Fri Feb 13 23:31:30 2009", "%c"));
assert!(test("02/13/09", "%D"));
assert!(test("03", "%d"));
assert!(test("13", "%d"));
assert!(test(" 3", "%e"));
assert!(test("13", "%e"));
assert!(test("2009-02-13", "%F"));
assert!(test("03", "%H"));
assert!(test("13", "%H"));
assert!(test("03", "%I")); // FIXME (#2350): flesh out
assert!(test("11", "%I")); // FIXME (#2350): flesh out
assert!(test("044", "%j"));
assert!(test(" 3", "%k"));
assert!(test("13", "%k"));
assert!(test(" 1", "%l"));
assert!(test("11", "%l"));
assert!(test("03", "%M"));
assert!(test("13", "%M"));
assert!(test("\n", "%n"));
assert!(test("am", "%P"));
assert!(test("pm", "%P"));
assert!(test("AM", "%p"));
assert!(test("PM", "%p"));
assert!(test("23:31", "%R"));
assert!(test("11:31:30 AM", "%r"));
assert!(test("11:31:30 PM", "%r"));
assert!(test("03", "%S"));
assert!(test("13", "%S"));
assert!(test("15:31:30", "%T"));
assert!(test("\t", "%t"));
assert!(test("1", "%u"));
assert!(test("7", "%u"));
assert!(test("13-Feb-2009", "%v"));
assert!(test("0", "%w"));
assert!(test("6", "%w"));
assert!(test("2009", "%Y"));
assert!(test("09", "%y"));
assert!(test_oneway("3", "%d"));
assert!(test_oneway("3", "%H"));
assert!(test_oneway("3", "%e"));
assert!(test_oneway("3", "%M"));
assert!(test_oneway("3", "%S"));
assert!(strptime("-0000", "%z").unwrap().tm_utcoff == 0);
assert!(strptime("-00:00", "%z").unwrap().tm_utcoff == 0);
assert!(strptime("Z", "%z").unwrap().tm_utcoff == 0);
assert_eq!(-28800, strptime("-0800", "%z").unwrap().tm_utcoff);
assert_eq!(-28800, strptime("-08:00", "%z").unwrap().tm_utcoff);
assert_eq!(28800, strptime("+0800", "%z").unwrap().tm_utcoff);
assert_eq!(28800, strptime("+08:00", "%z").unwrap().tm_utcoff);
assert_eq!(5400, strptime("+0130", "%z").unwrap().tm_utcoff);
assert_eq!(5400, strptime("+01:30", "%z").unwrap().tm_utcoff);
assert!(test("%", "%%"));
// Test for #7256
assert_eq!(strptime("360", "%Y-%m-%d"), Err(InvalidYear));
// Test for epoch seconds parsing
{
assert!(test("1428035610", "%s"));
let tm = strptime("1428035610", "%s").unwrap();
assert_eq!(tm.tm_utcoff, 0);
assert_eq!(tm.tm_isdst, 0);
assert_eq!(tm.tm_yday, 92);
assert_eq!(tm.tm_wday, 5);
assert_eq!(tm.tm_year, 115);
assert_eq!(tm.tm_mon, 3);
assert_eq!(tm.tm_mday, 3);
assert_eq!(tm.tm_hour, 4);
}
}
#[test]
fn test_asctime() {
let _reset = set_time_zone();
let time = Timespec::new(1234567890, 54321);
let utc = at_utc(time);
let local = at(time);
debug!("test_ctime: {} {}", utc.asctime(), local.asctime());
assert_eq!(utc.asctime().to_string(), "Fri Feb 13 23:31:30 2009".to_string());
assert_eq!(local.asctime().to_string(), "Fri Feb 13 15:31:30 2009".to_string());
}
#[test]
fn test_ctime() {
let _reset = set_time_zone();
let time = Timespec::new(1234567890, 54321);
let utc = at_utc(time);
let local = at(time);
debug!("test_ctime: {} {}", utc.ctime(), local.ctime());
assert_eq!(utc.ctime().to_string(), "Fri Feb 13 15:31:30 2009".to_string());
assert_eq!(local.ctime().to_string(), "Fri Feb 13 15:31:30 2009".to_string());
}
#[test]
fn test_strftime() {
let _reset = set_time_zone();
let time = Timespec::new(1234567890, 54321);
let utc = at_utc(time);
let local = at(time);
assert_eq!(local.strftime("").unwrap().to_string(), "".to_string());
assert_eq!(local.strftime("%A").unwrap().to_string(), "Friday".to_string());
assert_eq!(local.strftime("%a").unwrap().to_string(), "Fri".to_string());
assert_eq!(local.strftime("%B").unwrap().to_string(), "February".to_string());
assert_eq!(local.strftime("%b").unwrap().to_string(), "Feb".to_string());
assert_eq!(local.strftime("%C").unwrap().to_string(), "20".to_string());
assert_eq!(local.strftime("%c").unwrap().to_string(),
"Fri Feb 13 15:31:30 2009".to_string());
assert_eq!(local.strftime("%D").unwrap().to_string(), "02/13/09".to_string());
assert_eq!(local.strftime("%d").unwrap().to_string(), "13".to_string());
assert_eq!(local.strftime("%e").unwrap().to_string(), "13".to_string());
assert_eq!(local.strftime("%F").unwrap().to_string(), "2009-02-13".to_string());
assert_eq!(local.strftime("%f").unwrap().to_string(), "000054321".to_string());
assert_eq!(local.strftime("%G").unwrap().to_string(), "2009".to_string());
assert_eq!(local.strftime("%g").unwrap().to_string(), "09".to_string());
assert_eq!(local.strftime("%H").unwrap().to_string(), "15".to_string());
assert_eq!(local.strftime("%h").unwrap().to_string(), "Feb".to_string());
assert_eq!(local.strftime("%I").unwrap().to_string(), "03".to_string());
assert_eq!(local.strftime("%j").unwrap().to_string(), "044".to_string());
assert_eq!(local.strftime("%k").unwrap().to_string(), "15".to_string());
assert_eq!(local.strftime("%l").unwrap().to_string(), " 3".to_string());
assert_eq!(local.strftime("%M").unwrap().to_string(), "31".to_string());
assert_eq!(local.strftime("%m").unwrap().to_string(), "02".to_string());
assert_eq!(local.strftime("%n").unwrap().to_string(), "\n".to_string());
assert_eq!(local.strftime("%P").unwrap().to_string(), "pm".to_string());
assert_eq!(local.strftime("%p").unwrap().to_string(), "PM".to_string());
assert_eq!(local.strftime("%R").unwrap().to_string(), "15:31".to_string());
assert_eq!(local.strftime("%r").unwrap().to_string(), "03:31:30 PM".to_string());
assert_eq!(local.strftime("%S").unwrap().to_string(), "30".to_string());
assert_eq!(local.strftime("%s").unwrap().to_string(), "1234567890".to_string());
assert_eq!(local.strftime("%T").unwrap().to_string(), "15:31:30".to_string());
assert_eq!(local.strftime("%t").unwrap().to_string(), "\t".to_string());
assert_eq!(local.strftime("%U").unwrap().to_string(), "06".to_string());
assert_eq!(local.strftime("%u").unwrap().to_string(), "5".to_string());
assert_eq!(local.strftime("%V").unwrap().to_string(), "07".to_string());
assert_eq!(local.strftime("%v").unwrap().to_string(), "13-Feb-2009".to_string());
assert_eq!(local.strftime("%W").unwrap().to_string(), "06".to_string());
assert_eq!(local.strftime("%w").unwrap().to_string(), "5".to_string());
// FIXME (#2350): support locale
assert_eq!(local.strftime("%X").unwrap().to_string(), "15:31:30".to_string());
// FIXME (#2350): support locale
assert_eq!(local.strftime("%x").unwrap().to_string(), "02/13/09".to_string());
assert_eq!(local.strftime("%Y").unwrap().to_string(), "2009".to_string()); | // FIXME (#2350): support locale
assert_eq!(local.strftime("%Z").unwrap().to_string(), "".to_string());
assert_eq!(local.strftime("%z").unwrap().to_string(), "-0800".to_string());
assert_eq!(local.strftime("%+").unwrap().to_string(),
"2009-02-13T15:31:30-08:00".to_string());
assert_eq!(local.strftime("%%").unwrap().to_string(), "%".to_string());
let invalid_specifiers = ["%E", "%J", "%K", "%L", "%N", "%O", "%o", "%Q", "%q"];
for &sp in invalid_specifiers.iter() {
assert_eq!(local.strftime(sp).unwrap_err(),
InvalidFormatSpecifier(sp[1..].chars().next().unwrap()));
}
assert_eq!(local.strftime("%").unwrap_err(), MissingFormatConverter);
assert_eq!(local.strftime("%A %").unwrap_err(), MissingFormatConverter);
assert_eq!(local.asctime().to_string(), "Fri Feb 13 15:31:30 2009".to_string());
assert_eq!(local.ctime().to_string(), "Fri Feb 13 15:31:30 2009".to_string());
assert_eq!(local.rfc822z().to_string(), "Fri, 13 Feb 2009 15:31:30 -0800".to_string());
assert_eq!(local.rfc3339().to_string(), "2009-02-13T15:31:30-08:00".to_string());
assert_eq!(utc.asctime().to_string(), "Fri Feb 13 23:31:30 2009".to_string());
assert_eq!(utc.ctime().to_string(), "Fri Feb 13 15:31:30 2009".to_string());
assert_eq!(utc.rfc822().to_string(), "Fri, 13 Feb 2009 23:31:30 GMT".to_string());
assert_eq!(utc.rfc822z().to_string(), "Fri, 13 Feb 2009 23:31:30 -0000".to_string());
assert_eq!(utc.rfc3339().to_string(), "2009-02-13T23:31:30Z".to_string());
}
#[test]
fn test_timespec_eq_ord() {
let a = &Timespec::new(-2, 1);
let b = &Timespec::new(-1, 2);
let c = &Timespec::new(1, 2);
let d = &Timespec::new(2, 1);
let e = &Timespec::new(2, 1);
assert!(d.eq(e));
assert!(c.ne(e));
assert!(a.lt(b));
assert!(b.lt(c));
assert!(c.lt(d));
assert!(a.le(b));
assert!(b.le(c));
assert!(c.le(d));
assert!(d.le(e));
assert!(e.le(d));
assert!(b.ge(a));
assert!(c.ge(b));
assert!(d.ge(c));
assert!(e.ge(d));
assert!(d.ge(e));
assert!(b.gt(a));
assert!(c.gt(b));
assert!(d.gt(c));
}
#[test]
#[allow(deprecated)]
fn test_timespec_hash() {
use std::hash::{Hash, Hasher};
let c = &Timespec::new(3, 2);
let d = &Timespec::new(2, 1);
let e = &Timespec::new(2, 1);
let mut hasher = ::std::hash::SipHasher::new();
let d_hash:u64 = {
d.hash(&mut hasher);
hasher.finish()
};
hasher = ::std::hash::SipHasher::new();
let e_hash:u64 = {
e.hash(&mut hasher);
hasher.finish()
};
hasher = ::std::hash::SipHasher::new();
let c_hash:u64 = {
c.hash(&mut hasher);
hasher.finish()
};
assert_eq!(d_hash, e_hash);
assert!(c_hash != e_hash);
}
#[test]
fn test_timespec_add() {
let a = Timespec::new(1, 2);
let b = Duration::seconds(2) + Duration::nanoseconds(3);
let c = a + b;
assert_eq!(c.sec, 3);
assert_eq!(c.nsec, 5);
let p = Timespec::new(1, super::NSEC_PER_SEC - 2);
let q = Duration::seconds(2) + Duration::nanoseconds(2);
let r = p + q;
assert_eq!(r.sec, 4);
assert_eq!(r.nsec, 0);
let u = Timespec::new(1, super::NSEC_PER_SEC - 2);
let v = Duration::seconds(2) + Duration::nanoseconds(3);
let w = u + v;
assert_eq!(w.sec, 4);
assert_eq!(w.nsec, 1);
let k = Timespec::new(1, 0);
let l = Duration::nanoseconds(-1);
let m = k + l;
assert_eq!(m.sec, 0);
assert_eq!(m.nsec, 999_999_999);
}
#[test]
fn test_timespec_sub() {
let a = Timespec::new(2, 3);
let b = Timespec::new(1, 2);
let c = a - b;
assert_eq!(c.num_nanoseconds(), Some(super::NSEC_PER_SEC as i64 + 1));
let p = Timespec::new(2, 0);
let q = Timespec::new(1, 2);
let r = p - q;
assert_eq!(r.num_nanoseconds(), Some(super::NSEC_PER_SEC as i64 - 2));
let u = Timespec::new(1, 2);
let v = Timespec::new(2, 3);
let w = u - v;
assert_eq!(w.num_nanoseconds(), Some(-super::NSEC_PER_SEC as i64 - 1));
}
#[test]
fn test_time_sub() {
let a = ::time::now();
let b = at(a.to_timespec() + Duration::seconds(5));
let c = b - a;
assert_eq!(c.num_nanoseconds(), Some(super::NSEC_PER_SEC as i64 * 5));
}
#[test]
fn test_steadytime_sub() {
let a = SteadyTime::now();
let b = a + Duration::seconds(1);
assert_eq!(b - a, Duration::seconds(1));
assert_eq!(a - b, Duration::seconds(-1));
}
#[test]
fn test_date_before_1970() {
let early = strptime("1901-01-06", "%F").unwrap();
let late = strptime("2000-01-01", "%F").unwrap();
assert!(early < late);
}
#[test]
fn test_dst() {
let _reset = set_time_zone_london_dst();
let utc_in_feb = strptime("2015-02-01Z", "%F%z").unwrap();
let utc_in_jun = strptime("2015-06-01Z", "%F%z").unwrap();
let utc_in_nov = strptime("2015-11-01Z", "%F%z").unwrap();
let local_in_feb = utc_in_feb.to_local();
let local_in_jun = utc_in_jun.to_local();
let local_in_nov = utc_in_nov.to_local();
assert_eq!(local_in_feb.tm_mon, 1);
assert_eq!(local_in_feb.tm_hour, 0);
assert_eq!(local_in_feb.tm_utcoff, 0);
assert_eq!(local_in_feb.tm_isdst, 0);
assert_eq!(local_in_jun.tm_mon, 5);
assert_eq!(local_in_jun.tm_hour, 1);
assert_eq!(local_in_jun.tm_utcoff, 3600);
assert_eq!(local_in_jun.tm_isdst, 1);
assert_eq!(local_in_nov.tm_mon, 10);
assert_eq!(local_in_nov.tm_hour, 0);
assert_eq!(local_in_nov.tm_utcoff, 0);
assert_eq!(local_in_nov.tm_isdst, 0)
}
}
api_zone.go
/*
* Fava - OpenApi Gateway - DNS
*
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
* API version: v1
*/
// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT.
package gopinto
import (
"bytes"
_context "context"
_ioutil "io/ioutil"
_nethttp "net/http"
_neturl "net/url"
"strings"
)
// Linger please
var (
_ _context.Context
)
type ZoneApi interface {
/*
* ApiDnsZoneDelete Deletes a DNS zone from the passed provider
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @return ApiApiDnsZoneDeleteRequest
*/
ApiDnsZoneDelete(ctx _context.Context) ApiApiDnsZoneDeleteRequest
/*
* ApiDnsZoneDeleteExecute executes the request
*/
ApiDnsZoneDeleteExecute(r ApiApiDnsZoneDeleteRequest) (*_nethttp.Response, GenericOpenAPIError)
/*
* ApiDnsZoneGet Retrieves the DNS zones assigned to the account
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @return ApiApiDnsZoneGetRequest
*/
ApiDnsZoneGet(ctx _context.Context) ApiApiDnsZoneGetRequest
/*
* ApiDnsZoneGetExecute executes the request
* @return []Zone
*/
ApiDnsZoneGetExecute(r ApiApiDnsZoneGetRequest) ([]Zone, *_nethttp.Response, GenericOpenAPIError)
/*
* ApiDnsZonePost Creates a new DNS zone
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @return ApiApiDnsZonePostRequest
*/
ApiDnsZonePost(ctx _context.Context) ApiApiDnsZonePostRequest
/*
* ApiDnsZonePostExecute executes the request
* @return Zone
*/
ApiDnsZonePostExecute(r ApiApiDnsZonePostRequest) (Zone, *_nethttp.Response, GenericOpenAPIError)
/*
* ApiDnsZoneZoneGet Loads the specified DNS zone
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @param zone The name of the zone to query.
* @return ApiApiDnsZoneZoneGetRequest
*/
ApiDnsZoneZoneGet(ctx _context.Context, zone string) ApiApiDnsZoneZoneGetRequest
/*
* ApiDnsZoneZoneGetExecute executes the request
* @return Zone
*/
ApiDnsZoneZoneGetExecute(r ApiApiDnsZoneZoneGetRequest) (Zone, *_nethttp.Response, GenericOpenAPIError)
}
// ZoneApiService ZoneApi service
type ZoneApiService service
type ApiApiDnsZoneDeleteRequest struct {
ctx _context.Context
ApiService ZoneApi
name *string
xApiOptions *string
}
func (r ApiApiDnsZoneDeleteRequest) Name(name string) ApiApiDnsZoneDeleteRequest {
r.name = &name
return r
}
func (r ApiApiDnsZoneDeleteRequest) XApiOptions(xApiOptions string) ApiApiDnsZoneDeleteRequest {
r.xApiOptions = &xApiOptions
return r
}
func (r ApiApiDnsZoneDeleteRequest) Execute() (*_nethttp.Response, GenericOpenAPIError) {
return r.ApiService.ApiDnsZoneDeleteExecute(r)
}
/*
* ApiDnsZoneDelete Deletes a DNS zone from the passed provider
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @return ApiApiDnsZoneDeleteRequest
*/
func (a *ZoneApiService) ApiDnsZoneDelete(ctx _context.Context) ApiApiDnsZoneDeleteRequest {
return ApiApiDnsZoneDeleteRequest{
ApiService: a,
ctx: ctx,
}
}
/*
* Execute executes the request
*/
func (a *ZoneApiService) ApiDnsZoneDeleteExecute(r ApiApiDnsZoneDeleteRequest) (*_nethttp.Response, GenericOpenAPIError) {
var (
localVarHTTPMethod = _nethttp.MethodDelete
localVarPostBody interface{}
localVarFormFileName string
localVarFileName string
localVarFileBytes []byte
executionError GenericOpenAPIError
)
localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "ZoneApiService.ApiDnsZoneDelete")
if err != nil {
executionError.error = err.Error()
return nil, executionError
}
localVarPath := localBasePath + "/api/dns/Zone"
localVarHeaderParams := make(map[string]string)
localVarQueryParams := _neturl.Values{}
localVarFormParams := _neturl.Values{}
if r.name == nil {
executionError.error = "name is required and must be specified"
return nil, executionError
}
if r.xApiOptions == nil {
executionError.error = "xApiOptions is required and must be specified"
return nil, executionError
}
localVarQueryParams.Add("Name", parameterToString(*r.name, ""))
// to determine the Content-Type header
localVarHTTPContentTypes := []string{}
// set Content-Type header
localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)
if localVarHTTPContentType != "" {
localVarHeaderParams["Content-Type"] = localVarHTTPContentType
}
// to determine the Accept header
localVarHTTPHeaderAccepts := []string{}
// set Accept header
localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)
if localVarHTTPHeaderAccept != "" {
localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept
}
localVarHeaderParams["x-api-options"] = parameterToString(*r.xApiOptions, "")
req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)
if err != nil {
executionError.error = err.Error()
return nil, executionError
}
localVarHTTPResponse, err := a.client.callAPI(req)
if err != nil || localVarHTTPResponse == nil {
executionError.error = err.Error()
return localVarHTTPResponse, executionError
}
localVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)
localVarHTTPResponse.Body.Close()
localVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))
if err != nil {
executionError.error = err.Error()
return localVarHTTPResponse, executionError
}
if localVarHTTPResponse.StatusCode >= 300 {
newErr := GenericOpenAPIError{
body: localVarBody,
error: localVarHTTPResponse.Status,
}
return localVarHTTPResponse, newErr
}
return localVarHTTPResponse, executionError
}
type ApiApiDnsZoneGetRequest struct {
ctx _context.Context
ApiService ZoneApi
xApiOptions *string
}
func (r ApiApiDnsZoneGetRequest) XApiOptions(xApiOptions string) ApiApiDnsZoneGetRequest {
r.xApiOptions = &xApiOptions
return r
}
func (r ApiApiDnsZoneGetRequest) Execute() ([]Zone, *_nethttp.Response, GenericOpenAPIError) {
return r.ApiService.ApiDnsZoneGetExecute(r)
}
/*
* ApiDnsZoneGet Retrieves the DNS zones assigned to the account
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @return ApiApiDnsZoneGetRequest
*/
func (a *ZoneApiService) ApiDnsZoneGet(ctx _context.Context) ApiApiDnsZoneGetRequest {
return ApiApiDnsZoneGetRequest{
ApiService: a,
ctx: ctx,
}
}
/*
* Execute executes the request
* @return []Zone
*/
func (a *ZoneApiService) ApiDnsZoneGetExecute(r ApiApiDnsZoneGetRequest) ([]Zone, *_nethttp.Response, GenericOpenAPIError) {
var (
localVarHTTPMethod = _nethttp.MethodGet
localVarPostBody interface{}
localVarFormFileName string
localVarFileName string
localVarFileBytes []byte
executionError GenericOpenAPIError
localVarReturnValue []Zone
)
localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "ZoneApiService.ApiDnsZoneGet")
if err != nil {
executionError.error = err.Error()
return localVarReturnValue, nil, executionError
}
localVarPath := localBasePath + "/api/dns/Zone"
localVarHeaderParams := make(map[string]string)
localVarQueryParams := _neturl.Values{}
localVarFormParams := _neturl.Values{}
if r.xApiOptions == nil {
executionError.error = "xApiOptions is required and must be specified"
return localVarReturnValue, nil, executionError
}
// to determine the Content-Type header
localVarHTTPContentTypes := []string{}
// set Content-Type header
localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)
if localVarHTTPContentType != "" {
localVarHeaderParams["Content-Type"] = localVarHTTPContentType
}
// to determine the Accept header
localVarHTTPHeaderAccepts := []string{"application/json"}
// set Accept header
localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)
if localVarHTTPHeaderAccept != "" {
localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept
}
localVarHeaderParams["x-api-options"] = parameterToString(*r.xApiOptions, "")
req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)
if err != nil {
executionError.error = err.Error()
return localVarReturnValue, nil, executionError
}
localVarHTTPResponse, err := a.client.callAPI(req)
if err != nil || localVarHTTPResponse == nil {
executionError.error = err.Error()
return localVarReturnValue, localVarHTTPResponse, executionError
}
localVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)
localVarHTTPResponse.Body.Close()
localVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))
if err != nil {
executionError.error = err.Error()
return localVarReturnValue, localVarHTTPResponse, executionError
}
if localVarHTTPResponse.StatusCode >= 300 {
newErr := GenericOpenAPIError{
body: localVarBody,
error: localVarHTTPResponse.Status,
}
return localVarReturnValue, localVarHTTPResponse, newErr
}
err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
if err != nil {
newErr := GenericOpenAPIError{
body: localVarBody,
error: err.Error(),
}
return localVarReturnValue, localVarHTTPResponse, newErr
}
return localVarReturnValue, localVarHTTPResponse, executionError
}
type ApiApiDnsZonePostRequest struct {
ctx _context.Context
ApiService ZoneApi
xApiOptions *string
createZoneRequestModel *CreateZoneRequestModel
}
func (r ApiApiDnsZonePostRequest) XApiOptions(xApiOptions string) ApiApiDnsZonePostRequest {
r.xApiOptions = &xApiOptions
return r
}
func (r ApiApiDnsZonePostRequest) CreateZoneRequestModel(createZoneRequestModel CreateZoneRequestModel) ApiApiDnsZonePostRequest {
r.createZoneRequestModel = &createZoneRequestModel
return r
}
func (r ApiApiDnsZonePostRequest) Execute() (Zone, *_nethttp.Response, GenericOpenAPIError) {
return r.ApiService.ApiDnsZonePostExecute(r)
}
/*
* ApiDnsZonePost Creates a new DNS zone
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @return ApiApiDnsZonePostRequest
*/
func (a *ZoneApiService) ApiDnsZonePost(ctx _context.Context) ApiApiDnsZonePostRequest {
return ApiApiDnsZonePostRequest{
ApiService: a,
ctx: ctx,
}
}
/*
* Execute executes the request
* @return Zone
*/
func (a *ZoneApiService) ApiDnsZonePostExecute(r ApiApiDnsZonePostRequest) (Zone, *_nethttp.Response, GenericOpenAPIError) {
var (
localVarHTTPMethod = _nethttp.MethodPost
localVarPostBody interface{}
localVarFormFileName string
localVarFileName string
localVarFileBytes []byte
executionError GenericOpenAPIError
localVarReturnValue Zone
)
localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "ZoneApiService.ApiDnsZonePost")
if err != nil {
executionError.error = err.Error()
return localVarReturnValue, nil, executionError
}
localVarPath := localBasePath + "/api/dns/Zone"
localVarHeaderParams := make(map[string]string)
localVarQueryParams := _neturl.Values{}
localVarFormParams := _neturl.Values{}
if r.xApiOptions == nil {
executionError.error = "xApiOptions is required and must be specified"
return localVarReturnValue, nil, executionError
}
if r.createZoneRequestModel == nil {
executionError.error = "createZoneRequestModel is required and must be specified"
return localVarReturnValue, nil, executionError
}
// to determine the Content-Type header
localVarHTTPContentTypes := []string{"application/json", "text/json", "application/_*+json"}
// set Content-Type header
localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)
if localVarHTTPContentType != "" {
localVarHeaderParams["Content-Type"] = localVarHTTPContentType
}
// to determine the Accept header
localVarHTTPHeaderAccepts := []string{"text/plain", "application/json", "text/json"}
// set Accept header
localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)
if localVarHTTPHeaderAccept != "" {
localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept
}
localVarHeaderParams["x-api-options"] = parameterToString(*r.xApiOptions, "")
// body params
localVarPostBody = r.createZoneRequestModel
req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)
if err != nil {
executionError.error = err.Error()
return localVarReturnValue, nil, executionError
}
localVarHTTPResponse, err := a.client.callAPI(req)
if err != nil || localVarHTTPResponse == nil {
executionError.error = err.Error()
return localVarReturnValue, localVarHTTPResponse, executionError
}
localVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)
localVarHTTPResponse.Body.Close()
localVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))
if err != nil {
executionError.error = err.Error()
return localVarReturnValue, localVarHTTPResponse, executionError
}
if localVarHTTPResponse.StatusCode >= 300 {
newErr := GenericOpenAPIError{
body: localVarBody,
error: localVarHTTPResponse.Status,
}
return localVarReturnValue, localVarHTTPResponse, newErr
}
err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
if err != nil {
newErr := GenericOpenAPIError{
body: localVarBody,
error: err.Error(),
}
return localVarReturnValue, localVarHTTPResponse, newErr
}
return localVarReturnValue, localVarHTTPResponse, executionError
}
type ApiApiDnsZoneZoneGetRequest struct {
ctx _context.Context
ApiService ZoneApi
zone string
xApiOptions *string
}
func (r ApiApiDnsZoneZoneGetRequest) XApiOptions(xApiOptions string) ApiApiDnsZoneZoneGetRequest {
r.xApiOptions = &xApiOptions
return r
}
func (r ApiApiDnsZoneZoneGetRequest) Execute() (Zone, *_nethttp.Response, GenericOpenAPIError) {
return r.ApiService.ApiDnsZoneZoneGetExecute(r)
}
/*
* ApiDnsZoneZoneGet Loads the specified DNS zone
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @param zone The name of the zone to query.
* @return ApiApiDnsZoneZoneGetRequest
*/
func (a *ZoneApiService) ApiDnsZoneZoneGet(ctx _context.Context, zone string) ApiApiDnsZoneZoneGetRequest {
return ApiApiDnsZoneZoneGetRequest{
ApiService: a,
ctx: ctx,
zone: zone,
}
}
/*
* Execute executes the request
* @return Zone
*/
func (a *ZoneApiService) ApiDnsZoneZoneGetExecute(r ApiApiDnsZoneZoneGetRequest) (Zone, *_nethttp.Response, GenericOpenAPIError) {
var (
localVarHTTPMethod = _nethttp.MethodGet
localVarPostBody interface{}
localVarFormFileName string
localVarFileName string
localVarFileBytes []byte
executionError GenericOpenAPIError
localVarReturnValue Zone
)
localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "ZoneApiService.ApiDnsZoneZoneGet")
if err != nil {
executionError.error = err.Error()
return localVarReturnValue, nil, executionError
}
localVarPath := localBasePath + "/api/dns/Zone/{zone}"
localVarPath = strings.Replace(localVarPath, "{"+"zone"+"}", _neturl.PathEscape(parameterToString(r.zone, "")), -1)
localVarHeaderParams := make(map[string]string)
localVarQueryParams := _neturl.Values{}
localVarFormParams := _neturl.Values{}
if r.xApiOptions == nil {
executionError.error = "xApiOptions is required and must be specified"
return localVarReturnValue, nil, executionError
}
// to determine the Content-Type header
localVarHTTPContentTypes := []string{}
// set Content-Type header
localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)
if localVarHTTPContentType != "" {
localVarHeaderParams["Content-Type"] = localVarHTTPContentType
}
// to determine the Accept header
localVarHTTPHeaderAccepts := []string{"text/plain", "application/json", "text/json"}
// set Accept header
localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)
if localVarHTTPHeaderAccept != "" {
localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept
}
localVarHeaderParams["x-api-options"] = parameterToString(*r.xApiOptions, "")
req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)
if err != nil {
executionError.error = err.Error()
return localVarReturnValue, nil, executionError
}
localVarHTTPResponse, err := a.client.callAPI(req)
if err != nil || localVarHTTPResponse == nil {
executionError.error = err.Error()
return localVarReturnValue, localVarHTTPResponse, executionError
}
localVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)
localVarHTTPResponse.Body.Close()
localVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))
if err != nil {
executionError.error = err.Error()
return localVarReturnValue, localVarHTTPResponse, executionError
}
if localVarHTTPResponse.StatusCode >= 300 {
newErr := GenericOpenAPIError{
body: localVarBody,
error: localVarHTTPResponse.Status,
}
return localVarReturnValue, localVarHTTPResponse, newErr
}
err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
if err != nil {
newErr := GenericOpenAPIError{
body: localVarBody,
error: err.Error(),
}
return localVarReturnValue, localVarHTTPResponse, newErr
}
return localVarReturnValue, localVarHTTPResponse, executionError
} | |
main.go
package main
type TreeNode struct {
Val int
Left *TreeNode
Right *TreeNode
}
func max(a, b int) int {
if a > b {
return a
}
return b
}
var memo []map[*TreeNode]int
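// dfs returns the maximum sum of values pickable in the subtree rooted at
// node, given that no two adjacent nodes may both be picked. hasRedParent
// is 1 when node's parent was picked (so node itself cannot be); results
// are memoized per (parent state, node) in memo.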
func dfs(node *TreeNode, hasRedParent int) int {
if v, ok := memo[hasRedParent][node]; ok {
return v
}
if node == nil {
return 0
}
ret1, ret2 := 0, 0
ret1 = dfs(node.Left, 0) + dfs(node.Right, 0)
if hasRedParent == 0 {
ret2 = dfs(node.Left, 1) + dfs(node.Right, 1) + node.Val
}
memo[hasRedParent][node] = max(ret1, ret2)
return memo[hasRedParent][node]
}
func rob(root *TreeNode) int {
memo = []map[*TreeNode]int{map[*TreeNode]int{}, map[*TreeNode]int{}}
return dfs(root, 0)
}
func main() {
} | ||
items.go
// Copyright 2019 Drone.IO Inc. All rights reserved.
// Use of this source code is governed by the Drone Non-Commercial License
// that can be found in the LICENSE file.
// +build !oss
package queue
import (
"net/http"
"github.com/bibasoft-team/drone/core"
"github.com/bibasoft-team/drone/handler/api/render"
"github.com/bibasoft-team/drone/logger"
)
// HandleItems returns an http.HandlerFunc that writes a
// json-encoded list of queue items to the response body.
func HandleItems(store core.StageStore) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
items, err := store.ListIncomplete(ctx)
if err != nil {
render.InternalError(w, err)
logger.FromRequest(r).WithError(err).
Warnln("api: cannot get running items")
return
}
render.JSON(w, items, 200)
}
}
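// Illustrative wiring only; the mux setup and the stageStore variable below
// are assumptions for the example, not part of this package:
//
//	mux := http.NewServeMux()
//	mux.Handle("/queue/items", HandleItems(stageStore))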
28.rs
// Sorting a list of lists according to length of sublists
trait HasLength {
fn leng(&self) -> usize;
}
macro_rules! impl_trait(($($T:ty)+) => {
$(impl HasLength for $T {
fn leng(&self) -> usize { self.len() }
})+
});
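// Each impl_trait! invocation expands to one `impl HasLength` block per type
// listed, delegating leng() to that type's inherent len() method.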
impl_trait!(String str);
impl_trait!(&[u8]);
fn lfsort<T: HasLength>(v: &mut Vec<T>) {
v.sort_by(|x, y| x.leng().cmp(&y.leng()));
}
fn main() {
let s = vec!["abc", "de", "fgh", "de", "ijkl", "mn", "o"];
let mut res = s.iter().map(|x| x.as_bytes()).collect::<Vec<_>>();
lfsort(&mut res);
println!("{:?}", res.into_iter().map(|x| std::str::from_utf8(x)).collect::<Vec<_>>());
}
ref_count.rs
/// Make a ConnectableObservable behave like an ordinary observable and
/// automates the way you can connect to it.
///
/// Internally it counts the subscriptions to the observable and subscribes
/// (only once) to the source if the number of subscriptions is larger than
/// 0. If the number of subscriptions is smaller than 1, it unsubscribes
/// from the source. This way you can make sure that everything before the
/// published refCount has only a single subscription independently of the
/// number of subscribers to the target observable.
///
/// Note that using the share operator is exactly the same as using the
/// publish operator (making the observable hot) and the refCount operator
/// in a sequence.
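///
/// # Example
///
/// A minimal sketch mirroring the `smoke` test at the bottom of this file:
///
/// ```ignore
/// let shared = observable::of(1)
///   .publish::<LocalSubject<'_, _, _>>()
///   .into_ref_count();
/// // Both subscribers share a single underlying subscription to the source.
/// shared.clone().subscribe(|v| println!("a: {}", v));
/// shared.clone().subscribe(|v| println!("b: {}", v));
/// ```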
use crate::{impl_helper::*, impl_local_shared_both, prelude::*};
#[derive(Clone)]
pub struct RefCount<R>(R);
pub enum InnerRefCount<Src, Sbj, U> {
Connectable(ConnectableObservable<Src, Sbj>),
Connected { subject: Sbj, connection: U },
}
impl<Src, Sbj, U> RefCount<MutRc<InnerRefCount<Src, Sbj, U>>> {
pub fn local(c: ConnectableObservable<Src, Sbj>) -> Self {
RefCount(MutRc::own(InnerRefCount::Connectable(c)))
}
}
impl<Src, Sbj, U> RefCount<MutArc<InnerRefCount<Src, Sbj, U>>> {
pub fn shared(c: ConnectableObservable<Src, Sbj>) -> Self {
RefCount(MutArc::own(InnerRefCount::Connectable(c)))
}
}
impl<Src, Sbj, U> Observable for RefCount<MutRc<InnerRefCount<Src, Sbj, U>>>
where
Src: Observable,
{
type Item = Src::Item;
type Err = Src::Err;
}
impl<Src, Sbj, U> Observable for RefCount<MutArc<InnerRefCount<Src, Sbj, U>>>
where
Src: Observable,
{
type Item = Src::Item;
type Err = Src::Err;
}
impl_local_shared_both! {
impl<Src, Sbj> RefCount<@ctx::Rc<InnerRefCount<Src, Sbj, Src::Unsub>>>;
type Unsub = RefCountSubscription<Sbj, Sbj::Unsub, Src::Unsub>;
macro method($self: ident, $observer: ident, $ctx: ident) {
let mut inner = $self.0.rc_deref_mut();
match &mut *inner {
InnerRefCount::Connectable(c) => {
let subject = c.fork();
let subscription = c.fork().actual_subscribe($observer);
let new_holder : ConnectableObservable<Src, Sbj> = unsafe {
std::mem::transmute_copy(c)
};
let connection = new_holder.connect();
let old = std::mem::replace(&mut *inner, InnerRefCount::Connected {
subject: subject.clone(),
connection: connection.clone()
});
std::mem::forget(old);
RefCountSubscription {
subject: subject.clone(), subscription, connection
}
},
InnerRefCount::Connected{ subject, connection } => {
let subscription = subject.clone().actual_subscribe($observer);
RefCountSubscription {
subject: subject.clone(),
subscription,
connection: connection.clone()
}
}
}
}
where
ConnectableObservable<Src, Sbj>: Connect<Unsub=Src::Unsub>,
Src: @ctx::Observable @ctx::local_only(+ 'o) @ctx::shared_only(+ 'static),
Src::Unsub: Clone @ctx::local_only(+ 'o) @ctx::shared_only(+'static),
Src::Item: Clone @ctx::local_only(+ 'o) @ctx::shared_only(+'static),
Src::Err: Clone @ctx::local_only(+ 'o) @ctx::shared_only(+'static),
Sbj: Observer<Item = Src::Item, Err = Src::Err>
+ TearDownSize + Clone
+ @ctx::Observable<Item=Src::Item, Err=Src::Err>
@ctx::shared_only(+ Send + Sync + 'static) @ctx::local_only(+ 'o)
}
pub struct RefCountSubscription<S, U, C> {
subject: S,
subscription: U,
connection: C,
}
impl<S, U, C> SubscriptionLike for RefCountSubscription<S, U, C>
where
S: TearDownSize,
C: SubscriptionLike,
U: SubscriptionLike,
{
fn unsubscribe(&mut self) {
self.subscription.unsubscribe();
if self.subject.teardown_size() == 0 {
self.connection.unsubscribe();
}
}
#[inline(always)]
fn is_closed(&self) -> bool { self.subscription.is_closed() }
}
#[cfg(test)]
mod test {
use crate::prelude::*;
#[test]
fn smoke() {
let mut accept1 = 0;
let mut accept2 = 0;
{
let ref_count = observable::of(1)
.publish::<LocalSubject<'_, _, _>>()
.into_ref_count();
ref_count.clone().subscribe(|v| accept1 = v);
ref_count.clone().subscribe(|v| accept2 = v);
}
assert_eq!(accept1, 1);
assert_eq!(accept2, 0);
}
#[test]
fn auto_unsubscribe() {
let mut accept1 = 0;
let mut accept2 = 0;
{
let mut subject = LocalSubject::new();
let ref_count = subject.clone().publish().into_ref_count();
let mut s1 = ref_count.clone().subscribe(|v| accept1 = v);
let mut s2 = ref_count.clone().subscribe(|v| accept2 = v);
subject.next(1);
s1.unsubscribe();
s2.unsubscribe();
subject.next(2);
}
assert_eq!(accept1, 1);
assert_eq!(accept2, 1);
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn fork_and_shared() {
observable::of(1)
.publish::<LocalSubject<'_, _, _>>()
.into_ref_count()
.subscribe(|_| {});
SharedSubject::new()
.publish()
.into_ref_count()
.into_shared()
.subscribe(|_: i32| {});
observable::of(1)
.publish::<SharedSubject<_, _>>()
.into_ref_count()
.into_shared()
.subscribe(|_| {});
observable::of(1)
.into_shared()
.publish()
.into_ref_count()
.into_shared()
.subscribe(|_| {});
observable::of(1)
.into_shared()
.publish()
.into_ref_count()
.into_shared()
.into_shared()
.subscribe(|_| {});
}
#[test]
fn bench() { do_bench(); }
benchmark_group!(do_bench, bench_ref_count);
fn bench_ref_count(b: &mut bencher::Bencher) { b.iter(smoke) }
}
tsc_config.rs
// Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
use deno_core::error::AnyError;
use deno_core::serde_json;
use deno_core::serde_json::Value;
use jsonc_parser::JsonValue;
use serde::Deserialize;
use std::collections::HashMap;
use std::fmt;
use std::str::FromStr;
#[derive(Clone, Debug, PartialEq)]
pub struct IgnoredCompilerOptions(pub Vec<String>);
impl fmt::Display for IgnoredCompilerOptions {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut codes = self.0.clone();
codes.sort();
write!(f, "{}", codes.join(", "))
}
}
/// A static slice of all the compiler options that should be ignored, either
/// because they have no effect on the compilation or because they would cause
/// the emit to not work in Deno.
const IGNORED_COMPILER_OPTIONS: [&str; 61] = [
"allowSyntheticDefaultImports",
"allowUmdGlobalAccess",
"assumeChangesOnlyAffectDirectDependencies",
"baseUrl",
"build",
"composite",
"declaration",
"declarationDir",
"declarationMap",
"diagnostics",
"downlevelIteration",
"emitBOM",
"emitDeclarationOnly",
"esModuleInterop",
"extendedDiagnostics",
"forceConsistentCasingInFileNames",
"generateCpuProfile",
"help",
"importHelpers",
"incremental",
"inlineSourceMap",
"inlineSources",
"init",
"listEmittedFiles",
"listFiles",
"mapRoot",
"maxNodeModuleJsDepth",
"module",
"moduleResolution",
"newLine",
"noEmit",
"noEmitHelpers",
"noEmitOnError",
"noLib",
"noResolve",
"out",
"outDir",
"outFile",
"paths",
"preserveConstEnums",
"preserveSymlinks",
"preserveWatchOutput",
"pretty",
"reactNamespace",
"resolveJsonModule",
"rootDir",
"rootDirs",
"showConfig",
"skipDefaultLibCheck",
"skipLibCheck",
"sourceMap",
"sourceRoot",
"stripInternal",
"target",
"traceResolution",
"tsBuildInfoFile",
"types",
"typeRoots",
"useDefineForClassFields",
"version",
"watch",
];
/// Merges `b` into `a`, working like JavaScript's `Object.assign()` except
/// that nested objects are merged recursively; on conflicting keys `b` wins.
pub fn json_merge(a: &mut Value, b: &Value) {
match (a, b) {
(&mut Value::Object(ref mut a), &Value::Object(ref b)) => {
for (k, v) in b {
json_merge(a.entry(k.clone()).or_insert(Value::Null), v);
}
}
(a, b) => {
*a = b.clone();
}
}
}
/// Convert a jsonc libraries `JsonValue` to a serde `Value`.
fn jsonc_to_serde(j: JsonValue) -> Value {
match j {
JsonValue::Array(arr) => {
let vec = arr.into_iter().map(jsonc_to_serde).collect();
Value::Array(vec)
}
JsonValue::Boolean(bool) => Value::Bool(bool),
JsonValue::Null => Value::Null,
JsonValue::Number(num) => {
let number =
serde_json::Number::from_str(&num).expect("could not parse number");
Value::Number(number)
}
JsonValue::Object(obj) => {
let mut map = serde_json::map::Map::new();
for (key, json_value) in obj.into_iter() {
map.insert(key, jsonc_to_serde(json_value));
}
Value::Object(map)
}
JsonValue::String(str) => Value::String(str),
}
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct TSConfigJson {
compiler_options: Option<HashMap<String, Value>>,
exclude: Option<Vec<String>>,
extends: Option<String>,
files: Option<Vec<String>>,
include: Option<Vec<String>>,
references: Option<Value>,
type_acquisition: Option<Value>,
}
pub fn parse_raw_config(config_text: &str) -> Result<Value, AnyError> {
assert!(!config_text.is_empty());
let jsonc = jsonc_parser::parse_to_value(config_text)?.unwrap();
Ok(jsonc_to_serde(jsonc))
}
/// Take a string of JSONC, parse it and return a serde `Value` of the text.
/// The result also contains any options that were ignored.
pub fn parse_config(
config_text: &str,
) -> Result<(Value, Option<IgnoredCompilerOptions>), AnyError> {
assert!(!config_text.is_empty());
let jsonc = jsonc_parser::parse_to_value(config_text)?.unwrap();
let config: TSConfigJson = serde_json::from_value(jsonc_to_serde(jsonc))?;
let mut compiler_options: HashMap<String, Value> = HashMap::new();
let mut items: Vec<String> = Vec::new();
if let Some(in_compiler_options) = config.compiler_options {
for (key, value) in in_compiler_options.iter() {
if IGNORED_COMPILER_OPTIONS.contains(&key.as_str()) {
items.push(key.to_owned());
} else {
compiler_options.insert(key.to_owned(), value.to_owned());
}
}
}
let options_value = serde_json::to_value(compiler_options)?;
let ignored_options = if !items.is_empty() {
Some(IgnoredCompilerOptions(items))
} else {
None
};
Ok((options_value, ignored_options))
}
#[cfg(test)]
mod tests {
use super::*;
use deno_core::serde_json::json;
#[test]
fn test_json_merge() {
let mut value_a = json!({
"a": true,
"b": "c"
});
let value_b = json!({
"b": "d",
"e": false,
});
json_merge(&mut value_a, &value_b);
assert_eq!(
value_a,
json!({
"a": true,
"b": "d",
"e": false,
})
);
}
#[test]
fn test_parse_config() {
let config_text = r#"{
"compilerOptions": {
"build": true,
// comments are allowed
"strict": true
}
}"#;
let (options_value, ignored) =
parse_config(config_text).expect("error parsing");
assert!(options_value.is_object());
let options = options_value.as_object().unwrap();
assert!(options.contains_key("strict"));
assert_eq!(options.len(), 1);
assert_eq!(
ignored,
Some(IgnoredCompilerOptions(vec!["build".to_string()])),
);
}
#[test]
fn test_parse_raw_config() {
let invalid_config_text = r#"{
"compilerOptions": {
// comments are allowed
}"#;
let errbox = parse_raw_config(invalid_config_text).unwrap_err();
assert!(errbox
.to_string()
.starts_with("Unterminated object on line 1"));
}
}
range.rs
// This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use glib::translate::*;
glib::wrapper! {
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Range(Boxed<ffi::AtkRange>);
match fn {
copy => |ptr| ffi::atk_range_copy(mut_override(ptr)),
free => |ptr| ffi::atk_range_free(ptr),
get_type => || ffi::atk_range_get_type(),
}
}
impl Range {
#[doc(alias = "atk_range_new")]
pub fn new(lower_limit: f64, upper_limit: f64, description: &str) -> Range {
assert_initialized_main_thread!();
unsafe {
from_glib_full(ffi::atk_range_new(
lower_limit,
upper_limit,
description.to_glib_none().0,
))
}
}
#[doc(alias = "atk_range_get_description")]
pub fn get_description(&mut self) -> Option<glib::GString> {
unsafe { from_glib_none(ffi::atk_range_get_description(self.to_glib_none_mut().0)) }
}
#[doc(alias = "atk_range_get_lower_limit")]
pub fn get_lower_limit(&mut self) -> f64 {
unsafe { ffi::atk_range_get_lower_limit(self.to_glib_none_mut().0) }
}
#[doc(alias = "atk_range_get_upper_limit")]
pub fn get_upper_limit(&mut self) -> f64 {
unsafe { ffi::atk_range_get_upper_limit(self.to_glib_none_mut().0) }
}
}
server.go
// Copyright 2015 go-smpp authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package smpptest
import (
"crypto/tls"
"errors"
"fmt"
"io"
"log"
"net"
"sync"
"github.com/fiorix/go-smpp/smpp/pdu"
"github.com/fiorix/go-smpp/smpp/pdu/pdufield"
)
// Default settings.
var (
DefaultUser = "client"
DefaultPasswd = "secret"
DefaultSystemID = "smpptest"
)
// HandlerFunc is the signature of a function passed to Server instances,
// that is called when client PDU messages arrive.
type HandlerFunc func(c Conn, m pdu.Body)
// Server is an SMPP server for testing purposes. By default it authenticates
// clients with the configured credentials, and echoes any other PDUs
// back to the client.
type Server struct {
User string
Passwd string
TLS *tls.Config
Handler HandlerFunc
conns []Conn
mu sync.Mutex
l net.Listener
}
// NewServer creates and initializes a new Server. Callers are supposed
// to call Close on that server later.
func NewServer() *Server {
s := NewUnstartedServer()
s.Start()
return s
}
// NewUnstartedServer creates a new Server with default settings, and
// does not start it. Callers are supposed to call Start and Close later.
func NewUnstartedServer() *Server {
return &Server{
User: DefaultUser,
Passwd: DefaultPasswd,
Handler: EchoHandler,
l: newLocalListener(),
}
}
func newLocalListener() net.Listener {
l, err := net.Listen("tcp", "127.0.0.1:0")
if err == nil {
return l
}
if l, err = net.Listen("tcp6", "[::1]:0"); err != nil {
panic(fmt.Sprintf("smpptest: failed to listen on a port: %v", err))
}
return l
}
// Start starts the server.
func (srv *Server) Start() {
go srv.Serve()
}
// Addr returns the local address of the server, or an empty string
// if the server hasn't been started yet.
func (srv *Server) Addr() string {
if srv.l == nil {
return ""
}
return srv.l.Addr().String()
}
// Close stops the server, causing the accept loop to break out.
func (srv *Server) Close() {
if srv.l == nil {
panic("smpptest: server is not started")
}
srv.l.Close()
}
// Serve accepts new clients and handles them by authenticating the
// first PDU, expected to be a Bind PDU, then echoing all other PDUs.
func (srv *Server) Serve() {
for {
cli, err := srv.l.Accept()
if err != nil {
break // on srv.l.Close
}
c := newConn(cli)
srv.conns = append(srv.conns, c)
go srv.handle(c)
}
}
// BroadcastMessage broadcasts a test PDU to all bound clients.
func (srv *Server) BroadcastMessage(p pdu.Body) {
for i := range srv.conns {
srv.conns[i].Write(p)
}
}
// handle new clients.
func (srv *Server) handle(c *conn) {
defer c.Close()
if err := srv.auth(c); err != nil {
if err != io.EOF {
log.Println("smpptest: server auth failed:", err)
}
return
}
for {
p, err := c.Read()
if err != nil {
if err != io.EOF {
log.Println("smpptest: read failed:", err)
}
break
}
srv.Handler(c, p)
}
}
// auth authenticates new clients.
func (srv *Server) auth(c *conn) error {
p, err := c.Read()
if err != nil {
return err
}
var resp pdu.Body
switch p.Header().ID {
case pdu.BindTransmitterID:
resp = pdu.NewBindTransmitterResp()
case pdu.BindReceiverID:
resp = pdu.NewBindReceiverResp()
case pdu.BindTransceiverID:
resp = pdu.NewBindTransceiverResp()
default:
return errors.New("unexpected pdu, want bind")
}
f := p.Fields()
user := f[pdufield.SystemID]
passwd := f[pdufield.Password]
if user == nil || passwd == nil {
return errors.New("malformed pdu, missing system_id/password")
}
if user.String() != srv.User {
return errors.New("invalid user")
}
if passwd.String() != srv.Passwd {
return errors.New("invalid passwd")
}
resp.Fields().Set(pdufield.SystemID, DefaultSystemID)
return c.Write(resp)
}
// EchoHandler is the default Server HandlerFunc, and echoes back
// any PDUs received.
func EchoHandler(cli Conn, m pdu.Body) {
// log.Printf("smpptest: echo PDU from %s: %#v", cli.RemoteAddr(), m)
//
// Real servers will reply with at least the same sequence number
// from the request:
// resp := pdu.NewSubmitSMResp()
// resp.Header().Seq = m.Header().Seq
// resp.Fields().Set(pdufield.MessageID, "1234")
// cli.Write(resp)
//
// We just echo m back:
cli.Write(m)
} | |
abc178C_ubiquity.py
# Vicfred & uninhm
# https://atcoder.jp/contests/abc178/tasks/abc178_c
# combinatorics
n = int(input())
mod = 10**9+7
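# Inclusion-exclusion over length-n digit strings: strings containing both a
# 0 and a 9 = 10^n - (strings without 0) - (strings without 9)
#           + (strings without 0 and without 9) = 10^n - 2*9^n + 8^n,
# all taken modulo 10^9+7.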
print((pow(10, n, mod) - 2*pow(9, n, mod) + pow(8, n, mod)) % mod)
|
DiscoverGranulesS3SuccessSpec.js
'use strict';
const pWaitFor = require('p-wait-for');
const { randomString } = require('@cumulus/common/test-utils');
const {
buildAndExecuteWorkflow,
loadCollection,
loadProvider,
waitForCompletedExecution
} = require('@cumulus/integration-tests');
const { LambdaStep } = require('@cumulus/integration-tests/sfnStep');
const {
createCollection, deleteCollection
} = require('@cumulus/api-client/collections');
const { getExecution } = require('@cumulus/api-client/executions');
const {
createProvider, deleteProvider
} = require('@cumulus/api-client/providers');
const {
deleteFolder, loadConfig, updateAndUploadTestDataToBucket
} = require('../../helpers/testUtils');
describe('The DiscoverGranules workflow', () => {
let beforeAllCompleted = false;
let collection;
let provider;
let queueGranulesOutput;
let workflowExecution;
let stackName;
let bucket;
let providerPath;
beforeAll(async () => {
({ stackName, bucket } = await loadConfig());
process.env.stackName = stackName;
process.env.system_bucket = bucket;
process.env.ProvidersTable = `${stackName}-ProvidersTable`;
const testId = randomString();
// Create the provider
provider = await loadProvider({
filename: './data/providers/s3/s3_provider.json',
postfix: testId,
s3Host: bucket
});
await createProvider({ prefix: stackName, provider });
// Create the collection
collection = await loadCollection({
filename: './data/collections/s3_MOD09GQ_006/s3_MOD09GQ_006.json',
postfix: testId
});
await createCollection({ prefix: stackName, collection });
providerPath = `cumulus-test-data/${testId}`;
// Upload the granule to be discovered
await updateAndUploadTestDataToBucket(
bucket,
[
'@cumulus/test-data/granules/MOD09GQ.A2016358.h13v04.006.2016360104606.hdf.met',
'@cumulus/test-data/granules/MOD09GQ.A2016358.h13v04.006.2016360104606.hdf',
'@cumulus/test-data/granules/MOD09GQ.A2016358.h13v04.006.2016360104606_ndvi.jpg'
],
providerPath
);
// Execute the DiscoverGranules workflow
workflowExecution = await buildAndExecuteWorkflow(
stackName,
bucket,
'DiscoverGranules',
collection,
provider,
undefined,
{ provider_path: providerPath }
);
// Get the output of the QueueGranules task. Doing it here because there are
// two tests that need it.
queueGranulesOutput = await (new LambdaStep()).getStepOutput(
workflowExecution.executionArn,
'QueueGranules'
);
beforeAllCompleted = true;
});
afterAll(() =>
Promise.all([
deleteFolder(bucket, providerPath),
deleteCollection({
prefix: stackName,
collectionName: collection.name,
collectionVersion: collection.version
}),
deleteProvider({
prefix: stackName,
provider: provider.id
})
]));
it('executes successfully', () => {
if (!beforeAllCompleted) fail('beforeAll() failed');
else expect(workflowExecution.status).toEqual('SUCCEEDED');
});
it('can be fetched from the API', async () => {
if (!beforeAllCompleted) fail('beforeAll() failed');
else {
await expectAsync(
pWaitFor(
async () => {
const { status } = await getExecution({
prefix: stackName,
arn: workflowExecution.executionArn
});
return status === 'completed';
},
{ interval: 2000, timeout: 60000 }
)
).toBeResolved();
}
});
it('results in a successful IngestGranule workflow execution', async () => {
if (!beforeAllCompleted) fail('beforeAll() failed');
else {
const ingestGranuleExecutionStatus = await waitForCompletedExecution(
queueGranulesOutput.payload.running[0]
);
expect(ingestGranuleExecutionStatus).toEqual('SUCCEEDED');
}
});
describe('DiscoverGranules task', () => {
it('outputs the list of discovered granules', async () => {
if (!beforeAllCompleted) fail('beforeAll() failed');
else {
const discoverGranulesOutput = await (new LambdaStep()).getStepOutput(
workflowExecution.executionArn,
'DiscoverGranules'
);
expect(discoverGranulesOutput.payload.granules.length).toEqual(1);
const granule = discoverGranulesOutput.payload.granules[0];
expect(granule.granuleId).toEqual('MOD09GQ.A2016358.h13v04.006.2016360104606');
expect(granule.dataType).toEqual(collection.name);
expect(granule.version).toEqual(collection.version);
expect(granule.files.length).toEqual(3);
}
});
});
describe('QueueGranules task', () => {
it('has queued the granule', () => {
if (!beforeAllCompleted) fail('beforeAll() failed');
else expect(queueGranulesOutput.payload.running.length).toEqual(1);
});
});
});
m1.pb.go
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: imports/test_a_1/m1.proto
package test_a_1
import (
fmt "fmt"
proto "github.com/gogo/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type E1 int32
const (
E1_E1_ZERO E1 = 0
)
var E1_name = map[int32]string{
0: "E1_ZERO",
}
var E1_value = map[string]int32{
"E1_ZERO": 0,
}
func (x E1) String() string {
return proto.EnumName(E1_name, int32(x))
}
func (E1) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_c1091de3fa870a14, []int{0}
}
type M1 struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *M1) Reset() { *m = M1{} }
func (m *M1) String() string { return proto.CompactTextString(m) }
func (*M1) ProtoMessage() {}
func (*M1) Descriptor() ([]byte, []int) {
return fileDescriptor_c1091de3fa870a14, []int{0}
}
func (m *M1) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_M1.Unmarshal(m, b)
}
func (m *M1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_M1.Marshal(b, m, deterministic)
}
func (m *M1) XXX_Merge(src proto.Message) {
xxx_messageInfo_M1.Merge(m, src)
}
func (m *M1) XXX_Size() int {
return xxx_messageInfo_M1.Size(m)
}
func (m *M1) XXX_DiscardUnknown() {
xxx_messageInfo_M1.DiscardUnknown(m)
}
var xxx_messageInfo_M1 proto.InternalMessageInfo
type M1_1 struct {
M1 *M1 `protobuf:"bytes,1,opt,name=m1,proto3" json:"m1,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *M1_1) Reset() { *m = M1_1{} }
func (m *M1_1) String() string { return proto.CompactTextString(m) }
func (*M1_1) ProtoMessage() {}
func (*M1_1) Descriptor() ([]byte, []int) {
return fileDescriptor_c1091de3fa870a14, []int{1}
}
func (m *M1_1) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_M1_1.Unmarshal(m, b)
}
func (m *M1_1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_M1_1.Marshal(b, m, deterministic)
}
func (m *M1_1) XXX_Merge(src proto.Message) {
xxx_messageInfo_M1_1.Merge(m, src)
}
func (m *M1_1) XXX_Size() int {
return xxx_messageInfo_M1_1.Size(m)
}
func (m *M1_1) XXX_DiscardUnknown() {
xxx_messageInfo_M1_1.DiscardUnknown(m)
}
var xxx_messageInfo_M1_1 proto.InternalMessageInfo
func (m *M1_1) GetM1() *M1 {
if m != nil {
return m.M1
}
return nil
}
func init() {
proto.RegisterEnum("test.a.E1", E1_name, E1_value)
proto.RegisterType((*M1)(nil), "test.a.M1")
proto.RegisterType((*M1_1)(nil), "test.a.M1_1")
}
func init() { proto.RegisterFile("imports/test_a_1/m1.proto", fileDescriptor_c1091de3fa870a14) }
var fileDescriptor_c1091de3fa870a14 = []byte{
// 163 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8,
0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x4f, 0x8c, 0x37, 0xd4, 0xcf, 0x35, 0xd4,
0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0x09, 0xe9, 0x25, 0x2a, 0xb1, 0x70, 0x31, 0xf9,
0x1a, 0x2a, 0x29, 0x71, 0xb1, 0xf8, 0x1a, 0xc6, 0x1b, 0x0a, 0x49, 0x71, 0x31, 0xe5, 0x1a, 0x4a,
0x30, 0x2a, 0x30, 0x6a, 0x70, 0x1b, 0x71, 0xe9, 0x41, 0x94, 0xe8, 0xf9, 0x1a, 0x06, 0x31, 0xe5,
0x1a, 0x6a, 0x09, 0x72, 0x31, 0xb9, 0x1a, 0x0a, 0x71, 0x73, 0xb1, 0xbb, 0x1a, 0xc6, 0x47, 0xb9,
0x06, 0xf9, 0x0b, 0x30, 0x38, 0xb9, 0x44, 0x39, 0xa5, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25,
0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xa7, 0xe7, 0xeb, 0x83, 0x4d, 0x4f, 0x2a, 0x4d, 0x83, 0x30, 0x92,
0x75, 0xd3, 0x53, 0xf3, 0x74, 0xc1, 0x12, 0x20, 0xc3, 0x52, 0x12, 0x4b, 0x12, 0xf5, 0xd1, 0xdd,
0x94, 0xc4, 0x06, 0x56, 0x6a, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xfe, 0xd5, 0x3e, 0x41, 0xae,
0x00, 0x00, 0x00,
}
common.py
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/04b_classification.model.meta_arch.common.ipynb (unless otherwise specified).
__all__ = ['GeneralizedImageClassifier']
# Cell
import logging
from collections import namedtuple
from typing import *
import torch
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.core.memory import get_human_readable_count
from torch.nn import Module
from ..backbones import ImageClassificationBackbone
from ..build import build_backbone, build_head
from ..heads import ImageClassificationHead
from ....core_classes import BasicModule
from ....utils.shape_spec import ShapeSpec
_logger = logging.getLogger(__name__)
# Cell
class GeneralizedImageClassifier(BasicModule):
"""
    A General Image Classifier. Any model that contains the following 2 components:
1. Feature extractor (aka backbone)
2. Image Classification head (Pooling + Classifier)
"""
_hypers = namedtuple("hypers", field_names=["lr", "wd"])
def __init__(
self,
backbone: ImageClassificationBackbone,
head: ImageClassificationHead,
):
"""
Arguments:
1. `backbone`: a `ImageClassificationBackbone` module, must follow gale's backbone interface
        2. `head`: a head containing the classifier and the pooling layer; must be an instance of
`ImageClassificationHead`.
"""
super(GeneralizedImageClassifier, self).__init__()
self.backbone = backbone
assert isinstance(backbone, ImageClassificationBackbone)
self.head = head
assert isinstance(head, ImageClassificationHead)
def forward(self, batched_inputs: torch.Tensor) -> torch.Tensor:
        """
        Runs the batched_inputs through `backbone` followed by the `head`.
        Returns a Tensor which contains the logits for the batched_inputs.
        """
        # forward pass through the backbone
        out = self.backbone(batched_inputs)
        # pass through the classification layer
        out = self.head(out)
        return out
@classmethod
def from_config_dict(cls, cfg: DictConfig):
"""
Instantiate the Meta Architecture from gale config
"""
if not hasattr(cfg.model, "backbone"):
raise ValueError("Configuration for model backbone not found")
if not hasattr(cfg.model, "head"):
raise ValueError("Configuration for model head not found")
input_shape = ShapeSpec(cfg.input.channels, cfg.input.height, cfg.input.width)
_logger.debug(f"Inputs: {input_shape}")
backbone = build_backbone(cfg, input_shape=input_shape)
param_count = get_human_readable_count(
sum([m.numel() for m in backbone.parameters()])
)
_logger.debug(
"Backbone {} created, param count: {}.".format(
cfg.model.backbone.name, param_count
)
)
head = build_head(cfg, backbone.output_shape())
param_count = get_human_readable_count(
sum([m.numel() for m in head.parameters()])
)
_logger.debug(
"Head {} created, param count: {}.".format(cfg.model.head.name, param_count)
)
kwds = {"backbone": backbone, "head": head}
instance = cls(**kwds)
instance.input_shape = input_shape
param_count = get_human_readable_count(
sum([m.numel() for m in instance.parameters()])
)
_logger.info("Model created, param count: {}.".format(param_count))
return instance
def build_param_dicts(self):
"""
        Builds up the parameter dicts for optimization
"""
backbone_params = self.backbone.build_param_dicts()
head_params = self.head.build_param_dicts()
return backbone_params + head_params
@property
def hypers(self) -> Tuple:
"""
Returns list of parameters like `lr` and `wd`
for each param group
"""
lrs = []
wds = []
for p in self.build_param_dicts():
lrs.append(p["lr"])
wds.append(p["weight_decay"])
        return self._hypers(lrs, wds)
mod.rs
use std::collections::HashMap;
use pest::{Parser, Error as PestError};
use pest::prec_climber::{PrecClimber, Operator, Assoc};
use pest::iterators::Pair;
use errors::{Result as TeraResult, ResultExt};
// This include forces recompiling this source file if the grammar file changes.
// Uncomment it when doing changes to the .pest file
const _GRAMMAR: &'static str = include_str!("tera.pest");
#[derive(Parser)]
#[grammar = "parser/tera.pest"]
pub struct TeraParser;
pub mod ast;
mod whitespace;
#[cfg(test)]
mod tests;
pub use self::whitespace::remove_whitespace;
use self::ast::*;
lazy_static! {
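// pest's PrecClimber gives operators listed later in the vector higher
// precedence, so the multiplicative group below binds tighter than +/-.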
static ref MATH_CLIMBER: PrecClimber<Rule> = PrecClimber::new(vec![
// +, -
Operator::new(Rule::op_plus, Assoc::Left) | Operator::new(Rule::op_minus, Assoc::Left),
// *, /, %
Operator::new(Rule::op_times, Assoc::Left) |
Operator::new(Rule::op_slash, Assoc::Left) |
Operator::new(Rule::op_modulo, Assoc::Left),
]);
static ref COMPARISON_EXPR_CLIMBER: PrecClimber<Rule> = PrecClimber::new(vec![
// <, <=, >, >=, ==, !=
Operator::new(Rule::op_lt, Assoc::Left) | Operator::new(Rule::op_lte, Assoc::Left)
| Operator::new(Rule::op_gt, Assoc::Left) | Operator::new(Rule::op_gte, Assoc::Left)
| Operator::new(Rule::op_eq, Assoc::Left) | Operator::new(Rule::op_ineq, Assoc::Left),
]);
static ref LOGIC_EXPR_CLIMBER: PrecClimber<Rule> = PrecClimber::new(vec![
Operator::new(Rule::op_or, Assoc::Left),
Operator::new(Rule::op_and, Assoc::Left),
]);
}
fn parse_kwarg(pair: Pair<Rule>) -> (String, Expr) {
let mut name = None;
let mut val = None;
for p in pair.into_inner() {
match p.as_rule() {
Rule::ident => name = Some(p.into_span().as_str().to_string()),
Rule::logic_expr => val = Some(parse_logic_expr(p)),
_ => unreachable!("{:?} not supposed to get there (parse_kwarg)!", p.as_rule())
};
}
(name.unwrap(), val.unwrap())
}
fn parse_fn_call(pair: Pair<Rule>) -> FunctionCall {
let mut name = None;
let mut args = HashMap::new();
for p in pair.into_inner() {
match p.as_rule() {
Rule::ident => name = Some(p.into_span().as_str().to_string()),
Rule::kwarg => {
let (name, val) = parse_kwarg(p);
args.insert(name, val);
}
_ => unreachable!("{:?} not supposed to get there (parse_fn_call)!", p.as_rule())
};
}
FunctionCall { name: name.unwrap(), args }
}
fn parse_filter(pair: Pair<Rule>) -> FunctionCall {
let mut name = None;
let mut args = HashMap::new();
for p in pair.into_inner() {
match p.as_rule() {
Rule::ident => name = Some(p.into_span().as_str().to_string()),
Rule::kwarg => {
let (name, val) = parse_kwarg(p);
args.insert(name, val);
}
Rule::fn_call => {
return parse_fn_call(p);
}
_ => unreachable!("{:?} not supposed to get there (parse_filter)!", p.as_rule())
};
}
FunctionCall { name: name.unwrap(), args }
}
fn parse_test_call(pair: Pair<Rule>) -> (String, Vec<Expr>) {
let mut name = None;
let mut args = vec![];
for p in pair.into_inner() {
match p.as_rule() {
Rule::ident => name = Some(p.into_span().as_str().to_string()),
Rule::test_args =>
// iterate on the test_arg rule
for p2 in p.into_inner() {
// only expressions allowed in the grammar so we skip the
// matching
for p3 in p2.into_inner() {
args.push(parse_logic_expr(p3));
}
},
_ => unreachable!("{:?} not supposed to get there (parse_test_call)!", p.as_rule())
};
}
(name.unwrap(), args)
}
fn parse_test(pair: Pair<Rule>) -> Test {
let mut ident = None;
let mut name = None;
let mut args = vec![];
for p in pair.into_inner() {
match p.as_rule() {
Rule::dotted_ident => ident = Some(p.as_str().to_string()),
Rule::test_call => {
let (_name, _args) = parse_test_call(p);
name = Some(_name);
args = _args;
},
_ => unreachable!("{:?} not supposed to get there (parse_ident)!", p.as_rule())
};
}
Test { ident: ident.unwrap(), name: name.unwrap(), args }
}
fn parse_basic_expression(pair: Pair<Rule>) -> ExprVal {
let primary = |pair| {
parse_basic_expression(pair)
};
let infix = |lhs: ExprVal, op: Pair<Rule>, rhs: ExprVal| {
ExprVal::Math(
MathExpr {
lhs: Box::new(Expr::new(lhs)),
operator: match op.as_rule() {
Rule::op_plus => MathOperator::Add,
Rule::op_minus => MathOperator::Sub,
Rule::op_times => MathOperator::Mul,
Rule::op_slash => MathOperator::Div,
Rule::op_modulo => MathOperator::Modulo,
_ => unreachable!()
},
rhs: Box::new(Expr::new(rhs)),
}
)
};
match pair.as_rule() {
Rule::int => ExprVal::Int(pair.as_str().parse().unwrap()),
Rule::float => ExprVal::Float(pair.as_str().parse().unwrap()),
Rule::boolean => match pair.as_str() {
"true" => ExprVal::Bool(true),
"false" => ExprVal::Bool(false),
_ => unreachable!(),
},
Rule::test => ExprVal::Test(parse_test(pair)),
Rule::fn_call => ExprVal::FunctionCall(parse_fn_call(pair)),
Rule::macro_call => ExprVal::MacroCall(parse_macro_call(pair)),
Rule::string => ExprVal::String(pair.as_str().replace("\"", "").to_string()),
Rule::dotted_ident => ExprVal::Ident(pair.as_str().to_string()),
Rule::basic_expr => MATH_CLIMBER.climb(pair.into_inner(), primary, infix),
_ => unreachable!("Got {:?} in parse_basic_expression", pair.as_rule())
}
}
/// A basic expression with optional filters
fn parse_basic_expr_with_filters(pair: Pair<Rule>) -> Expr {
let mut expr = None;
let mut filters = vec![];
for p in pair.into_inner() {
match p.as_rule() {
Rule::basic_expr => expr = Some(parse_basic_expression(p)),
Rule::filter => filters.push(parse_filter(p)),
_ => unreachable!("Got {:?}", p),
};
}
Expr { val: expr.unwrap(), negated: false, filters }
}
/// A basic expression with optional filters
/// TODO: to rewrite
fn parse_comparison_val(pair: Pair<Rule>) -> Expr {
let primary = |pair| {
parse_comparison_val(pair)
};
let infix = |lhs: Expr, op: Pair<Rule>, rhs: Expr| {
Expr::new(ExprVal::Math(
MathExpr {
lhs: Box::new(lhs),
operator: match op.as_rule() {
Rule::op_plus => MathOperator::Add,
Rule::op_minus => MathOperator::Sub,
Rule::op_times => MathOperator::Mul,
Rule::op_slash => MathOperator::Div,
Rule::op_modulo => MathOperator::Modulo,
_ => unreachable!()
},
rhs: Box::new(rhs),
}
))
};
match pair.as_rule() {
Rule::basic_expr_filter => parse_basic_expr_with_filters(pair),
Rule::comparison_val => MATH_CLIMBER.climb(pair.into_inner(), primary, infix),
_ => unreachable!("Got {:?} in parse_comparison_val", pair.as_rule())
}
}
fn parse_comparison_expression(pair: Pair<Rule>) -> Expr {
let primary = |pair| {
parse_comparison_expression(pair)
};
let infix = |lhs: Expr, op: Pair<Rule>, rhs: Expr| {
Expr::new(
ExprVal::Logic(
LogicExpr {
lhs: Box::new(lhs),
operator: match op.as_rule() {
Rule::op_lt => LogicOperator::Lt,
Rule::op_lte => LogicOperator::Lte,
Rule::op_gt => LogicOperator::Gt,
Rule::op_gte => LogicOperator::Gte,
Rule::op_ineq => LogicOperator::NotEq,
Rule::op_eq => LogicOperator::Eq,
_ => unreachable!()
},
rhs: Box::new(rhs),
}
)
)
};
match pair.as_rule() {
Rule::comparison_val => parse_comparison_val(pair),
Rule::comparison_expr => COMPARISON_EXPR_CLIMBER.climb(pair.into_inner(), primary, infix),
_ => unreachable!("Got {:?} in parse_comparison_expression", pair.as_rule())
}
}
/// An expression that can be negated
fn parse_logic_val(pair: Pair<Rule>) -> Expr {
let mut negated = false;
let mut expr = None;
for p in pair.into_inner() {
match p.as_rule() {
Rule::op_not => negated = true,
Rule::comparison_expr => expr = Some(parse_comparison_expression(p)),
            _ => unreachable!(),
};
}
let mut e = expr.unwrap();
e.negated = negated;
e
}
fn parse_logic_expr(pair: Pair<Rule>) -> Expr {
let primary = |pair: Pair<Rule>| {
parse_logic_expr(pair)
};
let infix = |lhs: Expr, op: Pair<Rule>, rhs: Expr| {
match op.as_rule() {
Rule::op_or => {
Expr::new(ExprVal::Logic(LogicExpr {
lhs: Box::new(lhs),
operator: LogicOperator::Or,
rhs: Box::new(rhs)
}))
}
Rule::op_and => {
Expr::new(ExprVal::Logic(LogicExpr {
lhs: Box::new(lhs),
operator: LogicOperator::And,
rhs: Box::new(rhs)
}))
}
_ => unreachable!("{:?} not supposed to get there (infix of logic_expression)!", op.as_rule())
}
};
match pair.as_rule() {
Rule::logic_val => parse_logic_val(pair),
Rule::logic_expr => LOGIC_EXPR_CLIMBER.climb(pair.into_inner(), primary, infix),
_ => unreachable!("Got {:?} in parse_logic_expr", pair.as_rule())
}
}
fn parse_macro_call(pair: Pair<Rule>) -> MacroCall {
let mut namespace = None;
let mut name = None;
let mut args = HashMap::new();
for p in pair.into_inner() {
match p.as_rule() {
Rule::ident => {
// namespace comes first
if namespace.is_none() {
namespace = Some(p.into_span().as_str().to_string());
} else {
name = Some(p.into_span().as_str().to_string());
}
},
Rule::kwarg => {
let (key, val) = parse_kwarg(p);
args.insert(key, val);
},
_ => unreachable!("Got {:?} in parse_macro_call", p.as_rule())
}
}
MacroCall { namespace: namespace.unwrap(), name: name.unwrap(), args }
}
fn parse_variable_tag(pair: Pair<Rule>) -> Node {
    let p = pair.into_inner().next().unwrap();
Node::VariableBlock(parse_logic_expr(p))
}
fn parse_import_macro(pair: Pair<Rule>) -> Node {
let mut ws = WS::default();
let mut file = None;
let mut ident = None;
for p in pair.into_inner() {
match p.as_rule() {
Rule::tag_start => {
ws.left = p.into_span().as_str() == "{%-";
}
Rule::string => file = Some(p.into_span().as_str().replace("\"", "").to_string()),
Rule::ident => ident = Some(p.into_span().as_str().to_string()),
Rule::tag_end => {
ws.right = p.into_span().as_str() == "-%}";
}
_ => unreachable!()
};
}
Node::ImportMacro(ws, file.unwrap(), ident.unwrap())
}
/// `extends` and `include` have the same structure, so a single fn parses them both
fn parse_extends_include(pair: Pair<Rule>) -> (WS, String) {
let mut ws = WS::default();
let mut file = None;
for p in pair.into_inner() {
match p.as_rule() {
Rule::tag_start => {
ws.left = p.into_span().as_str() == "{%-";
}
Rule::string => file = Some(p.into_span().as_str().replace("\"", "").to_string()),
Rule::tag_end => {
ws.right = p.into_span().as_str() == "-%}";
}
_ => unreachable!()
};
}
(ws, file.unwrap())
}
fn parse_set_tag(pair: Pair<Rule>) -> Node {
let mut ws = WS::default();
let mut key = None;
let mut expr = None;
let mut global = false;
for p in pair.into_inner() {
match p.as_rule() {
Rule::tag_start => {
ws.left = p.into_span().as_str() == "{%-";
},
Rule::tag_end => {
ws.right = p.into_span().as_str() == "-%}";
},
Rule::set_scope => {
global = p.into_span().as_str() == "set_global";
}
Rule::ident => key = Some(p.as_str().to_string()),
            Rule::logic_expr => expr = Some(parse_logic_expr(p)),
_ => unreachable!("unexpected {:?} rule in parse_set_tag", p.as_rule()),
}
}
Node::Set(ws, Set {key: key.unwrap(), value: expr.unwrap(), global})
}
fn parse_raw_tag(pair: Pair<Rule>) -> Node {
let mut start_ws = WS::default();
let mut end_ws = WS::default();
let mut text = None;
for p in pair.into_inner() {
match p.as_rule() {
Rule::raw_tag => {
for p2 in p.into_inner() {
match p2.as_rule() {
Rule::tag_start => start_ws.left = p2.into_span().as_str() == "{%-",
Rule::tag_end => start_ws.right = p2.into_span().as_str() == "-%}",
_ => unreachable!(),
}
}
},
Rule::raw_text => text = Some(p.as_str().to_string()),
Rule::endraw_tag => {
for p2 in p.into_inner() {
match p2.as_rule() {
Rule::tag_start => end_ws.left = p2.into_span().as_str() == "{%-",
Rule::tag_end => end_ws.right = p2.into_span().as_str() == "-%}",
_ => unreachable!(),
}
}
},
_ => unreachable!("unexpected {:?} rule in parse_raw_tag", p.as_rule()),
};
}
Node::Raw(start_ws, text.unwrap(), end_ws)
}
fn parse_filter_section(pair: Pair<Rule>) -> Node |
fn parse_block(pair: Pair<Rule>) -> Node {
let mut start_ws = WS::default();
let mut end_ws = WS::default();
let mut name = None;
let mut body = vec![];
for p in pair.into_inner() {
match p.as_rule() {
Rule::block_tag => {
for p2 in p.into_inner() {
match p2.as_rule() {
Rule::tag_start => start_ws.left = p2.into_span().as_str() == "{%-",
Rule::tag_end => start_ws.right = p2.into_span().as_str() == "-%}",
Rule::ident => name = Some(p2.into_span().as_str().to_string()),
_ => unreachable!(),
};
}
},
Rule::block_content => body.extend(parse_content(p)),
Rule::endblock_tag => {
for p2 in p.into_inner() {
match p2.as_rule() {
Rule::tag_start => end_ws.left = p2.into_span().as_str() == "{%-",
Rule::tag_end => end_ws.right = p2.into_span().as_str() == "-%}",
Rule::ident => (),
_ => unreachable!(),
};
}
},
_ => unreachable!("unexpected {:?} rule in parse_filter_section", p.as_rule()),
};
}
    Node::Block(start_ws, Block { name: name.unwrap(), body }, end_ws)
}
fn parse_macro_definition(pair: Pair<Rule>) -> Node {
let mut start_ws = WS::default();
let mut end_ws = WS::default();
let mut name = None;
let mut args = HashMap::new();
let mut body = vec![];
for p in pair.into_inner() {
match p.as_rule() {
Rule::macro_tag => {
for p2 in p.into_inner() {
match p2.as_rule() {
Rule::tag_start => start_ws.left = p2.into_span().as_str() == "{%-",
Rule::tag_end => start_ws.right = p2.into_span().as_str() == "-%}",
Rule::ident => name = Some(p2.as_str().to_string()),
Rule::macro_def_arg => {
let mut arg_name = None;
let mut default_val = None;
for p3 in p2.into_inner() {
match p3.as_rule() {
Rule::ident => arg_name = Some(p3.as_str().to_string()),
// no filters allowed on macro definition
_ => default_val = Some(Expr::new(parse_basic_expression(p3))),
};
}
args.insert(arg_name.unwrap(), default_val);
},
_ => continue,
};
}
},
Rule::macro_content => body.extend(parse_content(p)),
Rule::endmacro_tag => {
for p2 in p.into_inner() {
match p2.as_rule() {
Rule::tag_start => end_ws.left = p2.into_span().as_str() == "{%-",
Rule::tag_end => end_ws.right = p2.into_span().as_str() == "-%}",
Rule::ident => (),
_ => unreachable!(),
};
}
},
_ => unreachable!("unexpected {:?} rule in parse_macro_definition", p.as_rule()),
}
}
Node::MacroDefinition(start_ws, MacroDefinition {name: name.unwrap(), args, body}, end_ws)
}
fn parse_forloop(pair: Pair<Rule>) -> Node {
let mut start_ws = WS::default();
let mut end_ws = WS::default();
let mut key = None;
let mut value = None;
let mut container = None;
let mut body = vec![];
for p in pair.into_inner() {
match p.as_rule() {
Rule::for_tag => {
let mut idents = vec![];
for p2 in p.into_inner() {
match p2.as_rule() {
Rule::tag_start => start_ws.left = p2.into_span().as_str() == "{%-",
Rule::tag_end => start_ws.right = p2.into_span().as_str() == "-%}",
Rule::ident => idents.push(p2.as_str().to_string()),
Rule::logic_expr => container = Some(parse_logic_expr(p2)),
_ => unreachable!(),
};
}
if idents.len() == 1 {
value = Some(idents[0].clone());
} else {
key = Some(idents[0].clone());
value = Some(idents[1].clone());
}
},
Rule::content | Rule::macro_content | Rule::block_content => body.extend(parse_content(p)),
Rule::endfor_tag => {
for p2 in p.into_inner() {
match p2.as_rule() {
Rule::tag_start => end_ws.left = p2.into_span().as_str() == "{%-",
Rule::tag_end => end_ws.right = p2.into_span().as_str() == "-%}",
Rule::ident => (),
_ => unreachable!(),
};
}
},
_ => unreachable!("unexpected {:?} rule in parse_forloop", p.as_rule()),
};
}
Node::Forloop(
start_ws,
Forloop {
key,
value: value.unwrap(),
container: container.unwrap(),
body,
},
end_ws,
)
}
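/// Parse an `if`/`elif`/`else` chain, collecting one `(ws, condition, body)` triple per branch.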
fn parse_if(pair: Pair<Rule>) -> Node {
// the `endif` tag ws handling
let mut end_ws = WS::default();
let mut conditions = vec![];
let mut otherwise = None;
// the current node we're exploring
let mut current_ws = WS::default();
let mut expr = None;
let mut current_body = vec![];
let mut in_else = false;
for p in pair.into_inner() {
match p.as_rule() {
Rule::if_tag | Rule::elif_tag => {
// Reset everything for elifs
if p.as_rule() == Rule::elif_tag {
conditions.push((current_ws, expr.unwrap(), current_body));
expr = None;
current_ws = WS::default();
current_body = vec![];
}
for p2 in p.into_inner() {
match p2.as_rule() {
Rule::tag_start => current_ws.left = p2.into_span().as_str() == "{%-",
Rule::tag_end => current_ws.right = p2.into_span().as_str() == "-%}",
Rule::logic_expr => expr = Some(parse_logic_expr(p2)),
_ => unreachable!(),
};
}
},
Rule::content | Rule::macro_content | Rule::block_content => current_body.extend(parse_content(p)),
Rule::else_tag => {
// had an elif before the else
if expr.is_some() {
conditions.push((current_ws, expr.unwrap(), current_body));
expr = None;
current_ws = WS::default();
current_body = vec![];
}
in_else = true;
for p2 in p.into_inner() {
match p2.as_rule() {
Rule::tag_start => current_ws.left = p2.into_span().as_str() == "{%-",
Rule::tag_end => current_ws.right = p2.into_span().as_str() == "-%}",
_ => unreachable!(),
};
}
},
Rule::endif_tag => {
if in_else {
otherwise = Some((current_ws, current_body));
} else {
// the last elif
conditions.push((current_ws, expr.unwrap(), current_body));
}
for p2 in p.into_inner() {
match p2.as_rule() {
Rule::tag_start => end_ws.left = p2.into_span().as_str() == "{%-",
Rule::tag_end => end_ws.right = p2.into_span().as_str() == "-%}",
_ => unreachable!(),
};
}
break;
},
_ => unreachable!("unreachable rule in parse_if: {:?}", p.as_rule()),
}
}
Node::If(If {conditions, otherwise}, end_ws)
}
fn parse_content(pair: Pair<Rule>) -> Vec<Node> {
let mut nodes = vec![];
for p in pair.into_inner() {
match p.as_rule() {
Rule::include_tag => {
let (ws, file) = parse_extends_include(p);
nodes.push(Node::Include(ws, file));
},
// Ignore comments
Rule::comment_tag => (),
Rule::super_tag => nodes.push(Node::Super),
Rule::set_tag => nodes.push(parse_set_tag(p)),
Rule::raw => nodes.push(parse_raw_tag(p)),
Rule::variable_tag => nodes.push(parse_variable_tag(p)),
Rule::import_macro_tag => nodes.push(parse_import_macro(p)),
Rule::macro_definition => nodes.push(parse_macro_definition(p)),
Rule::forloop | Rule::macro_forloop | Rule::block_forloop => nodes.push(parse_forloop(p)),
Rule::content_if | Rule::macro_if | Rule::block_if => nodes.push(parse_if(p)),
Rule::filter_section | Rule::macro_filter_section | Rule::block_filter_section => {
nodes.push(parse_filter_section(p))
},
Rule::text => nodes.push(Node::Text(p.into_span().as_str().to_string())),
Rule::block => nodes.push(parse_block(p)),
_ => unreachable!("unreachable content rule: {:?}", p.as_rule())
};
}
nodes
}
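/// Entry point: parse a whole template into its list of top-level nodes.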
pub fn parse(input: &str) -> TeraResult<Vec<Node>> {
let mut pairs = match TeraParser::parse(Rule::template, input) {
Ok(p) => p,
Err(e) => match e {
PestError::ParsingError { pos, .. } => {
let (line_no, col_no) = pos.line_col();
bail!("Invalid Tera syntax at line {}, col {}", line_no, col_no);
},
_ => unreachable!(),
}
};
let mut nodes = vec![];
// We must have at least a `template` pair if we got there
for p in pairs.next().unwrap().into_inner() {
match p.as_rule() {
Rule::extends_tag => {
let (ws, file) = parse_extends_include(p);
nodes.push(Node::Extends(ws, file));
}
Rule::content => nodes.extend(parse_content(p)),
Rule::comment_tag => (),
_ => unreachable!("unknown tpl rule: {:?}", p.as_rule()),
}
}
Ok(nodes)
}
| {
let mut start_ws = WS::default();
let mut end_ws = WS::default();
let mut filter = None;
let mut body = vec![];
for p in pair.into_inner() {
match p.as_rule() {
Rule::filter_tag => {
for p2 in p.into_inner() {
match p2.as_rule() {
Rule::tag_start => start_ws.left = p2.into_span().as_str() == "{%-",
Rule::tag_end => start_ws.right = p2.into_span().as_str() == "-%}",
Rule::fn_call => filter = Some(parse_fn_call(p2)),
Rule::ident => filter = Some(FunctionCall { name: p2.as_str().to_string(), args: HashMap::new() }),
_ => unreachable!("Got {:?} while parsing filter_tag", p2),
}
}
},
Rule::content | Rule::macro_content | Rule::block_content => body.extend(parse_content(p)),
Rule::endfilter_tag => {
for p2 in p.into_inner() {
match p2.as_rule() {
Rule::tag_start => end_ws.left = p2.into_span().as_str() == "{%-",
Rule::tag_end => end_ws.right = p2.into_span().as_str() == "-%}",
_ => unreachable!(),
}
}
},
_ => unreachable!("unexpected {:?} rule in parse_filter_section", p.as_rule()),
};
}
Node::FilterSection(start_ws, FilterSection {filter: filter.unwrap(), body}, end_ws)
} |
cs_labels.py | #!/usr/bin/python
#
# Cityscapes labels
#
from collections import namedtuple
#--------------------------------------------------------------------------------
# Definitions
#--------------------------------------------------------------------------------
# a label and all meta information
Label = namedtuple( 'Label' , [
'name' , # The identifier of this label, e.g. 'car', 'person', ... .
# We use them to uniquely name a class
'id' , # An integer ID that is associated with this label.
# The IDs are used to represent the label in ground truth images
# An ID of -1 means that this label does not have an ID and thus
# is ignored when creating ground truth images (e.g. license plate).
# Do not modify these IDs, since exactly these IDs are expected by the
# evaluation server.
'trainId' , # Feel free to modify these IDs as suitable for your method. Then create
# ground truth images with train IDs, using the tools provided in the
# 'preparation' folder. However, make sure to validate or submit results
# to our evaluation server using the regular IDs above!
# For trainIds, multiple labels might have the same ID. Then, these labels
# are mapped to the same class in the ground truth images. For the inverse
# mapping, we use the label that is defined first in the list below.
# For example, mapping all void-type classes to the same ID in training,
# might make sense for some approaches.
# Max value is 255!
'category' , # The name of the category that this label belongs to
'categoryId' , # The ID of this category. Used to create ground truth images
# on category level.
'hasInstances', # Whether this label distinguishes between single instances or not
'ignoreInEval', # Whether pixels having this class as ground truth label are ignored
# during evaluations or not
'color' , # The color of this label
] )
#--------------------------------------------------------------------------------
# A list of all labels
#--------------------------------------------------------------------------------
# Please adapt the train IDs as appropriate for your approach.
# Note that you might want to ignore labels with ID 255 during training.
# Further note that the current train IDs are only a suggestion. You can use whatever you like.
# Make sure to provide your results using the original IDs and not the training IDs.
# Note that many IDs are ignored in evaluation and thus you never need to predict these!
labels = [
# name id trainId category catId hasInstances ignoreInEval color
Label( 'unlabeled' , 0 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'ego vehicle' , 1 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'rectification border' , 2 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'out of roi' , 3 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'static' , 4 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
Label( 'dynamic' , 5 , 255 , 'void' , 0 , False , True , (111, 74, 0) ),
Label( 'ground' , 6 , 255 , 'void' , 0 , False , True , ( 81, 0, 81) ),
Label( 'road' , 7 , 0 , 'flat' , 1 , False , False , (128, 64,128) ),
Label( 'sidewalk' , 8 , 1 , 'flat' , 1 , False , False , (244, 35,232) ),
Label( 'parking' , 9 , 255 , 'flat' , 1 , False , True , (250,170,160) ),
Label( 'rail track' , 10 , 255 , 'flat' , 1 , False , True , (230,150,140) ),
Label( 'building' , 11 , 2 , 'construction' , 2 , False , False , ( 70, 70, 70) ),
Label( 'wall' , 12 , 3 , 'construction' , 2 , False , False , (102,102,156) ),
Label( 'fence' , 13 , 4 , 'construction' , 2 , False , False , (190,153,153) ),
Label( 'guard rail' , 14 , 255 , 'construction' , 2 , False , True , (180,165,180) ),
Label( 'bridge' , 15 , 255 , 'construction' , 2 , False , True , (150,100,100) ),
Label( 'tunnel' , 16 , 255 , 'construction' , 2 , False , True , (150,120, 90) ),
Label( 'pole' , 17 , 5 , 'object' , 3 , False , False , (153,153,153) ),
Label( 'polegroup' , 18 , 255 , 'object' , 3 , False , True , (153,153,153) ),
Label( 'traffic light' , 19 , 6 , 'object' , 3 , False , False , (250,170, 30) ),
Label( 'traffic sign' , 20 , 7 , 'object' , 3 , False , False , (220,220, 0) ),
Label( 'vegetation' , 21 , 8 , 'nature' , 4 , False , False , (107,142, 35) ),
Label( 'terrain' , 22 , 9 , 'nature' , 4 , False , False , (152,251,152) ),
Label( 'sky' , 23 , 10 , 'sky' , 5 , False , False , ( 70,130,180) ),
Label( 'person' , 24 , 11 , 'human' , 6 , True , False , (220, 20, 60) ),
Label( 'rider' , 25 , 12 , 'human' , 6 , True , False , (255, 0, 0) ),
Label( 'car' , 26 , 13 , 'vehicle' , 7 , True , False , ( 0, 0,142) ),
Label( 'truck' , 27 , 14 , 'vehicle' , 7 , True , False , ( 0, 0, 70) ),
Label( 'bus' , 28 , 15 , 'vehicle' , 7 , True , False , ( 0, 60,100) ),
Label( 'caravan' , 29 , 255 , 'vehicle' , 7 , True , True , ( 0, 0, 90) ),
Label( 'trailer' , 30 , 255 , 'vehicle' , 7 , True , True , ( 0, 0,110) ),
Label( 'train' , 31 , 16 , 'vehicle' , 7 , True , False , ( 0, 80,100) ),
Label( 'motorcycle' , 32 , 17 , 'vehicle' , 7 , True , False , ( 0, 0,230) ),
Label( 'bicycle' , 33 , 18 , 'vehicle' , 7 , True , False , (119, 11, 32) ),
Label( 'license plate' , -1 , -1 , 'vehicle' , 7 , False , True , ( 0, 0,142) ),
    Label(  'lane marking'         , 34 ,       19 , 'flat'            , 1       , False        , True         , (192, 64,192) ),
]
#--------------------------------------------------------------------------------
# Create dictionaries for a fast lookup
#--------------------------------------------------------------------------------
# Please refer to the main method below for example usages!
# name to label object
name2label = { label.name : label for label in labels }
# id to label object
id2label = { label.id : label for label in labels }
# trainId to label object
trainId2label = { label.trainId : label for label in reversed(labels) }
# category to list of label objects
category2labels = {}
for label in labels:
category = label.category
if category in category2labels:
category2labels[category].append(label)
else:
category2labels[category] = [label]
#--------------------------------------------------------------------------------
# Assure single instance name
#--------------------------------------------------------------------------------
# returns the label name that describes a single instance (if possible)
# e.g. input | output
# ----------------------
# car | car
# cargroup | car
# foo | None
# foogroup | None
# skygroup | None
def assureSingleInstanceName( name ):
# if the name is known, it is not a group
if name in name2label:
return name
# test if the name actually denotes a group
if not name.endswith("group"):
return None
# remove group
name = name[:-len("group")]
# test if the new name exists
if not name in name2label:
return None
# test if the new name denotes a label that actually has instances
if not name2label[name].hasInstances:
return None
# all good then
return name
#--------------------------------------------------------------------------------
# Main for testing
#--------------------------------------------------------------------------------
# just a dummy main
if __name__ == "__main__":
# Print all the labels
print("List of cityscapes labels:")
print("")
print(" {:>21} | {:>3} | {:>7} | {:>14} | {:>10} | {:>12} | {:>12}".format( 'name', 'id', 'trainId', 'category', 'categoryId', 'hasInstances', 'ignoreInEval' ))
print(" " + ('-' * 98))
for label in labels:
print(" {:>21} | {:>3} | {:>7} | {:>14} | {:>10} | {:>12} | {:>12}".format( label.name, label.id, label.trainId, label.category, label.categoryId, label.hasInstances, label.ignoreInEval ))
print("")
print("Example usages:")
| # Map from name to label
name = 'car'
id = name2label[name].id
print("ID of label '{name}': {id}".format( name=name, id=id ))
# Map from ID to label
category = id2label[id].category
print("Category of label with ID '{id}': {category}".format( id=id, category=category ))
# Map from trainID to label
trainId = 0
name = trainId2label[trainId].name
print("Name of label with trainID '{id}': {name}".format( id=trainId, name=name )) | |
41.js | import React from "react";
import Panel from "@/components/Panel/Panel.js";
function Button() {
return (
<div>
<Panel />
<div id="viewer">
<div className="f416">
<h1 className="buttonh1">Button#41</h1>
<a href="/buttons/41">
<button className="btn41-43 btn-41">Button</button>
</a>
<h2 className="h2source">
Creator:{" "}
<a
href="https://github.com/r1"
className="avis"
target="_blank"
rel="noopener noreferrer"
>
github.com/r1
</a>
</h2>
<div>
<pre className="prettyprint">{`<a href="/">
<button className="btn41-43 btn-41">Button</button>
</a>`}</pre>
</div>
<div>
<pre className="prettyprint">{`.btn41-43 {
padding: 10px 25px;
font-family: "Roboto", sans-serif;
font-weight: 500;
background: transparent;
outline: none !important;
cursor: pointer;
transition: all 0.3s ease;
position: relative;
display: inline-block;
}
.btn-41 {
border: 2px solid rgb(255, 255, 255);
z-index: 1;
color: white;
}
.btn-41:after {
position: absolute;
content: "";
width: 0;
height: 100%;
top: 0;
left: 0;
direction: rtl;
z-index: -1;
background: rgb(255, 255, 255);
transition: all 0.3s ease;
}
.btn-41:hover {
color: rgb(0, 0, 0);
}
.btn-41:hover:after {
left: auto;
right: 0; | width: 100%;
}
.btn-41:active {
top: 2px;
}`}</pre>
</div>
</div>
</div>
</div>
);
}
export default Button; | |
notifications.controller.ts | import { Controller, Get, Param } from '@nestjs/common';
import { NotificationsService } from '../../services/notifications/notifications.service';
@Controller('api/v1/notifications')
export class | {
constructor(private readonly notificationsService: NotificationsService) {}
@Get()
find(): string {
return this.notificationsService.find();
}
@Get('mark/:id')
mark(@Param() params): string {
return this.notificationsService.mark(params);
}
@Get('mark-all')
markAll(): string {
return this.notificationsService.markAll();
}
@Get('test')
test(): string {
return this.notificationsService.test();
}
}
| NotificationsController |
cathegories-depenses.service.ts | import { Injectable, InternalServerErrorException, NotFoundException } from '@nestjs/common';
import { InjectRepository } from '@nestjs/typeorm';
import { CreateCathegoriesDepenseDto } from './dto/create-cathegories-depense.dto';
import { UpdateCathegoriesDepenseDto } from './dto/update-cathegories-depense.dto';
import { CathegoriesDepense } from './entities/cathegories-depense.entity';
import { Repository, UpdateResult, DeleteResult } from 'typeorm';
@Injectable()
export class CathegoriesDepensesService {
constructor(
@InjectRepository(CathegoriesDepense)
private cathegoriesDepensesRepository: Repository<CathegoriesDepense>
) {}
async create(createCathegoriesDepenseDto: CreateCathegoriesDepenseDto): Promise<CathegoriesDepense> {
try {
      const cathegoriesDepense: CathegoriesDepense = await this.cathegoriesDepensesRepository.save(createCathegoriesDepenseDto);
return cathegoriesDepense;
} catch (error) {
throw new InternalServerErrorException();
}
}
async findAll(): Promise<CathegoriesDepense[]> {
try {
const cathegoriesDepenses: CathegoriesDepense[] = await this.cathegoriesDepensesRepository.find();
if (cathegoriesDepenses.length === 0) {
throw new NotFoundException()
}
return cathegoriesDepenses;
} catch (error) {
      if (error.response && error.response.statusCode === 404) {
throw new NotFoundException();
} else {
throw new InternalServerErrorException();
}
}
}
| if (!cathegoriesDepenses) {
throw new NotFoundException()
}
return cathegoriesDepenses;
} catch (error) {
      if (error.response && error.response.statusCode === 404) {
throw new NotFoundException();
} else {
throw new InternalServerErrorException();
}
}
}
async update(id: number, updateCathegoriesDepenseDto: UpdateCathegoriesDepenseDto): Promise<UpdateResult> {
try {
const affected: UpdateResult = await this.cathegoriesDepensesRepository.update(id, updateCathegoriesDepenseDto);
      if (affected.affected === 0) {
throw new NotFoundException()
}
return affected;
} catch (error) {
      if (error.response && error.response.statusCode === 404) {
throw new NotFoundException();
} else {
throw new InternalServerErrorException();
}
}
}
async remove(id: number):Promise<DeleteResult> {
try {
const affected: DeleteResult = await this.cathegoriesDepensesRepository.delete(id);
      if (affected.affected === 0) {
throw new NotFoundException()
}
return affected;
} catch (error) {
      if (error.response && error.response.statusCode === 404) {
throw new NotFoundException();
} else {
throw new InternalServerErrorException();
}
}
}
} | async findOne(id: number): Promise<CathegoriesDepense> {
try {
const cathegoriesDepenses: CathegoriesDepense = await this.cathegoriesDepensesRepository.findOne(id); |
memoized_log_cutting.py | # nlantau, 2021-01-17
INT_MIN = -32422
def cut_log(p,n):
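    # Bottom-up rod cutting: r[j] = max over 1 <= i <= j of p[i] + r[j-i]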
r = [0 for _ in range(n+1)]
r[0] = 0
for j in range(1,n+1):
q = INT_MIN
for i in range(1,j+1):
q = max(q, p[i] + r[j-i])
r[j] = q
return r[n]
# Clever solutions
def cl(p,n):
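    # Same DP in one line per length: zip(p[1:], l[::-1]) pairs the price p[i]
    # of a first cut with the best revenue l[j-i] for the remaining length.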
l = [0]
for _ in range(n):
l.append(max(pi+li for pi, li in zip(p[1:], l[::-1])))
return l[n]
p = [ 0, 1, 5, 8, 9, 10, 17, 17, 20, 24, # 0X's
30, 32, 35, 39, 43, 43, 45, 49, 50, 54, # 1X's
57, 60, 65, 68, 70, 74, 80, 81, 84, 85, # 2X's
87, 91, 95, 99, 101, 104, 107, 112, 115, 116, # 3X's
119] # 40th element
print(cut_log(p, 8), "should equal 22")
print(cl(p, 8), "should equal 22") | print(cl(p, 10), "should equal 30")
print(cut_log(p, 22), "should equal 65")
print(cl(p, 22), "should equal 65")
print(cut_log(p, 35), "should equal 105")
print(cl(p, 35), "should equal 105") | print(cut_log(p, 10), "should equal 30") |
purine.js | module.exports = { | C: " #40E0D0",
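// Purines (A, G and the IUPAC ambiguity code R) share one colour;
// pyrimidines (C, T, U and the code Y) share the other.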
G: " #FF83FA",
R: " #FF83FA",
T: " #40E0D0",
U: " #40E0D0",
Y: " #40E0D0"
}; | A: " #FF83FA", |
expected.d.ts | interface A {}
interface B {}
interface C {}
declare class | {
get a(): A;
get b(): B;
set b(_: B);
readonly c: C;
}
export { D };
| D |
language.go | package localizer
import (
"bot/domain"
)
type LanguageLocalizer interface {
L(messageID string) string
LT(messageID string, template map[string]interface{}) string
LTP(messageID string, template map[string]interface{}, plural interface{}) string
Language() string
Localizer() domain.LocalizerGateway
}
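// WithLanguage binds a LocalizerGateway to a fixed language, so callers can
// translate message IDs without passing the language on every call.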
func | (loc domain.LocalizerGateway, lang string) LanguageLocalizer {
return &languageLocalizer{
loc: loc,
lang: lang,
}
}
type languageLocalizer struct {
loc domain.LocalizerGateway
lang string
}
func (l *languageLocalizer) L(messageID string) string {
return l.loc.L(messageID, l.lang)
}
func (l *languageLocalizer) LT(messageID string, template map[string]interface{}) string {
return l.loc.LT(messageID, l.lang, template)
}
func (l *languageLocalizer) LTP(messageID string, template map[string]interface{}, plural interface{}) string {
return l.loc.LTP(messageID, l.lang, template, plural)
}
func (l *languageLocalizer) Language() string {
return l.lang
}
func (l *languageLocalizer) Localizer() domain.LocalizerGateway {
return l.loc
}
| WithLanguage |
prblm_3.py | import math
def large_prime_fact(num):
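    # Trial division: divide out each prime factor completely; whatever remains
    # above 1 after testing up to sqrt(num) is itself the largest prime factor.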
    fact = 2
    last = 1  # largest factor divided out so far
    while fact * fact <= num:
        while num % fact == 0:
            last = fact
            num //= fact  # integer division keeps num an int
        fact += 1
if num > 1:
|
    return last
print(large_prime_fact(13195))
print(large_prime_fact(600851475143)) | return num |
setup.py | #!/usr/bin/env python3
import os
from setuptools import setup, Extension
def get_version():
|
# Should fail if the readme is missing
long_des = open('README.rst', 'r').read()
cnmrstar = Extension('cnmrstar',
sources=['c/cnmrstarmodule.c'],
extra_compile_args=["-funroll-loops", "-O3"],
optional=True)
setup(name='pynmrstar',
version=get_version(),
packages=['pynmrstar'],
ext_modules=[cnmrstar],
install_requires=['requests>=2.21.0,<=3'],
python_requires='>=3.6',
author='Jon Wedell',
author_email='[email protected]',
description='PyNMR-STAR provides tools for reading, writing, modifying, and interacting with NMR-STAR files. '
'Maintained by the BMRB.',
long_description=long_des,
long_description_content_type='text/x-rst',
keywords=['bmrb', 'parser', 'nmr', 'nmrstar', 'biomagresbank', 'biological magnetic resonance bank'],
url='https://github.com/uwbmrb/PyNMRSTAR',
license='MIT',
package_data={'pynmrstar': ['reference_files/schema.csv',
'reference_files/comments.str',
'reference_files/data_types.csv']},
classifiers=[
'Development Status :: 6 - Mature',
'Environment :: Console',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| internal_file_location = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'pynmrstar', '_internal.py')
with open(internal_file_location, 'r') as internal_file:
for line in internal_file:
if line.startswith('__version__'):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
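        # for/else: reached only if the loop finishes without finding __version__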
else:
raise RuntimeError("Unable to find version string.") |
build.rs | extern crate bindgen;
use std::path::PathBuf;
use std::process::Command;
const HEADER_PATH: &str = "include/uapi/linux/nitro_enclaves.h";
const OUT_FILE: &str = "/driver_structs.rs";
fn | () {
// Get latest commit SHA.
let output = Command::new("git")
.arg("describe")
.arg("--always")
.arg("--dirty")
.output();
// Convert command output to latest commit SHA and set an environment
// variable ("COMMIT_ID", only available in the build context) to
// the aforementioned commit SHA.
let stdout;
let output_str: &str = match output {
Ok(output) => {
stdout = output.stdout;
std::str::from_utf8(&stdout).expect("Invalid UTF-8 string provided")
}
_ => "",
};
println!("cargo:rustc-env=COMMIT_ID={}", output_str.trim());
println!("cargo:rerun-if-changed={}", HEADER_PATH);
let bindings = bindgen::Builder::default()
.header(HEADER_PATH)
.whitelist_type("ne_.*")
.whitelist_var("NE_ERR_.*")
.clang_arg(r"-fretain-comments-from-system-headers")
.clang_arg(r"-fparse-all-comments")
.parse_callbacks(Box::new(bindgen::CargoCallbacks))
.generate_comments(true)
.generate()
.expect("Unable to generate bindings");
let mut path_str = std::env::var("OUT_DIR").unwrap();
path_str.push_str(&OUT_FILE);
let out_path = PathBuf::from(path_str);
bindings
.write_to_file(out_path)
.expect("Could not write bindings");
}
| main |
dialog.spec.ts | import {
ComponentFixture,
fakeAsync,
flushMicrotasks,
inject,
TestBed,
tick,
flush,
} from '@angular/core/testing';
import {
ChangeDetectionStrategy,
Component,
Directive,
Inject,
Injector,
NgModule,
TemplateRef,
ViewChild,
ViewContainerRef,
ComponentFactoryResolver
} from '@angular/core';
import {By} from '@angular/platform-browser';
import {NoopAnimationsModule} from '@angular/platform-browser/animations';
import {Location} from '@angular/common';
import {SpyLocation} from '@angular/common/testing';
import {Directionality} from '@angular/cdk/bidi';
import {MatDialogContainer} from './dialog-container';
import {OverlayContainer, ScrollStrategy, Overlay} from '@angular/cdk/overlay';
import {ScrollDispatcher} from '@angular/cdk/scrolling';
import {A, ESCAPE} from '@angular/cdk/keycodes';
import {dispatchKeyboardEvent, createKeyboardEvent, dispatchEvent} from '@angular/cdk/testing';
import {
MAT_DIALOG_DATA,
MatDialog,
MatDialogModule,
MatDialogRef,
MAT_DIALOG_DEFAULT_OPTIONS
} from './index';
import {Subject} from 'rxjs';
describe('MatDialog', () => {
let dialog: MatDialog;
let overlayContainer: OverlayContainer;
let overlayContainerElement: HTMLElement;
let scrolledSubject = new Subject();
let testViewContainerRef: ViewContainerRef;
let viewContainerFixture: ComponentFixture<ComponentWithChildViewContainer>;
let mockLocation: SpyLocation;
beforeEach(fakeAsync(() => {
TestBed.configureTestingModule({
imports: [MatDialogModule, DialogTestModule],
providers: [
{provide: Location, useClass: SpyLocation},
{provide: ScrollDispatcher, useFactory: () => ({
scrolled: () => scrolledSubject.asObservable()
})},
],
});
TestBed.compileComponents();
}));
beforeEach(inject([MatDialog, Location, OverlayContainer],
(d: MatDialog, l: Location, oc: OverlayContainer) => {
dialog = d;
mockLocation = l as SpyLocation;
overlayContainer = oc;
overlayContainerElement = oc.getContainerElement();
}));
afterEach(() => {
overlayContainer.ngOnDestroy();
});
beforeEach(() => {
viewContainerFixture = TestBed.createComponent(ComponentWithChildViewContainer);
viewContainerFixture.detectChanges();
testViewContainerRef = viewContainerFixture.componentInstance.childViewContainer;
});
it('should open a dialog with a component', () => {
let dialogRef = dialog.open(PizzaMsg, {
viewContainerRef: testViewContainerRef
});
viewContainerFixture.detectChanges();
expect(overlayContainerElement.textContent).toContain('Pizza');
expect(dialogRef.componentInstance instanceof PizzaMsg).toBe(true);
expect(dialogRef.componentInstance.dialogRef).toBe(dialogRef);
viewContainerFixture.detectChanges();
let dialogContainerElement = overlayContainerElement.querySelector('mat-dialog-container')!;
expect(dialogContainerElement.getAttribute('role')).toBe('dialog');
});
it('should open a dialog with a template', () => {
const templateRefFixture = TestBed.createComponent(ComponentWithTemplateRef);
templateRefFixture.componentInstance.localValue = 'Bees';
templateRefFixture.detectChanges();
const data = {value: 'Knees'};
let dialogRef = dialog.open(templateRefFixture.componentInstance.templateRef, { data });
viewContainerFixture.detectChanges();
expect(overlayContainerElement.textContent).toContain('Cheese Bees Knees');
expect(templateRefFixture.componentInstance.dialogRef).toBe(dialogRef);
viewContainerFixture.detectChanges();
let dialogContainerElement = overlayContainerElement.querySelector('mat-dialog-container')!;
expect(dialogContainerElement.getAttribute('role')).toBe('dialog');
dialogRef.close();
});
it('should emit when dialog opening animation is complete', fakeAsync(() => {
const dialogRef = dialog.open(PizzaMsg, {viewContainerRef: testViewContainerRef});
const spy = jasmine.createSpy('afterOpen spy');
dialogRef.afterOpened().subscribe(spy);
viewContainerFixture.detectChanges();
// callback should not be called before animation is complete
expect(spy).not.toHaveBeenCalled();
flushMicrotasks();
expect(spy).toHaveBeenCalled();
}));
it('should use injector from viewContainerRef for DialogInjector', () => {
let dialogRef = dialog.open(PizzaMsg, {
viewContainerRef: testViewContainerRef
});
viewContainerFixture.detectChanges();
let dialogInjector = dialogRef.componentInstance.dialogInjector;
expect(dialogRef.componentInstance.dialogRef).toBe(dialogRef);
expect(dialogInjector.get<DirectiveWithViewContainer>(DirectiveWithViewContainer)).toBeTruthy(
'Expected the dialog component to be created with the injector from the viewContainerRef.'
);
});
it('should open a dialog with a component and no ViewContainerRef', () => {
let dialogRef = dialog.open(PizzaMsg);
viewContainerFixture.detectChanges();
expect(overlayContainerElement.textContent).toContain('Pizza');
expect(dialogRef.componentInstance instanceof PizzaMsg).toBe(true);
expect(dialogRef.componentInstance.dialogRef).toBe(dialogRef);
viewContainerFixture.detectChanges();
let dialogContainerElement = overlayContainerElement.querySelector('mat-dialog-container')!;
expect(dialogContainerElement.getAttribute('role')).toBe('dialog');
});
it('should apply the configured role to the dialog element', () => {
dialog.open(PizzaMsg, { role: 'alertdialog' });
viewContainerFixture.detectChanges();
let dialogContainerElement = overlayContainerElement.querySelector('mat-dialog-container')!;
expect(dialogContainerElement.getAttribute('role')).toBe('alertdialog');
});
it('should apply the specified `aria-describedby`', () => {
dialog.open(PizzaMsg, { ariaDescribedBy: 'description-element' });
viewContainerFixture.detectChanges();
let dialogContainerElement = overlayContainerElement.querySelector('mat-dialog-container')!;
expect(dialogContainerElement.getAttribute('aria-describedby')).toBe('description-element');
});
it('should close a dialog and get back a result', fakeAsync(() => {
let dialogRef = dialog.open(PizzaMsg, { viewContainerRef: testViewContainerRef });
let afterCloseCallback = jasmine.createSpy('afterClose callback');
dialogRef.afterClosed().subscribe(afterCloseCallback);
dialogRef.close('Charmander');
viewContainerFixture.detectChanges();
flush();
expect(afterCloseCallback).toHaveBeenCalledWith('Charmander');
expect(overlayContainerElement.querySelector('mat-dialog-container')).toBeNull();
}));
it('should dispose of dialog if view container is destroyed while animating', fakeAsync(() => {
const dialogRef = dialog.open(PizzaMsg, {viewContainerRef: testViewContainerRef});
dialogRef.close();
viewContainerFixture.detectChanges();
viewContainerFixture.destroy();
flush();
expect(overlayContainerElement.querySelector('mat-dialog-container')).toBeNull();
}));
it('should dispatch the beforeClose and afterClose events when the ' +
'overlay is detached externally', fakeAsync(inject([Overlay], (overlay: Overlay) => {
const dialogRef = dialog.open(PizzaMsg, {
viewContainerRef: testViewContainerRef,
scrollStrategy: overlay.scrollStrategies.close()
});
const beforeCloseCallback = jasmine.createSpy('beforeClosed callback');
const afterCloseCallback = jasmine.createSpy('afterClosed callback');
dialogRef.beforeClose().subscribe(beforeCloseCallback);
dialogRef.afterClosed().subscribe(afterCloseCallback);
scrolledSubject.next();
viewContainerFixture.detectChanges();
flush();
expect(beforeCloseCallback).toHaveBeenCalledTimes(1);
expect(afterCloseCallback).toHaveBeenCalledTimes(1);
})));
it('should close a dialog and get back a result before it is closed', fakeAsync(() => {
const dialogRef = dialog.open(PizzaMsg, {viewContainerRef: testViewContainerRef});
flush();
viewContainerFixture.detectChanges();
// beforeClose should emit before dialog container is destroyed
const beforeCloseHandler = jasmine.createSpy('beforeClose callback').and.callFake(() => {
expect(overlayContainerElement.querySelector('mat-dialog-container'))
.not.toBeNull('dialog container exists when beforeClose is called');
});
dialogRef.beforeClosed().subscribe(beforeCloseHandler);
dialogRef.close('Bulbasaur');
viewContainerFixture.detectChanges();
flush();
expect(beforeCloseHandler).toHaveBeenCalledWith('Bulbasaur');
expect(overlayContainerElement.querySelector('mat-dialog-container')).toBeNull();
}));
it('should close a dialog via the escape key', fakeAsync(() => {
dialog.open(PizzaMsg, {
viewContainerRef: testViewContainerRef
});
const event = dispatchKeyboardEvent(document.body, 'keydown', ESCAPE);
viewContainerFixture.detectChanges();
flush();
expect(overlayContainerElement.querySelector('mat-dialog-container')).toBeNull();
expect(event.defaultPrevented).toBe(true);
}));
it('should not close a dialog via the escape key with a modifier', fakeAsync(() => {
dialog.open(PizzaMsg, {
viewContainerRef: testViewContainerRef
});
const event = createKeyboardEvent('keydown', ESCAPE);
    Object.defineProperty(event, 'altKey', {get: () => true});
    dispatchEvent(document.body, event);
    viewContainerFixture.detectChanges();
flush();
expect(overlayContainerElement.querySelector('mat-dialog-container')).toBeTruthy();
expect(event.defaultPrevented).toBe(false);
}));
it('should close from a ViewContainerRef with OnPush change detection', fakeAsync(() => {
const onPushFixture = TestBed.createComponent(ComponentWithOnPushViewContainer);
onPushFixture.detectChanges();
const dialogRef = dialog.open(PizzaMsg, {
viewContainerRef: onPushFixture.componentInstance.viewContainerRef
});
flushMicrotasks();
onPushFixture.detectChanges();
flushMicrotasks();
expect(overlayContainerElement.querySelectorAll('mat-dialog-container').length)
.toBe(1, 'Expected one open dialog.');
dialogRef.close();
flushMicrotasks();
onPushFixture.detectChanges();
tick(500);
expect(overlayContainerElement.querySelectorAll('mat-dialog-container').length)
.toBe(0, 'Expected no open dialogs.');
}));
it('should close when clicking on the overlay backdrop', fakeAsync(() => {
dialog.open(PizzaMsg, {
viewContainerRef: testViewContainerRef
});
viewContainerFixture.detectChanges();
let backdrop = overlayContainerElement.querySelector('.cdk-overlay-backdrop') as HTMLElement;
backdrop.click();
viewContainerFixture.detectChanges();
flush();
expect(overlayContainerElement.querySelector('mat-dialog-container')).toBeFalsy();
}));
it('should emit the backdropClick stream when clicking on the overlay backdrop', fakeAsync(() => {
const dialogRef = dialog.open(PizzaMsg, {
viewContainerRef: testViewContainerRef
});
const spy = jasmine.createSpy('backdropClick spy');
dialogRef.backdropClick().subscribe(spy);
viewContainerFixture.detectChanges();
let backdrop = overlayContainerElement.querySelector('.cdk-overlay-backdrop') as HTMLElement;
backdrop.click();
expect(spy).toHaveBeenCalledTimes(1);
viewContainerFixture.detectChanges();
flush();
// Additional clicks after the dialog has closed should not be emitted
backdrop.click();
expect(spy).toHaveBeenCalledTimes(1);
}));
it('should emit the keyboardEvent stream when key events target the overlay', fakeAsync(() => {
const dialogRef = dialog.open(PizzaMsg, {viewContainerRef: testViewContainerRef});
const spy = jasmine.createSpy('keyboardEvent spy');
dialogRef.keydownEvents().subscribe(spy);
viewContainerFixture.detectChanges();
let backdrop = overlayContainerElement.querySelector('.cdk-overlay-backdrop') as HTMLElement;
let container = overlayContainerElement.querySelector('mat-dialog-container') as HTMLElement;
dispatchKeyboardEvent(document.body, 'keydown', A);
dispatchKeyboardEvent(document.body, 'keydown', A, backdrop);
dispatchKeyboardEvent(document.body, 'keydown', A, container);
expect(spy).toHaveBeenCalledTimes(3);
}));
it('should notify the observers if a dialog has been opened', () => {
dialog.afterOpened.subscribe(ref => {
expect(dialog.open(PizzaMsg, {
viewContainerRef: testViewContainerRef
})).toBe(ref);
});
});
it('should notify the observers if all open dialogs have finished closing', fakeAsync(() => {
const ref1 = dialog.open(PizzaMsg, { viewContainerRef: testViewContainerRef });
const ref2 = dialog.open(ContentElementDialog, { viewContainerRef: testViewContainerRef });
const spy = jasmine.createSpy('afterAllClosed spy');
dialog.afterAllClosed.subscribe(spy);
ref1.close();
viewContainerFixture.detectChanges();
flush();
expect(spy).not.toHaveBeenCalled();
ref2.close();
viewContainerFixture.detectChanges();
flush();
expect(spy).toHaveBeenCalled();
}));
it('should emit the afterAllClosed stream on subscribe if there are no open dialogs', () => {
const spy = jasmine.createSpy('afterAllClosed spy');
dialog.afterAllClosed.subscribe(spy);
expect(spy).toHaveBeenCalled();
});
it('should override the width of the overlay pane', () => {
dialog.open(PizzaMsg, {
width: '500px'
});
viewContainerFixture.detectChanges();
let overlayPane = overlayContainerElement.querySelector('.cdk-overlay-pane') as HTMLElement;
expect(overlayPane.style.width).toBe('500px');
});
it('should override the height of the overlay pane', () => {
dialog.open(PizzaMsg, {
height: '100px'
});
viewContainerFixture.detectChanges();
let overlayPane = overlayContainerElement.querySelector('.cdk-overlay-pane') as HTMLElement;
expect(overlayPane.style.height).toBe('100px');
});
it('should override the min-width of the overlay pane', () => {
dialog.open(PizzaMsg, {
minWidth: '500px'
});
viewContainerFixture.detectChanges();
let overlayPane = overlayContainerElement.querySelector('.cdk-overlay-pane') as HTMLElement;
expect(overlayPane.style.minWidth).toBe('500px');
});
it('should override the max-width of the overlay pane', fakeAsync(() => {
let dialogRef = dialog.open(PizzaMsg);
viewContainerFixture.detectChanges();
let overlayPane = overlayContainerElement.querySelector('.cdk-overlay-pane') as HTMLElement;
expect(overlayPane.style.maxWidth).toBe('80vw',
'Expected dialog to set a default max-width on overlay pane');
dialogRef.close();
tick(500);
viewContainerFixture.detectChanges();
flushMicrotasks();
dialogRef = dialog.open(PizzaMsg, {
maxWidth: '100px'
});
viewContainerFixture.detectChanges();
overlayPane = overlayContainerElement.querySelector('.cdk-overlay-pane') as HTMLElement;
expect(overlayPane.style.maxWidth).toBe('100px');
}));
it('should override the min-height of the overlay pane', () => {
dialog.open(PizzaMsg, {
minHeight: '300px'
});
viewContainerFixture.detectChanges();
let overlayPane = overlayContainerElement.querySelector('.cdk-overlay-pane') as HTMLElement;
expect(overlayPane.style.minHeight).toBe('300px');
});
it('should override the max-height of the overlay pane', () => {
dialog.open(PizzaMsg, {
maxHeight: '100px'
});
viewContainerFixture.detectChanges();
let overlayPane = overlayContainerElement.querySelector('.cdk-overlay-pane') as HTMLElement;
expect(overlayPane.style.maxHeight).toBe('100px');
});
it('should override the top offset of the overlay pane', () => {
dialog.open(PizzaMsg, {
position: {
top: '100px'
}
});
viewContainerFixture.detectChanges();
let overlayPane = overlayContainerElement.querySelector('.cdk-overlay-pane') as HTMLElement;
expect(overlayPane.style.marginTop).toBe('100px');
});
it('should override the bottom offset of the overlay pane', () => {
dialog.open(PizzaMsg, {
position: {
bottom: '200px'
}
});
viewContainerFixture.detectChanges();
let overlayPane = overlayContainerElement.querySelector('.cdk-overlay-pane') as HTMLElement;
expect(overlayPane.style.marginBottom).toBe('200px');
});
it('should override the left offset of the overlay pane', () => {
dialog.open(PizzaMsg, {
position: {
left: '250px'
}
});
viewContainerFixture.detectChanges();
let overlayPane = overlayContainerElement.querySelector('.cdk-overlay-pane') as HTMLElement;
expect(overlayPane.style.marginLeft).toBe('250px');
});
it('should override the right offset of the overlay pane', () => {
dialog.open(PizzaMsg, {
position: {
right: '125px'
}
});
viewContainerFixture.detectChanges();
let overlayPane = overlayContainerElement.querySelector('.cdk-overlay-pane') as HTMLElement;
expect(overlayPane.style.marginRight).toBe('125px');
});
it('should allow for the position to be updated', () => {
let dialogRef = dialog.open(PizzaMsg, {
position: {
left: '250px'
}
});
viewContainerFixture.detectChanges();
let overlayPane = overlayContainerElement.querySelector('.cdk-overlay-pane') as HTMLElement;
expect(overlayPane.style.marginLeft).toBe('250px');
dialogRef.updatePosition({ left: '500px' });
expect(overlayPane.style.marginLeft).toBe('500px');
});
it('should allow for the dimensions to be updated', () => {
let dialogRef = dialog.open(PizzaMsg, { width: '100px' });
viewContainerFixture.detectChanges();
let overlayPane = overlayContainerElement.querySelector('.cdk-overlay-pane') as HTMLElement;
expect(overlayPane.style.width).toBe('100px');
dialogRef.updateSize('200px');
expect(overlayPane.style.width).toBe('200px');
});
it('should reset the overlay dimensions to their initial size', () => {
let dialogRef = dialog.open(PizzaMsg);
viewContainerFixture.detectChanges();
let overlayPane = overlayContainerElement.querySelector('.cdk-overlay-pane') as HTMLElement;
expect(overlayPane.style.width).toBeFalsy();
expect(overlayPane.style.height).toBeFalsy();
dialogRef.updateSize('200px', '200px');
expect(overlayPane.style.width).toBe('200px');
expect(overlayPane.style.height).toBe('200px');
dialogRef.updateSize();
expect(overlayPane.style.width).toBeFalsy();
expect(overlayPane.style.height).toBeFalsy();
});
it('should allow setting the layout direction', () => {
dialog.open(PizzaMsg, { direction: 'rtl' });
viewContainerFixture.detectChanges();
let overlayPane = overlayContainerElement.querySelector('.cdk-global-overlay-wrapper')!;
expect(overlayPane.getAttribute('dir')).toBe('rtl');
});
it('should inject the correct layout direction in the component instance', () => {
const dialogRef = dialog.open(PizzaMsg, { direction: 'rtl' });
viewContainerFixture.detectChanges();
expect(dialogRef.componentInstance.directionality.value).toBe('rtl');
});
it('should fall back to injecting the global direction if none is passed by the config', () => {
const dialogRef = dialog.open(PizzaMsg, {});
viewContainerFixture.detectChanges();
expect(dialogRef.componentInstance.directionality.value).toBe('ltr');
});
it('should close all of the dialogs', fakeAsync(() => {
dialog.open(PizzaMsg);
dialog.open(PizzaMsg);
dialog.open(PizzaMsg);
expect(overlayContainerElement.querySelectorAll('mat-dialog-container').length).toBe(3);
dialog.closeAll();
viewContainerFixture.detectChanges();
flush();
expect(overlayContainerElement.querySelectorAll('mat-dialog-container').length).toBe(0);
}));
it('should set the proper animation states', () => {
let dialogRef = dialog.open(PizzaMsg, { viewContainerRef: testViewContainerRef });
let dialogContainer: MatDialogContainer =
viewContainerFixture.debugElement.query(By.directive(MatDialogContainer)).componentInstance;
expect(dialogContainer._state).toBe('enter');
dialogRef.close();
expect(dialogContainer._state).toBe('exit');
});
it('should close all dialogs when the user goes forwards/backwards in history', fakeAsync(() => {
dialog.open(PizzaMsg);
dialog.open(PizzaMsg);
expect(overlayContainerElement.querySelectorAll('mat-dialog-container').length).toBe(2);
mockLocation.simulateUrlPop('');
viewContainerFixture.detectChanges();
flush();
expect(overlayContainerElement.querySelectorAll('mat-dialog-container').length).toBe(0);
}));
it('should close all open dialogs when the location hash changes', fakeAsync(() => {
dialog.open(PizzaMsg);
dialog.open(PizzaMsg);
expect(overlayContainerElement.querySelectorAll('mat-dialog-container').length).toBe(2);
mockLocation.simulateHashChange('');
viewContainerFixture.detectChanges();
flush();
expect(overlayContainerElement.querySelectorAll('mat-dialog-container').length).toBe(0);
}));
it('should close all of the dialogs when the injectable is destroyed', fakeAsync(() => {
dialog.open(PizzaMsg);
dialog.open(PizzaMsg);
dialog.open(PizzaMsg);
expect(overlayContainerElement.querySelectorAll('mat-dialog-container').length).toBe(3);
dialog.ngOnDestroy();
viewContainerFixture.detectChanges();
flush();
expect(overlayContainerElement.querySelectorAll('mat-dialog-container').length).toBe(0);
}));
it('should complete open and close streams when the injectable is destroyed', fakeAsync(() => {
const afterOpenedSpy = jasmine.createSpy('after opened spy');
const afterAllClosedSpy = jasmine.createSpy('after all closed spy');
const afterOpenedSubscription = dialog.afterOpened.subscribe({complete: afterOpenedSpy});
const afterAllClosedSubscription = dialog.afterAllClosed.subscribe({
complete: afterAllClosedSpy
});
dialog.ngOnDestroy();
expect(afterOpenedSpy).toHaveBeenCalled();
expect(afterAllClosedSpy).toHaveBeenCalled();
afterOpenedSubscription.unsubscribe();
afterAllClosedSubscription.unsubscribe();
}));
it('should allow the consumer to disable closing a dialog on navigation', fakeAsync(() => {
dialog.open(PizzaMsg);
dialog.open(PizzaMsg, {closeOnNavigation: false});
expect(overlayContainerElement.querySelectorAll('mat-dialog-container').length).toBe(2);
mockLocation.simulateUrlPop('');
viewContainerFixture.detectChanges();
flush();
expect(overlayContainerElement.querySelectorAll('mat-dialog-container').length).toBe(1);
}));
it('should have the componentInstance available in the afterClosed callback', fakeAsync(() => {
let dialogRef = dialog.open(PizzaMsg);
let spy = jasmine.createSpy('afterClosed spy');
flushMicrotasks();
viewContainerFixture.detectChanges();
flushMicrotasks();
dialogRef.afterClosed().subscribe(() => {
spy();
expect(dialogRef.componentInstance).toBeTruthy('Expected component instance to be defined.');
});
dialogRef.close();
flushMicrotasks();
viewContainerFixture.detectChanges();
tick(500);
// Ensure that the callback actually fires.
expect(spy).toHaveBeenCalled();
}));
it('should be able to attach a custom scroll strategy', fakeAsync(() => {
const scrollStrategy: ScrollStrategy = {
attach: () => {},
enable: jasmine.createSpy('scroll strategy enable spy'),
disable: () => {}
};
dialog.open(PizzaMsg, {scrollStrategy});
expect(scrollStrategy.enable).toHaveBeenCalled();
}));
it('should be able to pass in an alternate ComponentFactoryResolver',
inject([ComponentFactoryResolver], (resolver: ComponentFactoryResolver) => {
spyOn(resolver, 'resolveComponentFactory').and.callThrough();
dialog.open(PizzaMsg, {
viewContainerRef: testViewContainerRef,
componentFactoryResolver: resolver
});
viewContainerFixture.detectChanges();
expect(resolver.resolveComponentFactory).toHaveBeenCalled();
}));
describe('passing in data', () => {
it('should be able to pass in data', () => {
let config = {
data: {
stringParam: 'hello',
dateParam: new Date()
}
};
let instance = dialog.open(DialogWithInjectedData, config).componentInstance;
expect(instance.data.stringParam).toBe(config.data.stringParam);
expect(instance.data.dateParam).toBe(config.data.dateParam);
});
it('should default to null if no data is passed', () => {
expect(() => {
let dialogRef = dialog.open(DialogWithInjectedData);
expect(dialogRef.componentInstance.data).toBeNull();
}).not.toThrow();
});
});
it('should not keep a reference to the component after the dialog is closed', fakeAsync(() => {
let dialogRef = dialog.open(PizzaMsg);
expect(dialogRef.componentInstance).toBeTruthy();
dialogRef.close();
viewContainerFixture.detectChanges();
flush();
expect(dialogRef.componentInstance).toBeFalsy('Expected reference to have been cleared.');
}));
it('should assign a unique id to each dialog', () => {
const one = dialog.open(PizzaMsg);
const two = dialog.open(PizzaMsg);
expect(one.id).toBeTruthy();
expect(two.id).toBeTruthy();
expect(one.id).not.toBe(two.id);
});
it('should allow for the id to be overwritten', () => {
const dialogRef = dialog.open(PizzaMsg, { id: 'pizza' });
expect(dialogRef.id).toBe('pizza');
});
it('should throw when trying to open a dialog with the same id as another dialog', () => {
dialog.open(PizzaMsg, { id: 'pizza' });
expect(() => dialog.open(PizzaMsg, { id: 'pizza' })).toThrowError(/must be unique/g);
});
it('should be able to find a dialog by id', () => {
const dialogRef = dialog.open(PizzaMsg, { id: 'pizza' });
expect(dialog.getDialogById('pizza')).toBe(dialogRef);
});
it('should toggle `aria-hidden` on the overlay container siblings', fakeAsync(() => {
const sibling = document.createElement('div');
overlayContainerElement.parentNode!.appendChild(sibling);
const dialogRef = dialog.open(PizzaMsg, {viewContainerRef: testViewContainerRef});
viewContainerFixture.detectChanges();
flush();
expect(sibling.getAttribute('aria-hidden')).toBe('true', 'Expected sibling to be hidden');
expect(overlayContainerElement.hasAttribute('aria-hidden'))
.toBe(false, 'Expected overlay container not to be hidden.');
dialogRef.close();
viewContainerFixture.detectChanges();
flush();
expect(sibling.hasAttribute('aria-hidden'))
.toBe(false, 'Expected sibling to no longer be hidden.');
sibling.parentNode!.removeChild(sibling);
}));
it('should restore `aria-hidden` to the overlay container siblings on close', fakeAsync(() => {
const sibling = document.createElement('div');
sibling.setAttribute('aria-hidden', 'true');
overlayContainerElement.parentNode!.appendChild(sibling);
const dialogRef = dialog.open(PizzaMsg, {viewContainerRef: testViewContainerRef});
viewContainerFixture.detectChanges();
flush();
expect(sibling.getAttribute('aria-hidden')).toBe('true', 'Expected sibling to be hidden.');
dialogRef.close();
viewContainerFixture.detectChanges();
flush();
expect(sibling.getAttribute('aria-hidden')).toBe('true', 'Expected sibling to remain hidden.');
sibling.parentNode!.removeChild(sibling);
}));
it('should not set `aria-hidden` on `aria-live` elements', fakeAsync(() => {
const sibling = document.createElement('div');
sibling.setAttribute('aria-live', 'polite');
overlayContainerElement.parentNode!.appendChild(sibling);
dialog.open(PizzaMsg, {viewContainerRef: testViewContainerRef});
viewContainerFixture.detectChanges();
flush();
expect(sibling.hasAttribute('aria-hidden'))
.toBe(false, 'Expected live element not to be hidden.');
sibling.parentNode!.removeChild(sibling);
}));
it('should add and remove classes while open', () => {
let dialogRef = dialog.open(PizzaMsg, {
disableClose: true,
viewContainerRef: testViewContainerRef
});
const pane = overlayContainerElement.querySelector('.cdk-overlay-pane') as HTMLElement;
expect(pane.classList)
.not.toContain('custom-class-one', 'Expected class to be initially missing');
dialogRef.addPanelClass('custom-class-one');
expect(pane.classList).toContain('custom-class-one', 'Expected class to be added');
dialogRef.removePanelClass('custom-class-one');
expect(pane.classList).not.toContain('custom-class-one', 'Expected class to be removed');
});
describe('disableClose option', () => {
it('should prevent closing via clicks on the backdrop', fakeAsync(() => {
dialog.open(PizzaMsg, {
disableClose: true,
viewContainerRef: testViewContainerRef
});
viewContainerFixture.detectChanges();
let backdrop = overlayContainerElement.querySelector('.cdk-overlay-backdrop') as HTMLElement;
backdrop.click();
viewContainerFixture.detectChanges();
flush();
expect(overlayContainerElement.querySelector('mat-dialog-container')).toBeTruthy();
}));
it('should prevent closing via the escape key', fakeAsync(() => {
dialog.open(PizzaMsg, {
disableClose: true,
viewContainerRef: testViewContainerRef
});
viewContainerFixture.detectChanges();
dispatchKeyboardEvent(document.body, 'keydown', ESCAPE);
viewContainerFixture.detectChanges();
flush();
expect(overlayContainerElement.querySelector('mat-dialog-container')).toBeTruthy();
}));
it('should allow for the disableClose option to be updated while open', fakeAsync(() => {
let dialogRef = dialog.open(PizzaMsg, {
disableClose: true,
viewContainerRef: testViewContainerRef
});
viewContainerFixture.detectChanges();
let backdrop = overlayContainerElement.querySelector('.cdk-overlay-backdrop') as HTMLElement;
backdrop.click();
expect(overlayContainerElement.querySelector('mat-dialog-container')).toBeTruthy();
dialogRef.disableClose = false;
backdrop.click();
viewContainerFixture.detectChanges();
flush();
expect(overlayContainerElement.querySelector('mat-dialog-container')).toBeFalsy();
}));
});
describe('hasBackdrop option', () => {
it('should have a backdrop', () => {
dialog.open(PizzaMsg, {
hasBackdrop: true,
viewContainerRef: testViewContainerRef
});
viewContainerFixture.detectChanges();
expect(overlayContainerElement.querySelector('.cdk-overlay-backdrop')).toBeTruthy();
});
it('should not have a backdrop', () => {
dialog.open(PizzaMsg, {
hasBackdrop: false,
viewContainerRef: testViewContainerRef
});
viewContainerFixture.detectChanges();
expect(overlayContainerElement.querySelector('.cdk-overlay-backdrop')).toBeFalsy();
});
});
describe('panelClass option', () => {
it('should have custom panel class', () => {
dialog.open(PizzaMsg, {
panelClass: 'custom-panel-class',
viewContainerRef: testViewContainerRef
});
viewContainerFixture.detectChanges();
expect(overlayContainerElement.querySelector('.custom-panel-class')).toBeTruthy();
});
});
describe('backdropClass option', () => {
it('should have default backdrop class', () => {
dialog.open(PizzaMsg, {
backdropClass: '',
viewContainerRef: testViewContainerRef |
viewContainerFixture.detectChanges();
expect(overlayContainerElement.querySelector('.cdk-overlay-dark-backdrop')).toBeTruthy();
});
it('should have custom backdrop class', () => {
dialog.open(PizzaMsg, {
backdropClass: 'custom-backdrop-class',
viewContainerRef: testViewContainerRef
});
viewContainerFixture.detectChanges();
expect(overlayContainerElement.querySelector('.custom-backdrop-class')).toBeTruthy();
});
});
describe('focus management', () => {
// When testing focus, all of the elements must be in the DOM.
beforeEach(() => document.body.appendChild(overlayContainerElement));
afterEach(() => document.body.removeChild(overlayContainerElement));
it('should focus the first tabbable element of the dialog on open', fakeAsync(() => {
dialog.open(PizzaMsg, {
viewContainerRef: testViewContainerRef
});
viewContainerFixture.detectChanges();
flushMicrotasks();
expect(document.activeElement!.tagName)
.toBe('INPUT', 'Expected first tabbable element (input) in the dialog to be focused.');
}));
it('should allow disabling focus of the first tabbable element', fakeAsync(() => {
dialog.open(PizzaMsg, {
viewContainerRef: testViewContainerRef,
autoFocus: false
});
viewContainerFixture.detectChanges();
flushMicrotasks();
expect(document.activeElement!.tagName).not.toBe('INPUT');
}));
it('should re-focus trigger element when dialog closes', fakeAsync(() => {
    // Create an element that has focus before the dialog is opened.
let button = document.createElement('button');
button.id = 'dialog-trigger';
document.body.appendChild(button);
button.focus();
let dialogRef = dialog.open(PizzaMsg, { viewContainerRef: testViewContainerRef });
flushMicrotasks();
viewContainerFixture.detectChanges();
flushMicrotasks();
expect(document.activeElement!.id)
.not.toBe('dialog-trigger', 'Expected the focus to change when dialog was opened.');
dialogRef.close();
expect(document.activeElement!.id).not.toBe('dialog-trigger',
        'Expected the focus not to have changed before the animation finishes.');
flushMicrotasks();
viewContainerFixture.detectChanges();
tick(500);
expect(document.activeElement!.id).toBe('dialog-trigger',
'Expected that the trigger was refocused after the dialog is closed.');
document.body.removeChild(button);
}));
it('should allow the consumer to shift focus in afterClosed', fakeAsync(() => {
    // Create an element that has focus before the dialog is opened.
let button = document.createElement('button');
let input = document.createElement('input');
button.id = 'dialog-trigger';
input.id = 'input-to-be-focused';
document.body.appendChild(button);
document.body.appendChild(input);
button.focus();
let dialogRef = dialog.open(PizzaMsg, { viewContainerRef: testViewContainerRef });
tick(500);
viewContainerFixture.detectChanges();
dialogRef.afterClosed().subscribe(() => input.focus());
dialogRef.close();
tick(500);
viewContainerFixture.detectChanges();
flushMicrotasks();
expect(document.activeElement!.id).toBe('input-to-be-focused',
        'Expected the input to be focused after the dialog was closed.');
document.body.removeChild(button);
document.body.removeChild(input);
flush();
}));
it('should move focus to the container if there are no focusable elements in the dialog',
fakeAsync(() => {
dialog.open(DialogWithoutFocusableElements);
viewContainerFixture.detectChanges();
flushMicrotasks();
expect(document.activeElement!.tagName)
.toBe('MAT-DIALOG-CONTAINER', 'Expected dialog container to be focused.');
}));
it('should be able to disable focus restoration', fakeAsync(() => {
    // Create an element that has focus before the dialog is opened.
const button = document.createElement('button');
button.id = 'dialog-trigger';
document.body.appendChild(button);
button.focus();
const dialogRef = dialog.open(PizzaMsg, {
viewContainerRef: testViewContainerRef,
restoreFocus: false
});
flushMicrotasks();
viewContainerFixture.detectChanges();
flushMicrotasks();
expect(document.activeElement!.id)
.not.toBe('dialog-trigger', 'Expected the focus to change when dialog was opened.');
dialogRef.close();
flushMicrotasks();
viewContainerFixture.detectChanges();
tick(500);
expect(document.activeElement!.id).not.toBe('dialog-trigger',
'Expected focus not to have been restored.');
document.body.removeChild(button);
}));
});
describe('dialog content elements', () => {
let dialogRef: MatDialogRef<any>;
describe('inside component dialog', () => {
beforeEach(fakeAsync(() => {
dialogRef = dialog.open(ContentElementDialog, {viewContainerRef: testViewContainerRef});
viewContainerFixture.detectChanges();
flush();
}));
runContentElementTests();
});
describe('inside template portal', () => {
beforeEach(fakeAsync(() => {
const fixture = TestBed.createComponent(ComponentWithContentElementTemplateRef);
fixture.detectChanges();
dialogRef = dialog.open(fixture.componentInstance.templateRef, {
viewContainerRef: testViewContainerRef
});
viewContainerFixture.detectChanges();
flush();
}));
runContentElementTests();
});
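  // Shared assertions, run once for the component-based dialog and once for the
  // template-portal dialog configured in the describe blocks above.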
function runContentElementTests() {
it('should close the dialog when clicking on the close button', fakeAsync(() => {
expect(overlayContainerElement.querySelectorAll('.mat-dialog-container').length).toBe(1);
(overlayContainerElement.querySelector('button[mat-dialog-close]') as HTMLElement).click();
viewContainerFixture.detectChanges();
flush();
expect(overlayContainerElement.querySelectorAll('.mat-dialog-container').length).toBe(0);
}));
it('should not close if [mat-dialog-close] is applied on a non-button node', () => {
expect(overlayContainerElement.querySelectorAll('.mat-dialog-container').length).toBe(1);
(overlayContainerElement.querySelector('div[mat-dialog-close]') as HTMLElement).click();
expect(overlayContainerElement.querySelectorAll('.mat-dialog-container').length).toBe(1);
});
it('should allow for a user-specified aria-label on the close button', fakeAsync(() => {
let button = overlayContainerElement.querySelector('.close-with-aria-label')!;
expect(button.getAttribute('aria-label')).toBe('Best close button ever');
}));
it('should override the "type" attribute of the close button', () => {
let button = overlayContainerElement.querySelector('button[mat-dialog-close]')!;
expect(button.getAttribute('type')).toBe('button');
});
it('should return the [mat-dialog-close] result when clicking the close button',
fakeAsync(() => {
let afterCloseCallback = jasmine.createSpy('afterClose callback');
dialogRef.afterClosed().subscribe(afterCloseCallback);
(overlayContainerElement.querySelector('button.close-with-true') as HTMLElement).click();
viewContainerFixture.detectChanges();
flush();
expect(afterCloseCallback).toHaveBeenCalledWith(true);
}));
it('should set the aria-labelledby attribute to the id of the title', fakeAsync(() => {
let title = overlayContainerElement.querySelector('[mat-dialog-title]')!;
let container = overlayContainerElement.querySelector('mat-dialog-container')!;
flush();
viewContainerFixture.detectChanges();
expect(title.id).toBeTruthy('Expected title element to have an id.');
expect(container.getAttribute('aria-labelledby'))
.toBe(title.id, 'Expected the aria-labelledby to match the title id.');
}));
}
});
describe('aria-labelledby', () => {
it('should be able to set a custom aria-labelledby', () => {
dialog.open(PizzaMsg, {
ariaLabelledBy: 'Labelled By',
viewContainerRef: testViewContainerRef
});
viewContainerFixture.detectChanges();
const container = overlayContainerElement.querySelector('mat-dialog-container')!;
expect(container.getAttribute('aria-labelledby')).toBe('Labelled By');
});
it('should not set the aria-labelledby automatically if it has an aria-label ' +
'and an aria-labelledby', fakeAsync(() => {
dialog.open(ContentElementDialog, {
ariaLabel: 'Hello there',
ariaLabelledBy: 'Labelled By',
viewContainerRef: testViewContainerRef
});
viewContainerFixture.detectChanges();
tick();
viewContainerFixture.detectChanges();
const container = overlayContainerElement.querySelector('mat-dialog-container')!;
expect(container.hasAttribute('aria-labelledby')).toBe(false);
}));
it('should set the aria-labelledby attribute to the config provided aria-labelledby ' +
'instead of the mat-dialog-title id', fakeAsync(() => {
dialog.open(ContentElementDialog, {
ariaLabelledBy: 'Labelled By',
viewContainerRef: testViewContainerRef
});
viewContainerFixture.detectChanges();
flush();
let title = overlayContainerElement.querySelector('[mat-dialog-title]')!;
let container = overlayContainerElement.querySelector('mat-dialog-container')!;
flush();
viewContainerFixture.detectChanges();
expect(title.id).toBeTruthy('Expected title element to have an id.');
expect(container.getAttribute('aria-labelledby')).toBe('Labelled By');
}));
});
describe('aria-label', () => {
it('should be able to set a custom aria-label', () => {
dialog.open(PizzaMsg, {
ariaLabel: 'Hello there',
viewContainerRef: testViewContainerRef
});
viewContainerFixture.detectChanges();
const container = overlayContainerElement.querySelector('mat-dialog-container')!;
expect(container.getAttribute('aria-label')).toBe('Hello there');
});
it('should not set the aria-labelledby automatically if it has an aria-label', fakeAsync(() => {
dialog.open(ContentElementDialog, {
ariaLabel: 'Hello there',
viewContainerRef: testViewContainerRef
});
viewContainerFixture.detectChanges();
tick();
viewContainerFixture.detectChanges();
const container = overlayContainerElement.querySelector('mat-dialog-container')!;
expect(container.hasAttribute('aria-labelledby')).toBe(false);
}));
});
});
describe('MatDialog with a parent MatDialog', () => {
let parentDialog: MatDialog;
let childDialog: MatDialog;
let overlayContainerElement: HTMLElement;
let fixture: ComponentFixture<ComponentThatProvidesMatDialog>;
beforeEach(fakeAsync(() => {
TestBed.configureTestingModule({
imports: [MatDialogModule, DialogTestModule],
declarations: [ComponentThatProvidesMatDialog],
providers: [
{provide: OverlayContainer, useFactory: () => {
overlayContainerElement = document.createElement('div');
return {getContainerElement: () => overlayContainerElement};
}},
{provide: Location, useClass: SpyLocation}
],
});
TestBed.compileComponents();
}));
beforeEach(inject([MatDialog], (d: MatDialog) => {
parentDialog = d;
fixture = TestBed.createComponent(ComponentThatProvidesMatDialog);
childDialog = fixture.componentInstance.dialog;
fixture.detectChanges();
}));
afterEach(() => {
overlayContainerElement.innerHTML = '';
});
it('should close dialogs opened by a parent when calling closeAll on a child MatDialog',
fakeAsync(() => {
parentDialog.open(PizzaMsg);
fixture.detectChanges();
flush();
expect(overlayContainerElement.textContent)
.toContain('Pizza', 'Expected a dialog to be opened');
childDialog.closeAll();
fixture.detectChanges();
flush();
expect(overlayContainerElement.textContent!.trim())
.toBe('', 'Expected closeAll on child MatDialog to close dialog opened by parent');
}));
it('should close dialogs opened by a child when calling closeAll on a parent MatDialog',
fakeAsync(() => {
childDialog.open(PizzaMsg);
fixture.detectChanges();
expect(overlayContainerElement.textContent)
.toContain('Pizza', 'Expected a dialog to be opened');
parentDialog.closeAll();
fixture.detectChanges();
flush();
expect(overlayContainerElement.textContent!.trim())
.toBe('', 'Expected closeAll on parent MatDialog to close dialog opened by child');
}));
it('should close the top dialog via the escape key', fakeAsync(() => {
childDialog.open(PizzaMsg);
dispatchKeyboardEvent(document.body, 'keydown', ESCAPE);
fixture.detectChanges();
flush();
expect(overlayContainerElement.querySelector('mat-dialog-container')).toBeNull();
}));
it('should not close the parent dialogs when a child is destroyed', fakeAsync(() => {
parentDialog.open(PizzaMsg);
fixture.detectChanges();
flush();
expect(overlayContainerElement.textContent)
.toContain('Pizza', 'Expected a dialog to be opened');
childDialog.ngOnDestroy();
fixture.detectChanges();
flush();
expect(overlayContainerElement.textContent)
.toContain('Pizza', 'Expected a dialog to be opened');
}));
});
describe('MatDialog with default options', () => {
let dialog: MatDialog;
let overlayContainer: OverlayContainer;
let overlayContainerElement: HTMLElement;
let testViewContainerRef: ViewContainerRef;
let viewContainerFixture: ComponentFixture<ComponentWithChildViewContainer>;
beforeEach(fakeAsync(() => {
const defaultConfig = {
hasBackdrop: false,
disableClose: true,
width: '100px',
height: '100px',
minWidth: '50px',
minHeight: '50px',
maxWidth: '150px',
maxHeight: '150px',
autoFocus: false,
};
TestBed.configureTestingModule({
imports: [MatDialogModule, DialogTestModule],
providers: [
{provide: MAT_DIALOG_DEFAULT_OPTIONS, useValue: defaultConfig},
],
});
TestBed.compileComponents();
}));
beforeEach(inject([MatDialog, OverlayContainer],
(d: MatDialog, oc: OverlayContainer) => {
dialog = d;
overlayContainer = oc;
overlayContainerElement = oc.getContainerElement();
}));
afterEach(() => {
overlayContainer.ngOnDestroy();
});
beforeEach(() => {
viewContainerFixture = TestBed.createComponent(ComponentWithChildViewContainer);
viewContainerFixture.detectChanges();
testViewContainerRef = viewContainerFixture.componentInstance.childViewContainer;
});
it('should use the provided defaults', () => {
dialog.open(PizzaMsg, {viewContainerRef: testViewContainerRef});
viewContainerFixture.detectChanges();
expect(overlayContainerElement.querySelector('.cdk-overlay-backdrop')).toBeFalsy();
dispatchKeyboardEvent(document.body, 'keydown', ESCAPE);
expect(overlayContainerElement.querySelector('mat-dialog-container')).toBeTruthy();
expect(document.activeElement!.tagName).not.toBe('INPUT');
let overlayPane = overlayContainerElement.querySelector('.cdk-overlay-pane') as HTMLElement;
expect(overlayPane.style.width).toBe('100px');
expect(overlayPane.style.height).toBe('100px');
expect(overlayPane.style.minWidth).toBe('50px');
expect(overlayPane.style.minHeight).toBe('50px');
expect(overlayPane.style.maxWidth).toBe('150px');
expect(overlayPane.style.maxHeight).toBe('150px');
});
it('should be overridable by open() options', fakeAsync(() => {
dialog.open(PizzaMsg, {
hasBackdrop: true,
disableClose: false,
viewContainerRef: testViewContainerRef
});
viewContainerFixture.detectChanges();
expect(overlayContainerElement.querySelector('.cdk-overlay-backdrop')).toBeTruthy();
dispatchKeyboardEvent(document.body, 'keydown', ESCAPE);
viewContainerFixture.detectChanges();
flush();
expect(overlayContainerElement.querySelector('mat-dialog-container')).toBeFalsy();
}));
});
@Directive({selector: 'dir-with-view-container'})
class DirectiveWithViewContainer {
constructor(public viewContainerRef: ViewContainerRef) { }
}
@Component({
changeDetection: ChangeDetectionStrategy.OnPush,
template: 'hello',
})
class ComponentWithOnPushViewContainer {
constructor(public viewContainerRef: ViewContainerRef) { }
}
@Component({
selector: 'arbitrary-component',
template: `<dir-with-view-container></dir-with-view-container>`,
})
class ComponentWithChildViewContainer {
@ViewChild(DirectiveWithViewContainer, {
static: false
}) childWithViewContainer: DirectiveWithViewContainer;
get childViewContainer() {
return this.childWithViewContainer.viewContainerRef;
}
}
@Component({
selector: 'arbitrary-component-with-template-ref',
template: `<ng-template let-data let-dialogRef="dialogRef">
Cheese {{localValue}} {{data?.value}}{{setDialogRef(dialogRef)}}</ng-template>`,
})
class ComponentWithTemplateRef {
localValue: string;
dialogRef: MatDialogRef<any>;
@ViewChild(TemplateRef, {static: false}) templateRef: TemplateRef<any>;
setDialogRef(dialogRef: MatDialogRef<any>): string {
this.dialogRef = dialogRef;
return '';
}
}
/** Simple component for testing ComponentPortal. */
@Component({template: '<p>Pizza</p> <input> <button>Close</button>'})
class PizzaMsg {
constructor(public dialogRef: MatDialogRef<PizzaMsg>,
public dialogInjector: Injector,
public directionality: Directionality) {}
}
@Component({
template: `
<h1 mat-dialog-title>This is the title</h1>
<mat-dialog-content>Lorem ipsum dolor sit amet.</mat-dialog-content>
<mat-dialog-actions>
<button mat-dialog-close>Close</button>
<button class="close-with-true" [mat-dialog-close]="true">Close and return true</button>
<button
class="close-with-aria-label"
aria-label="Best close button ever"
[mat-dialog-close]="true"></button>
<div mat-dialog-close>Should not close</div>
</mat-dialog-actions>
`
})
class ContentElementDialog {}
@Component({
template: `
<ng-template>
<h1 mat-dialog-title>This is the title</h1>
<mat-dialog-content>Lorem ipsum dolor sit amet.</mat-dialog-content>
<mat-dialog-actions>
<button mat-dialog-close>Close</button>
<button class="close-with-true" [mat-dialog-close]="true">Close and return true</button>
<button
class="close-with-aria-label"
aria-label="Best close button ever"
[mat-dialog-close]="true"></button>
<div mat-dialog-close>Should not close</div>
</mat-dialog-actions>
</ng-template>
`
})
class ComponentWithContentElementTemplateRef {
@ViewChild(TemplateRef, {
static: false
}) templateRef: TemplateRef<any>;
}
@Component({
template: '',
providers: [MatDialog]
})
class ComponentThatProvidesMatDialog {
constructor(public dialog: MatDialog) {}
}
/** Simple component for testing dialogs with injected data. */
@Component({template: ''})
class DialogWithInjectedData {
constructor(@Inject(MAT_DIALOG_DATA) public data: any) { }
}
@Component({template: '<p>Pasta</p>'})
class DialogWithoutFocusableElements {}
// Create a real (non-test) NgModule as a workaround for
// https://github.com/angular/angular/issues/10760
const TEST_DIRECTIVES = [
ComponentWithChildViewContainer,
ComponentWithTemplateRef,
PizzaMsg,
DirectiveWithViewContainer,
ComponentWithOnPushViewContainer,
ContentElementDialog,
DialogWithInjectedData,
DialogWithoutFocusableElements,
ComponentWithContentElementTemplateRef,
];
@NgModule({
imports: [MatDialogModule, NoopAnimationsModule],
exports: TEST_DIRECTIVES,
declarations: TEST_DIRECTIVES,
entryComponents: [
ComponentWithChildViewContainer,
ComponentWithTemplateRef,
PizzaMsg,
ContentElementDialog,
DialogWithInjectedData,
DialogWithoutFocusableElements,
],
})
class DialogTestModule { } | }); |
api.py | """
Module for retrieving PCBr climate data.
The Project documentation can be found on the Portal
http://pclima.inpe.br/
The choices for downloading data are defined through
a JSON that can be generated using the API Portal.
http://pclima.inpe.br/analise/API
Python version it was tested with: 3.6
Example of API usage
Token: see the documentation on how to generate the Token
import api as api
Client = api.Client()
data = Client.getData(
{ "formato": "CSV", "conjunto": "PR0002", "modelo": "MO0003", "experimento": "EX0003", "periodo": "PE0000", "cenario": "CE0001", "variavel": "VR0001", "frequenciaURL": "Mensal", "frequencia": "FR0003", "produto": "PDT0001", "localizacao": "Ponto", "localizacao_pontos": "-23.56/-46.62", "varCDO": "tasmax" }
)
Client.save(data,"file.csv")
"""
import os
import json
from pclima.factory import RequestFactory
class Client(object):
"""
    Class used to create a Client for accessing the API.
    Attributes
    ----------
    token : str
        Defined in the ~/.pclimaAPIrc file
    format : str
        Set when a download is requested
"""
def __init__(self, token=os.environ.get("API_TOKEN"),):
"""
Parameters
----------
token : str
            Access key for the API services
"""
self.token = token
self.format = None
dotrc = os.environ.get("PCLIMAAPI_RC", os.path.expanduser("~/.pclimaAPIrc"))
if token is None:
if os.path.exists(dotrc):
config = read_config(dotrc)
if token is None:
token = config.get("token")
if token is None:
print("Missing/incomplete configuration file: %s" % (dotrc))
raise SystemExit
self.token = token
def getData(self,apiJSON):
"""
Method
-------
        Sends the JSON and returns the requested data.
Parameters
----------
apiJSON : json
            JSON with the chosen options
Returns
-------
        The return type depends on the chosen format:
        Format               Return:
        NetCDF               XArray
        CSV                  DataFrame
        JSON                 DataFrame
        CSVPontos            DataFrame
        CSVPontosTransposta  DataFrame
"""
j = json.loads(json.dumps(apiJSON))
print(j)
self.format = j["formato"]
factory = RequestFactory()
product = factory.get_order(j["formato"],self.token,j)
print(product)
return (product.download())
def save(self,content,file):
"""
Method
-------
        Receives the retrieved data and the name of the
        output file.
Parameters
----------
        content : data in the requested format
            The retrieved data
        file : output file name
            Name of the output file, e.g. "saida.csv"
"""
factory = RequestFactory()
factory.save(self.format,content,file)
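# ~/.pclimaAPIrc is expected to hold simple "key: value" lines, for example:
#   token: <your-API-token>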
def read_config(path):
config = {}
with | open(path) as f:
for l in f.readlines():
if ":" in l:
k, v = l.strip().split(":", 1)
if k in ("token"):
config[k] = v.strip()
return config
|
|
rux-icon-local-hospital.tsx | import { Component, Prop, h } from '@stencil/core'
import svgIcon from '../../../icons/local-hospital.svg'
/**
* WARNING: This is an autogenerated component.
* Do not make any changes to this file or they will be overwritten on build.
* The template for this file is located in the generate-icons.js util file.
 */
/** @internal **/
@Component({
tag: 'rux-icon-local-hospital',
shadow: false,
})
export class | {
/**
* The fill color for the icon
*/
@Prop() color?: string
/**
* The size of the icon. Can be 'extra-small', 'small', 'normal', 'large', 'auto' or any custom value ('30px', '1rem', '3.321em')
*/
@Prop() size:
| 'extra-small'
| 'small'
| 'normal'
| 'large'
| 'auto'
| string = 'auto'
get iconSize() {
const sizes: { [key: string]: any } = {
'extra-small': '1rem',
small: '2rem',
normal: '3rem',
large: '4rem',
}
if (sizes[this.size]) {
return sizes[this.size]
} else {
return this.size
}
}
render() {
const style = {
fill: this.color,
height: this.iconSize,
width: this.iconSize,
}
return <div style={style} innerHTML={svgIcon}></div>
}
}
| RuxIconLocalHospital |
main.rs | use canteen;
use rusty_leveldb;
struct KVService {
db: rusty_leveldb::DB,
}
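// Process-wide handle to the service. Mutable statics require `unsafe` at each
// access; the Mutex serializes handler access to the underlying LevelDB instance.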
static mut STORAGE_SERVICE: Option<std::sync::Mutex<KVService>> = None;
impl KVService {
fn handle_get(&mut self, req: &canteen::Request) -> canteen::Response {
let key: String = req.get("key");
let val = self.db.get(key.as_bytes());
let mut rp = canteen::Response::new();
rp.set_status(200);
rp.set_content_type("text/plain");
if let Some(val) = val {
rp.append(val);
} else {
rp.set_status(404);
}
rp
}
fn handle_put(&mut self, req: &canteen::Request) -> canteen::Response {
let mut rp = canteen::Response::new();
let key: String = req.get("key");
let val = &req.payload;
self.db.put(key.as_bytes(), val.as_ref()).unwrap();
rp.set_status(200);
rp.set_content_type("text/plain");
rp
}
}
fn get_key_fn(rq: &canteen::Request) -> canteen::Response {
unsafe {
STORAGE_SERVICE
.as_ref()
.unwrap()
.lock()
.unwrap()
.handle_get(rq)
}
}
fn put_key_fn(rq: &canteen::Request) -> canteen::Response {
unsafe { | .lock()
.unwrap()
.handle_put(rq)
}
}
fn main() {
let db = rusty_leveldb::DB::open("httpdb", rusty_leveldb::Options::default()).unwrap();
let service = KVService { db: db };
unsafe { STORAGE_SERVICE = Some(std::sync::Mutex::new(service)) };
let mut ct = canteen::Canteen::new();
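    // GET /kvs/get/<key> returns the stored value;
    // PUT/POST /kvs/put/<key> stores the request body under <key>.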
ct.add_route("/kvs/get/<str:key>", &[canteen::Method::Get], get_key_fn);
ct.add_route(
"/kvs/put/<str:key>",
&[canteen::Method::Put, canteen::Method::Post],
put_key_fn,
);
ct.bind("0.0.0.0:8080");
ct.run()
} | STORAGE_SERVICE
.as_ref()
.unwrap() |
home.ts | import { Component,NgZone } from '@angular/core';
import { NavController ,NavParams ,Slides,Content, } from 'ionic-angular';
import { Http } from '@angular/http';
import 'rxjs/add/operator/map';
import {Observable} from 'rxjs/Observable';
import { ViewChild } from '@angular/core';
// import { Storage } from 'ionic-framework/ionic';
import {Storage} from '@ionic/storage';
import {ContactPage} from '../../pages/contact/contact';
import {LocationsPage} from '../../pages/locations/locations';
import {addtocardPage} from '../../pages/addtocard/addtocard';
import {GallaryPage} from '../../pages/gallary/gallary';
import {CartPage} from '../../pages/cartpage/cartpage';
import {AuthProvider } from '../../providers/auth';
// import { NativeStorage } from '@ionic-native/native-storage';
import {CapitalizePipe} from '../../pipes/cap.pipe';
import {MyFilterPipe} from '../../pipes/json.pipe';
import {ServiceClass} from '../../providers/servicee';
import { ModalController } from 'ionic-angular';
@Component({
templateUrl: 'home.html',
selector: 'home',
})
export class | {
@ViewChild(Content) content: Content;
@ViewChild('pageSlider') pageSlider: Slides;
@ViewChild(addtocardPage) addtocartdet: addtocardPage;
items:any;
name = 'john doe';
tabs: any = '0';
fapi :any;
fapi2: any;
fapi3:any;
restjson :any;
cartcount:any ;
    receivedctr:any=0;
errorMessage:any;
baseurl:any;
private rootPage;
private contactpage;
public already;
private locationspage;
private gallarypage;
public scrollAmount = 0;
public token :any;
constructor( public auth:AuthProvider, public zone: NgZone, public dataservice: ServiceClass ,private nav: NavController,public storage: Storage ,public navparam: NavParams,public http:Http,public modalCtrl: ModalController) {
// this.rootPage = HomePage;
this.cartcount = this.dataservice.globalVar;
// alert(this.content.contentHeight);
// this.cartcount = this.dataservice.cartcount;
this.contactpage = ContactPage;
this.gallarypage = GallaryPage;
this.locationspage = LocationsPage;
// this.already = true;
this.recievedctr = this.navparam.get("ctr");
// this.cartcount = this.cartcount + this.recievedctr;
// storage.set('cartcount',this.cartcount);
// storage.set('already',true);
// storage.get('already').then((val) => {
// console.log('visited', val);
// this.already = val;
// });
storage.get('cartcount').then((val) => {
console.log('Your age is', val);
// this.cartcount = val;
// this.cartcount = this.cartcount ;
});
}
presentModal( category,subcategory,dishorder) {
let xorder:any;
console.log(category + "category " + subcategory + "subcategory" + dishorder + "dishorder" );
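    // Orders above 40 read the dish straight from the category's items (keyed by
    // subcategory); lower values read it from the sub-category's item list.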
if (dishorder > 40){
xorder = this.fapi3[category].category_items[subcategory];
}
else {
xorder = this.fapi3[category].sub_categories[subcategory].category_items[dishorder];
}
console.log( xorder);
if (this.already){
let modal = this.modalCtrl.create(LocationsPage,{"resname": this.restjson });
modal.present();
this.storage.set('already',false);
}
else{
let modal = this.modalCtrl.create(addtocardPage,{"resname": this.restjson,"dish":xorder});
modal.present();
// this.already= true;
}
}
// updatestorage(store){
// this.storage.set('cartcount',store);
// }
ngOnInit() {
//this.dataservice.globalVarUpdate().subscribe(data = >)
// this.storage.set('cartcount','10');
this.dataservice.getmenuitems(this.dataservice.token)
.subscribe(menuitems =>{ this.fapi2 = menuitems.menu[0].sub_categories[0].category_items,
console.log(this.fapi2),this.fapi3 = menuitems.menu,
this.storage.set('menuitems',menuitems.menu)
}
);
this.baseurl= 'http://dc8l3mwto1qll.cloudfront.net/assets/munch_images/' + "rnymn06237/thumb/";
console.log(this.fapi3);
// this.http.get('assets/restjson.json').map(res=> res.json()).subscribe(data3 =>{
// this.restjson = data3;
// console.log(data3);
// })
// this.http.get('assets/restjson.json')//, options)
// .map((resp) => {
// console.log("mock data" + resp.json());
// // return response.json();
// })
this.http.get('assets/restjson.json').map(res=> res.json()).subscribe(data3 =>{
this.restjson = data3;
})
}
openPage(p) {
this.nav.push(p);
}
logout(){
this.auth.logout();
}
openCheckout = ()=> { this.nav.push(CartPage) }
changeWillSlide($event) {
this.tabs = $event._snapIndex.toString();
}
selectTab(index) {
this.pageSlider.slideTo(index);
}
// getorderdetail(){
// return this.http.get('http://api.munchado.in/wapi/restaurant/details/'+59139+'?token=312839b3cdc263cd447566859238db60').map(res=> res.json()).subscribe(data3 =>{
// // this. = data3;
// }
// getrestrauntjson(){
// return this.http.get('assets/ordersummary.json')//, options)
// .map((response: Response) => {
// console.log("mock data" + response.json());
// return response.json();
// }
// )
// .catch(this.handleError);
// }
getItems(ev) {
// Reset items back to all of the items
// this.initializeItems();
console.log(ev);
this.items = this.fapi3;
// set val to the value of the ev target
var val = ev.target.value;
// if the value is an empty string don't filter the items
if (val && val.trim() != '') {
this.items = this.items.filter((item) => {
return (item.category_name.toLowerCase().indexOf(val.toLowerCase()) > -1);
})
}
}
scrollHandler(event) {
console.log(`ScrollEvent: ${event}`)
this.zone.run(()=>{
            // since scrollAmount is data-bound,
            // the update needs to happen inside the zone
            this.scrollAmount++;
            console.log('scroll ' + this.scrollAmount);
})
}
}
| HomePage |
locales.rs | use pure_rust_locales::{locale_match, Locale};
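// Thin accessors over the `pure-rust-locales` LC_TIME tables.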
pub(crate) fn short_months(locale: Locale) -> &'static [&'static str] {
locale_match!(locale => LC_TIME::ABMON)
}
pub(crate) fn long_months(locale: Locale) -> &'static [&'static str] {
locale_match!(locale => LC_TIME::MON)
}
pub(crate) fn short_weekdays(locale: Locale) -> &'static [&'static str] {
locale_match!(locale => LC_TIME::ABDAY)
}
pub(crate) fn long_weekdays(locale: Locale) -> &'static [&'static str] {
locale_match!(locale => LC_TIME::DAY)
}
pub(crate) fn am_pm(locale: Locale) -> &'static [&'static str] {
locale_match!(locale => LC_TIME::AM_PM)
}
pub(crate) fn d_fmt(locale: Locale) -> &'static str {
locale_match!(locale => LC_TIME::D_FMT) | }
pub(crate) fn d_t_fmt(locale: Locale) -> &'static str {
locale_match!(locale => LC_TIME::D_T_FMT)
}
pub(crate) fn t_fmt(locale: Locale) -> &'static str {
locale_match!(locale => LC_TIME::T_FMT)
} | |
fixnum.rs | use std::convert::TryFrom;
use crate::convert::{Convert, TryConvert};
use crate::sys;
use crate::types::{Int, Ruby, Rust};
use crate::value::Value;
use crate::{Artichoke, ArtichokeError};
impl Convert<u8, Value> for Artichoke {
fn convert(&self, value: u8) -> Value {
let value = Int::from(value);
Value::new(self, unsafe { sys::mrb_sys_fixnum_value(value) })
}
}
impl Convert<u16, Value> for Artichoke {
fn convert(&self, value: u16) -> Value {
let value = Int::from(value);
Value::new(self, unsafe { sys::mrb_sys_fixnum_value(value) })
}
}
#[cfg(not(target_arch = "wasm32"))]
impl Convert<u32, Value> for Artichoke {
fn convert(&self, value: u32) -> Value {
let value = Int::from(value);
Value::new(self, unsafe { sys::mrb_sys_fixnum_value(value) })
}
}
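// On wasm32 the underlying `Int` type is narrower than 64 bits, so these
// conversions are fallible and use `TryConvert` instead of `Convert`.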
#[cfg(target_arch = "wasm32")]
impl TryConvert<u32, Value> for Artichoke {
fn try_convert(&self, value: u32) -> Result<Value, ArtichokeError> {
let value = Int::try_from(value).map_err(|_| ArtichokeError::ConvertToRuby {
from: Rust::UnsignedInt,
to: Ruby::Fixnum,
})?;
Ok(Value::new(self, unsafe {
sys::mrb_sys_fixnum_value(value)
}))
}
}
impl TryConvert<u64, Value> for Artichoke {
fn | (&self, value: u64) -> Result<Value, ArtichokeError> {
let value = Int::try_from(value).map_err(|_| ArtichokeError::ConvertToRuby {
from: Rust::UnsignedInt,
to: Ruby::Fixnum,
})?;
Ok(Value::new(self, unsafe {
sys::mrb_sys_fixnum_value(value)
}))
}
}
impl Convert<i8, Value> for Artichoke {
fn convert(&self, value: i8) -> Value {
let value = Int::from(value);
Value::new(self, unsafe { sys::mrb_sys_fixnum_value(value) })
}
}
impl Convert<i16, Value> for Artichoke {
fn convert(&self, value: i16) -> Value {
let value = Int::from(value);
Value::new(self, unsafe { sys::mrb_sys_fixnum_value(value) })
}
}
impl Convert<i32, Value> for Artichoke {
fn convert(&self, value: i32) -> Value {
let value = Int::from(value);
Value::new(self, unsafe { sys::mrb_sys_fixnum_value(value) })
}
}
#[cfg(not(target_arch = "wasm32"))]
impl Convert<i64, Value> for Artichoke {
fn convert(&self, value: i64) -> Value {
Value::new(self, unsafe { sys::mrb_sys_fixnum_value(value) })
}
}
#[cfg(target_arch = "wasm32")]
impl TryConvert<i64, Value> for Artichoke {
fn try_convert(&self, value: i64) -> Result<Value, ArtichokeError> {
let value = Int::try_from(value).map_err(|_| ArtichokeError::ConvertToRuby {
            from: Rust::SignedInt,
to: Ruby::Fixnum,
})?;
Ok(Value::new(self, unsafe {
sys::mrb_sys_fixnum_value(value)
}))
}
}
impl TryConvert<Value, Int> for Artichoke {
fn try_convert(&self, value: Value) -> Result<Int, ArtichokeError> {
match value.ruby_type() {
Ruby::Fixnum => {
let value = value.inner();
Ok(unsafe { sys::mrb_sys_fixnum_to_cint(value) })
}
type_tag => Err(ArtichokeError::ConvertToRust {
from: type_tag,
to: Rust::SignedInt,
}),
}
}
}
impl TryConvert<Value, usize> for Artichoke {
fn try_convert(&self, value: Value) -> Result<usize, ArtichokeError> {
let value: Int = self
.try_convert(value)
.map_err(|_| ArtichokeError::ConvertToRust {
from: Ruby::Fixnum,
to: Rust::UnsignedInt,
})?;
usize::try_from(value).map_err(|_| ArtichokeError::ConvertToRust {
from: Ruby::Fixnum,
to: Rust::UnsignedInt,
})
}
}
#[cfg(test)]
mod tests {
use artichoke_core::eval::Eval;
use quickcheck_macros::quickcheck;
use crate::convert::Convert;
use crate::sys;
use crate::types::{Int, Ruby, Rust};
use crate::value::{Value, ValueLike};
use crate::ArtichokeError;
#[test]
fn fail_convert() {
let interp = crate::interpreter().expect("init");
// get a mrb_value that can't be converted to a primitive type.
let value = interp.eval(b"Object.new").expect("eval");
let expected = Err(ArtichokeError::ConvertToRust {
from: Ruby::Object,
to: Rust::SignedInt,
});
let result = value.try_into::<Int>();
assert_eq!(result, expected);
}
#[quickcheck]
fn convert_to_fixnum(i: Int) -> bool {
let interp = crate::interpreter().expect("init");
let value = interp.convert(i);
value.ruby_type() == Ruby::Fixnum
}
#[quickcheck]
fn fixnum_with_value(i: Int) -> bool {
let interp = crate::interpreter().expect("init");
let value = interp.convert(i);
let inner = value.inner();
let cint = unsafe { sys::mrb_sys_fixnum_to_cint(inner) };
cint == i
}
#[quickcheck]
fn roundtrip(i: Int) -> bool {
let interp = crate::interpreter().expect("init");
let value = interp.convert(i);
let value = value.try_into::<Int>().expect("convert");
value == i
}
#[quickcheck]
fn roundtrip_err(b: bool) -> bool {
let interp = crate::interpreter().expect("init");
let value = interp.convert(b);
let value = value.try_into::<Int>();
let expected = Err(ArtichokeError::ConvertToRust {
from: Ruby::Bool,
to: Rust::SignedInt,
});
value == expected
}
#[test]
fn fixnum_to_usize() {
let interp = crate::interpreter().expect("init");
let value: Value = interp.convert(100);
let value = value.try_into::<usize>();
let expected = Ok(100);
assert_eq!(value, expected);
let value: Value = interp.convert(-100);
let value = value.try_into::<usize>();
let expected = Err(ArtichokeError::ConvertToRust {
from: Ruby::Fixnum,
to: Rust::UnsignedInt,
});
assert_eq!(value, expected);
}
}
| try_convert |
gen-shaping-tests.py | #!/usr/bin/env python
import os
import sys
import subprocess
from pathlib import Path
# There is no sane way to test them.
IGNORE_TESTS = [
'macos.tests',
]
IGNORE_TEST_CASES = [
# aots tests
# in-house tests
# --shaper=fallback is not supported.
'simple_002',
# Not possible to implement without shaping.
'arabic_fallback_shaping_001',
# `dfont` is not supported.
'collections_001',
'collections_002',
'collections_003',
# Face index out of bounds. ttf-parser doesn't permit this.
'collections_006',
# no `hhea` table.
'indic_decompose_001',
# ttf-parser doesn't support phantom points
'variations_space_001',
# text-rendering-tests tests
# Unknown issue. Investigate.
'cmap_1_004',
'shknda_3_031',
'shlana_10_028',
'shlana_10_041',
'shlana_5_010',
'shlana_5_012',
]
def update_relative_path(tests_name, fontfile):
|
# Converts `U+0041,U+0078` into `\u{0041}\u{0078}`
def convert_unicodes(unicodes):
text = ''
for (i, u) in enumerate(unicodes.split(',')):
if i > 0 and i % 10 == 0:
text += '\\\n '
text += f'\\u{{{u[2:]}}}'
return text
def convert_test(hb_dir, hb_shape_exe, tests_name, file_name, idx, data, fonts):
fontfile, options, unicodes, glyphs_expected = data.split(':')
fontfile_rs = update_relative_path(tests_name, fontfile)
unicodes_rs = convert_unicodes(unicodes)
test_name = file_name.replace('.tests', '').replace('-', '_') + f'_{idx:03d}'
test_name = test_name.lower()
options = options.replace('--shaper=ot', '')
options = options.replace(' --font-funcs=ft', '').replace('--font-funcs=ft', '')
options = options.replace(' --font-funcs=ot', '').replace('--font-funcs=ot', '')
options = options.replace('--font-size=1000', '') # we don't support font scaling
options = options.strip()
# We have to actually run hb-shape instead of using predefined results,
# because hb sometimes stores results for freetype and not for embedded OpenType
# engine, which we are using.
# Right now, it only affects 'text-rendering-tests'.
if len(options) != 0:
options_list = options.split(' ')
else:
options_list = []
options_list.insert(0, str(hb_shape_exe))
# Force OT functions, since this is the only one we support in rustybuzz.
options_list.append('--font-funcs=ot')
abs_font_path = hb_dir.joinpath('test/shaping/data')\
.joinpath(tests_name)\
.joinpath('tests') \
.joinpath(fontfile)
options_list.append(str(abs_font_path))
options_list.append(f'--unicodes={unicodes}') # no need to escape it
glyphs_expected = subprocess.run(options_list, check=True, stdout=subprocess.PIPE)\
.stdout.decode()
glyphs_expected = glyphs_expected[1:-2] # remove `[..]\n`
glyphs_expected = glyphs_expected.replace('|', '|\\\n ')
options = options.replace('"', '\\"')
fonts.add(os.path.split(fontfile_rs)[1])
if test_name in IGNORE_TEST_CASES:
return ''
return (f'#[test]\n'
f'fn {test_name}() {{\n'
f' assert_eq!(\n'
f' shape(\n'
f' "{fontfile_rs}",\n'
f' "{unicodes_rs}",\n'
f' "{options}",\n'
f' ),\n'
f' "{glyphs_expected}"\n'
f' );\n'
f'}}\n'
'\n')
def convert(hb_dir, hb_shape_exe, tests_dir, tests_name):
files = sorted(os.listdir(tests_dir))
files = [f for f in files if f.endswith('.tests')]
fonts = set()
rust_code = ('// WARNING: this file was generated by ../scripts/gen-shaping-tests.py\n'
'\n'
'mod shaping_impl;\n'
'use shaping_impl::shape;\n'
'\n')
for file in files:
if file in IGNORE_TESTS:
continue
with open(tests_dir / file, 'r') as f:
for idx, test in enumerate(f.read().splitlines()):
# skip comments and empty lines
if test.startswith('#') or len(test) == 0:
continue
rust_code += convert_test(hb_dir, hb_shape_exe, tests_name,
file, idx + 1, test, fonts)
tests_name_snake_case = tests_name.replace('-', '_')
with open(f'../tests/shaping_{tests_name_snake_case}.rs', 'w') as f:
f.write(rust_code)
return fonts
if len(sys.argv) != 2:
print('Usage: gen-shaping-tests.py /path/to/harfbuzz-src')
exit(1)
hb_dir = Path(sys.argv[1])
assert hb_dir.exists()
# Check that harfbuzz was built.
hb_shape_exe = hb_dir.joinpath('builddir/util/hb-shape')
if not hb_shape_exe.exists():
print('Build harfbuzz first using:')
print(' meson builddir')
print(' ninja -Cbuilddir')
exit(1)
used_fonts = []
font_files = []
test_dir_names = ['aots', 'in-house', 'text-rendering-tests']
for test_dir_name in test_dir_names:
tests_dir = hb_dir / f'test/shaping/data/{test_dir_name}/tests'
used_fonts += convert(hb_dir, hb_shape_exe, tests_dir, test_dir_name)
font_files += os.listdir(hb_dir / f'test/shaping/data/{test_dir_name}/fonts')
# Check for unused fonts.
unused_fonts = sorted(list(set(font_files).difference(used_fonts)))
if len(unused_fonts) != 0:
print('Unused fonts:')
for font in unused_fonts:
print(font)
| fontfile = fontfile.replace('../fonts/', '')
return f'tests/fonts/{tests_name}/{fontfile}' # relative to the root dir |
reader.go | // Copyright 2021 Optakt Labs OÜ
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
package index
import (
"errors"
"fmt"
"github.com/dgraph-io/badger/v2"
"github.com/onflow/flow-go/ledger"
"github.com/onflow/flow-go/model/flow"
"github.com/optakt/flow-dps/models/dps"
)
// Reader implements the `index.Reader` interface on top of the DPS server's
// Badger database index.
type Reader struct {
db *badger.DB
lib dps.ReadLibrary
}
// NewReader creates a new index reader, using the given database as the
// underlying state repository. It is recommended to provide a read-only Badger
// database.
func NewReader(db *badger.DB, lib dps.ReadLibrary) *Reader { |
// First returns the height of the first finalized block that was indexed.
func (r *Reader) First() (uint64, error) {
var height uint64
err := r.db.View(r.lib.RetrieveFirst(&height))
return height, err
}
// Last returns the height of the last finalized block that was indexed.
func (r *Reader) Last() (uint64, error) {
var height uint64
err := r.db.View(r.lib.RetrieveLast(&height))
return height, err
}
// HeightForBlock returns the height for the given block identifier.
func (r *Reader) HeightForBlock(blockID flow.Identifier) (uint64, error) {
var height uint64
err := r.db.View(r.lib.LookupHeightForBlock(blockID, &height))
return height, err
}
// Commit returns the commitment of the execution state as it was after the
// execution of the finalized block at the given height.
func (r *Reader) Commit(height uint64) (flow.StateCommitment, error) {
var commit flow.StateCommitment
err := r.db.View(r.lib.RetrieveCommit(height, &commit))
return commit, err
}
// Header returns the header for the finalized block at the given height.
func (r *Reader) Header(height uint64) (*flow.Header, error) {
var header flow.Header
err := r.db.View(r.lib.RetrieveHeader(height, &header))
return &header, err
}
// Values returns the Ledger values of the execution state at the given paths
// as they were after the execution of the finalized block at the given height.
// For compatibility with existing Flow execution node code, a path that is not
// found within the indexed execution state returns a nil value without error.
func (r *Reader) Values(height uint64, paths []ledger.Path) ([]ledger.Value, error) {
first, err := r.First()
if err != nil {
return nil, fmt.Errorf("could not check first height: %w", err)
}
last, err := r.Last()
if err != nil {
return nil, fmt.Errorf("could not check last height: %w", err)
}
if height < first || height > last {
return nil, fmt.Errorf("invalid height (given: %d, first: %d, last: %d)", height, first, last)
}
values := make([]ledger.Value, 0, len(paths))
err = r.db.View(func(tx *badger.Txn) error {
for _, path := range paths {
var payload ledger.Payload
err := r.lib.RetrievePayload(height, path, &payload)(tx)
if errors.Is(err, badger.ErrKeyNotFound) {
values = append(values, nil)
continue
}
if err != nil {
return fmt.Errorf("could not retrieve payload (path: %x): %w", path, err)
}
values = append(values, payload.Value)
}
return nil
})
return values, err
}
// Collection returns the collection with the given ID.
func (r *Reader) Collection(collID flow.Identifier) (*flow.LightCollection, error) {
var collection flow.LightCollection
err := r.db.View(r.lib.RetrieveCollection(collID, &collection))
return &collection, err
}
// CollectionsByHeight returns the collection IDs at the given height.
func (r *Reader) CollectionsByHeight(height uint64) ([]flow.Identifier, error) {
var collIDs []flow.Identifier
err := r.db.View(r.lib.LookupCollectionsForHeight(height, &collIDs))
return collIDs, err
}
// Guarantee returns the guarantee with the given collection ID.
func (r *Reader) Guarantee(collID flow.Identifier) (*flow.CollectionGuarantee, error) {
var collection flow.CollectionGuarantee
err := r.db.View(r.lib.RetrieveGuarantee(collID, &collection))
return &collection, err
}
// Transaction returns the transaction with the given ID.
func (r *Reader) Transaction(txID flow.Identifier) (*flow.TransactionBody, error) {
var transaction flow.TransactionBody
err := r.db.View(r.lib.RetrieveTransaction(txID, &transaction))
return &transaction, err
}
// HeightForTransaction returns the height of the block within which the given
// transaction identifier is.
func (r *Reader) HeightForTransaction(txID flow.Identifier) (uint64, error) {
var height uint64
err := r.db.View(r.lib.LookupHeightForTransaction(txID, &height))
return height, err
}
// TransactionsByHeight returns the transaction IDs within the block with the given ID.
func (r *Reader) TransactionsByHeight(height uint64) ([]flow.Identifier, error) {
var txIDs []flow.Identifier
err := r.db.View(r.lib.LookupTransactionsForHeight(height, &txIDs))
return txIDs, err
}
// Result returns the transaction result for the given transaction ID.
func (r *Reader) Result(txID flow.Identifier) (*flow.TransactionResult, error) {
var result flow.TransactionResult
err := r.db.View(r.lib.RetrieveResult(txID, &result))
return &result, err
}
// Events returns the events of all transactions that were part of the
// finalized block at the given height. It can optionally filter them by event
// type; if no event types are given, all events are returned.
func (r *Reader) Events(height uint64, types ...flow.EventType) ([]flow.Event, error) {
first, err := r.First()
if err != nil {
return nil, fmt.Errorf("could not check first height: %w", err)
}
last, err := r.Last()
if err != nil {
return nil, fmt.Errorf("could not check last height: %w", err)
}
if height < first || height > last {
return nil, fmt.Errorf("invalid height (given: %d, first: %d, last: %d)", height, first, last)
}
var events []flow.Event
err = r.db.View(r.lib.RetrieveEvents(height, types, &events))
if err != nil {
return nil, fmt.Errorf("could not retrieve events: %w", err)
}
return events, nil
}
// Seal returns the seal with the given ID.
func (r *Reader) Seal(sealID flow.Identifier) (*flow.Seal, error) {
var seal flow.Seal
err := r.db.View(r.lib.RetrieveSeal(sealID, &seal))
return &seal, err
}
// SealsByHeight returns all of the seals that were part of the finalized block at the given height.
func (r *Reader) SealsByHeight(height uint64) ([]flow.Identifier, error) {
var sealIDs []flow.Identifier
err := r.db.View(r.lib.LookupSealsForHeight(height, &sealIDs))
return sealIDs, err
}
|
r := Reader{
db: db,
lib: lib,
}
return &r
}
|
schema.rs | table! {
block_stats (block_id) {
block_id -> Int4,
block_size -> Int4,
first_bytes -> Bytea,
}
}
table! {
blocks (id) {
id -> Int4,
base32_cidv1 -> Text,
codec_id -> Int4,
}
}
table! {
codecs (id) {
id -> Int4,
name -> Text,
}
}
table! {
errors (id) { | name -> Text,
}
}
table! {
failed_resolves (block_id) {
block_id -> Int4,
error_id -> Int4,
ts -> Timestamp,
}
}
table! {
successful_resolves (block_id) {
block_id -> Int4,
ts -> Timestamp,
}
}
table! {
unixfs_blocks (block_id) {
block_id -> Int4,
unixfs_type_id -> Int4,
size -> Int8,
cumulative_size -> Int8,
blocks -> Int4,
num_links -> Int4,
}
}
table! {
unixfs_file_heuristics (block_id) {
block_id -> Int4,
tree_mime_mime_type -> Nullable<Text>,
chardet_encoding -> Nullable<Text>,
chardet_language -> Nullable<Text>,
chardet_confidence -> Nullable<Float4>,
chardetng_encoding -> Nullable<Text>,
whatlang_language -> Nullable<Text>,
whatlang_script -> Nullable<Text>,
whatlang_confidence -> Nullable<Float8>,
}
}
table! {
unixfs_links (parent_block_id, referenced_base32_cidv1, name) {
parent_block_id -> Int4,
referenced_base32_cidv1 -> Text,
name -> Text,
size -> Int8,
}
}
table! {
unixfs_types (id) {
id -> Int4,
name -> Text,
}
}
joinable!(block_stats -> blocks (block_id));
joinable!(blocks -> codecs (codec_id));
joinable!(failed_resolves -> blocks (block_id));
joinable!(failed_resolves -> errors (error_id));
joinable!(successful_resolves -> blocks (block_id));
joinable!(unixfs_blocks -> block_stats (block_id));
joinable!(unixfs_blocks -> unixfs_types (unixfs_type_id));
joinable!(unixfs_file_heuristics -> unixfs_blocks (block_id));
joinable!(unixfs_links -> unixfs_blocks (parent_block_id));
allow_tables_to_appear_in_same_query!(
block_stats,
blocks,
codecs,
errors,
failed_resolves,
successful_resolves,
unixfs_blocks,
unixfs_file_heuristics,
unixfs_links,
unixfs_types,
); | id -> Int4, |
tour.py | # -*- coding: utf-8 -*-
""" Sahana Eden Guided Tour Model
@copyright: 2009-2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
@todo: update for new template path modules/template
"""
__all__ = ("S3GuidedTourModel",
"tour_rheader",
"tour_builder",
)
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3GuidedTourModel(S3Model):
""" Details about which guided tours this Person has completed """
names = ("tour_config",
"tour_details",
"tour_user",
)
def model(self):
T = current.T
db = current.db
NONE = current.messages["NONE"]
s3 = current.response.s3
add_components = self.add_components
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
person_id = self.pr_person_id
# ---------------------------------------------------------------------
# Guided tours that are available
#
tablename = "tour_config"
define_table(tablename,
Field("name",
represent=lambda v: v or NONE,
label=T("Display name"),
requires = IS_NOT_EMPTY(),
),
Field("code",
length=255,
notnull=True,
unique=True,
represent=lambda v: v or NONE,
label=T("Unique code")),
Field("controller",
represent=lambda v: v or NONE,
label=T("Controller tour is activated")),
Field("function",
represent=lambda v: v or NONE,
label=T("Function tour is activated")),
Field("autostart", "boolean",
default=False,
represent=lambda v: \
T("Yes") if v else T("No"),
label=T("Auto start")),
Field("role", "string",
represent=lambda v: v or NONE,
label=T("User's role")),
* s3_meta_fields()
)
# CRUD strings
ADD_TOUR = T("Create Tour")
crud_strings[tablename] = Storage(
label_create = ADD_TOUR,
title_display = T("Tour Configuration"),
title_list = T("Tours"),
title_update = T("Edit Tour"),
label_list_button = T("List Tours"),
label_delete_button = T("Delete Tour"),
msg_record_created = T("Tour added"),
msg_record_modified = T("Tour updated"),
msg_record_deleted = T("Tour deleted"),
msg_list_empty = T("No Tours currently registered"))
represent = S3Represent(lookup=tablename, translate=True)
tour_config_id = S3ReusableField("tour_config_id", "reference %s" % tablename,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "tour_config.id",
represent,
sort=True)),
represent=represent,
label=T("Tour Name"),
ondelete="SET NULL")
# Components
add_components(tablename,
# Details
tour_details="tour_config_id",
# Users
tour_user="tour_config_id",
)
# ---------------------------------------------------------------------
    # Details of the tour: one record per tip/step, anchored to a page
    # (controller/function/args) and an HTML element.
#
tablename = "tour_details"
define_table(tablename,
tour_config_id(empty = False),
Field("posn", "integer",
default=0,
label=T("Position in tour")),
Field("controller",
represent=lambda v: v or NONE,
label=T("Controller name")),
Field("function",
represent=lambda v: v or NONE,
label=T("Function name")),
Field("args",
represent=lambda v: v or NONE,
label=T("Arguments")),
Field("tip_title",
represent=lambda v: v or NONE,
label=T("Title")),
Field("tip_details",
represent=lambda v: v or NONE,
label=T("Details")),
Field("html_id",
represent=lambda v: v or NONE,
label=T("HTML ID")),
Field("html_class",
represent=lambda v: v or NONE,
label=T("HTML class")),
Field("button",
represent=lambda v: v or NONE,
label=T("Button name")),
Field("tip_location",
represent=lambda v: v or NONE,
label=T("Loctaion of tip")),
Field("datatable_id",
represent=lambda v: v or NONE,
label=T("DataTable ID")),
Field("datatable_row",
represent=lambda v: v or NONE,
label=T("DataTable row")),
Field("redirect",
represent=lambda v: v or NONE,
label=T("Redirect URL")),
)
# CRUD strings
ADD_DETAILS = T("Create Details")
crud_strings[tablename] = Storage(
label_create = ADD_DETAILS,
title_display = T("Tour Details"),
title_list = T("Details"),
title_update = T("Edit Details"),
label_list_button = T("List Details"),
label_delete_button = T("Delete Detail"),
msg_record_created = T("Detail added"),
msg_record_modified = T("Detail updated"),
msg_record_deleted = T("Detail deleted"),
msg_list_empty = T("No Details currently registered"))
configure(tablename,
orderby = "tour_details.tour_config_id,tour_details.posn"
)
# ---------------------------------------------------------------------
# Details of the tours that the user has taken.
#
tablename = "tour_user"
define_table(tablename,
person_id(label = T("Person"),
ondelete="CASCADE",
empty = False,
),
tour_config_id(),
Field("place",
represent=lambda v: v or NONE,
label=T("Where reached")),
Field("resume",
represent=lambda v: v or NONE,
label=T("URL to resume tour")),
Field("completed", "boolean",
default=False,
represent=lambda v: \
T("Yes") if v else T("No"),
label=T("Completed tour?")),
Field("trip_counter", "integer",
default=0,
label=T("Times Completed")),
)
# CRUD strings
ADD_USER = T("Create User")
crud_strings[tablename] = Storage(
label_create = ADD_USER,
title_display = T("Tour User"),
title_list = T("Users"),
title_update = T("Edit User"),
label_list_button = T("List Users"),
label_delete_button = T("Delete User"),
msg_record_created = T("User added"),
msg_record_modified = T("User updated"),
msg_record_deleted = T("User deleted"),
msg_list_empty = T("No users have taken a tour"))
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict(tour_config_id = tour_config_id,
)
# =============================================================================
def tour_rheader(r):
    """ Resource Header for Guided Tour """

    if r.representation == "html":
        tour = r.record
        if tour:
            T = current.T
            tabs = [(T("Edit Details"), None),
                    (T("Details"), "details"),
                    (T("People"), "user"),
                    ]
            rheader_tabs = s3_rheader_tabs(r, tabs)
            table = r.table
            rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
                                   tour.name,
                                   ),
                                TR(TH("%s: " % table.code.label),
                                   tour.code,
                                   ),
                                ),
                          rheader_tabs
                          )
            return rheader
    return None
# =============================================================================
def tour_builder(output):
"""
Helper function to attach a guided tour (if required) to the output
"""
auth = current.auth
db = current.db
s3db = current.s3db
request = current.request
s3 = current.response.s3
T = current.T
req_vars = request.vars
tour_id = req_vars.tour
# Now see if the details are on the database for this user
tour = None
user_id = None
if auth.is_logged_in():
user_id = auth.s3_logged_in_person()
# Find out if the user has done this tour before
utable = s3db.tour_user
uquery = (utable.person_id == user_id) & \
(utable.tour_config_id == tour_id)
tour = db(uquery).select(utable.id,
utable.completed,
utable.place,
utable.resume,
limitby=(0, 1)).first()
# If the tour has just been started (from the menu) then
# it may be necessary to redirect to a different controller
# @todo: does place need to be changed to controller and function?
if not req_vars.tour_running:
if (tour and not tour.completed and tour.place != request.controller):
redirect("%s?tour=%s" %(tour.resume, tour_id))
# get the details from the database
dtable = s3db.tour_details
dquery = (dtable.tour_config_id == tour_id) &\
(dtable.controller == request.controller) &\
(dtable.function == request.function)
details = db(dquery).select(dtable.args,
dtable.tip_title,
dtable.tip_details,
dtable.button,
dtable.tip_location,
dtable.html_id,
dtable.html_class,
dtable.datatable_id,
dtable.datatable_row,
dtable.redirect,
orderby=(dtable.posn)
)
# tour_filename = os.path.join(request.folder,
# "private",
# "tour",
# tour_name)
# tour_file = open (tour_filename, "rb")
# # now open the details of the guided_tour into a dictionary
# import csv
# tour_details = csv.DictReader(tour_file, skipinitialspace=True)
# load the list of tour items in the html
joyride_OL = OL(_id="joyrideID_1")
pre_step_data = []
post_step_data = []
post_ride_data = []
last_row = None
last_used = None
req_args = request.args
cnt = -1
for row in details:
if row.args:
args = row.args.split(",")
else:
args = []
# if the page has a nested login form then "login" will be added to
# the req_args list so it needs to be added to the args list as well
if "login" in req_args:
if "login" not in args:
args.append("login")
# The following will capture the actual id used for the req_arg
# Example org/organisation/10, where 10 is the id from the database
posn = 0
for arg in args:
if arg == "dt_id":
args[posn] = req_args[posn]
posn += 1
# Now check that the tour url matches the current url
if (args == req_args):
cnt += 1 # number of records used in this part of the tour
if row.datatable_id:
dt_id = row.datatable_id
# cols = []
# if "DataTable_columns" in row:
# cols = row["DataTable_columns"].split(",")
row_num = 0
if row.datatable_row:
row_num = row.datatable_row
# Now set this up for the pre-processor hook in joyride
pre_step_data.append([cnt, dt_id, row_num])
if row.redirect:
redirect_row = row.redirect.split(",")
if len(redirect_row) >= 3:
url = URL(c=redirect_row[0],
f=redirect_row[1],
args=redirect_row[2:],
vars={"tour_running":True,
"tour":tour_id}
)
if "dt_id" in redirect_row[2]:
post_step_data.append([cnt, url, dt_id, row_num])
elif len(redirect_row) == 2:
url = URL(c=redirect_row[0],
f=redirect_row[1],
vars={"tour_running":True,
"tour":tour_id}
)
post_step_data.append([cnt, url])
else:
url = URL(c=redirect_row[0],vars={"tour_running":True,
"tour":tour_id})
post_step_data.append([cnt, url])
extra = {}
if row.html_id:
extra["_data-id"] = row.html_id
elif row.html_class:
extra["_data-class"] = row.html_class
if row.button:
extra["_data-button"] = row.button
else:
extra["_data-button"] = "Next"
if row.tip_location:
extra["_data-options"] = "tipLocation:%s" % row.tip_location.lower()
else:
extra["_data-options"] = "tipLocation:right"
joyride_OL.append(LI(H2(T(row.tip_title)),
P(T(row.tip_details)),
**extra
)
)
last_used = row
last_row = row
# The following redirect will be triggered if the user has moved away
# from the tour, such as by clicking on a tab. However if a tab
# is part of the tour we are unable to determine if they have moved
# away or just visiting as part of the tour and so it will continue.
if len(joyride_OL) == 0:
del request.vars.tour
redirect(URL(args=req_args,
vars=request.vars))
if (user_id != None) and (last_row == last_used):
# set up an AJAX call to record that the tour has been completed
post_ride_data = [cnt, tour_id]
joyride_div = DIV(joyride_OL,
_class="hidden")
# Add the javascript configuration data
from gluon.serializers import json as jsons
if pre_step_data:
joyride_div.append(INPUT(_type="hidden",
_id="prestep_data",
_name="prestep_data",
_value=jsons(pre_step_data))
)
if post_step_data:
joyride_div.append(INPUT(_type="hidden",
_id="poststep_data",
_name="poststep_data",
_value=jsons(post_step_data))
)
if post_ride_data:
joyride_div.append(INPUT(_type="hidden",
_id="postride_data",
_name="postride_data",
_value=jsons(post_ride_data))
)
# Now add the details to the tour_user table
if user_id != None:
if tour == None:
# this user has never done this tour before so create a new record
utable.insert(person_id = user_id,
tour_config_id = tour_id,
place = request.controller,
resume = request.url)
else:
# the user has done some of this tour so update the record
db(uquery).update(place = request.controller,
resume = request.url,
completed = False)
output["joyride_div"] = joyride_div
if s3.debug:
appname = request.application
s3.scripts.append("/%s/static/scripts/jquery.joyride.js" % appname)
s3.scripts.append("/%s/static/scripts/S3/s3.guidedtour.js" % appname)
s3.stylesheets.append("plugins/joyride.min.css")
else:
s3.scripts.append("/%s/static/scripts/S3/s3.guidedtour.min.js" % request.application)
s3.stylesheets.append("plugins/joyride.css")
return output
# END =========================================================================
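# Illustrative usage sketch (not from the source): a controller can route its
# output dict through tour_builder() when a tour is requested, so the joyride
# markup and scripts get attached. The controller name and page content below
# are hypothetical.
def example_tour_controller():
    output = dict(title=current.T("Home"))
    if "tour" in current.request.vars:
        output = tour_builder(output)
    return output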
| """ Resource Header for Guided Tour """
if r.representation == "html":
tour = r.record
if tour:
T = current.T
tabs = [(T("Edit Details"), None),
(T("Details"), "details"),
(T("People"), "user"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
tour.name,
),
TR(TH("%s: " % table.code.label),
tour.code,
),
),
rheader_tabs
)
return rheader
return None |
lib.rs
#![doc = "generated by AutoRust 0.1.0"]
#[cfg(feature = "package-2015-11-preview")]
mod package_2015_11_preview;
#[cfg(feature = "package-2015-11-preview")]
pub use package_2015_11_preview::{models, operations, API_VERSION};
#[cfg(feature = "package-2015-03")]
mod package_2015_03;
#[cfg(feature = "package-2015-03")]
pub use package_2015_03::{models, operations, API_VERSION};
#[cfg(feature = "package-2019-08-preview")]
mod package_2019_08_preview;
#[cfg(feature = "package-2019-08-preview")]
pub use package_2019_08_preview::{models, operations, API_VERSION};
#[cfg(feature = "package-2019-09-preview")]
mod package_2019_09_preview;
#[cfg(feature = "package-2019-09-preview")]
pub use package_2019_09_preview::{models, operations, API_VERSION};
#[cfg(feature = "package-2020-03-preview")]
mod package_2020_03_preview;
#[cfg(feature = "package-2020-03-preview")]
pub use package_2020_03_preview::{models, operations, API_VERSION};
#[cfg(feature = "package-2020-08")]
mod package_2020_08;
#[cfg(feature = "package-2020-08")]
pub use package_2020_08::{models, operations, API_VERSION};
#[cfg(feature = "package-2020-10-only")]
mod package_2020_10_only;
#[cfg(feature = "package-2020-10-only")]
pub use package_2020_10_only::{models, operations, API_VERSION};
#[cfg(feature = "package-2020-10")]
mod package_2020_10;
#[cfg(feature = "package-2020-10")]
pub use package_2020_10::{models, operations, API_VERSION};
#[cfg(feature = "package-2021-06")]
mod package_2021_06;
use azure_core::setters;
#[cfg(feature = "package-2021-06")]
pub use package_2021_06::{models, operations, API_VERSION};
pub fn config(
http_client: std::sync::Arc<dyn azure_core::HttpClient>,
token_credential: Box<dyn azure_core::TokenCredential>,
) -> OperationConfigBuilder {
OperationConfigBuilder {
api_version: None,
http_client,
base_path: None,
token_credential,
token_credential_resource: None,
}
}
pub struct OperationConfigBuilder {
api_version: Option<String>,
http_client: std::sync::Arc<dyn azure_core::HttpClient>,
base_path: Option<String>,
token_credential: Box<dyn azure_core::TokenCredential>,
token_credential_resource: Option<String>,
}
impl OperationConfigBuilder {
setters! { api_version : String => Some (api_version) , base_path : String => Some (base_path) , token_credential_resource : String => Some (token_credential_resource) , }
pub fn build(self) -> OperationConfig {
OperationConfig {
api_version: self.api_version.unwrap_or(API_VERSION.to_owned()),
http_client: self.http_client,
base_path: self.base_path.unwrap_or("https://management.azure.com".to_owned()),
token_credential: Some(self.token_credential),
token_credential_resource: self.token_credential_resource.unwrap_or("https://management.azure.com/".to_owned()),
}
}
}
pub struct OperationConfig {
api_version: String,
http_client: std::sync::Arc<dyn azure_core::HttpClient>,
base_path: String,
token_credential: Option<Box<dyn azure_core::TokenCredential>>,
token_credential_resource: String,
}
impl OperationConfig {
pub fn api_version(&self) -> &str {
self.api_version.as_str()
}
pub fn http_client(&self) -> &dyn azure_core::HttpClient {
self.http_client.as_ref()
}
pub fn base_path(&self) -> &str {
self.base_path.as_str()
}
pub fn token_credential(&self) -> Option<&dyn azure_core::TokenCredential> {
self.token_credential.as_deref()
}
    pub fn token_credential_resource(&self) -> &str {
self.token_credential_resource.as_str()
}
}
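// Illustrative usage sketch (not part of the generated file): building an
// OperationConfig through the builder returned by `config`. `http_client`
// and `credential` are assumed to be constructed elsewhere.
//
//     let cfg = config(http_client, Box::new(credential))
//         .base_path("https://management.usgovcloudapi.net".to_owned())
//         .build();
//     assert_eq!(cfg.api_version(), API_VERSION);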
encrypted_key.go
// Copyright 2016 Russell Haering et al.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"bytes"
"crypto/aes"
"crypto/des"
"crypto/cipher"
"crypto/rand"
"crypto/rsa"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"crypto/tls"
"encoding/base64"
"encoding/hex"
"fmt"
"hash"
"strings"
)
//EncryptedKey contains the decryption key data from the saml2 core and xmlenc
//standards.
type EncryptedKey struct {
// EncryptionMethod string `xml:"EncryptionMethod>Algorithm"`
X509Data string `xml:"KeyInfo>X509Data>X509Certificate"`
CipherValue string `xml:"CipherData>CipherValue"`
EncryptionMethod EncryptionMethod
}
//EncryptionMethod specifies the type of encryption that was used.
type EncryptionMethod struct {
Algorithm string `xml:",attr,omitempty"`
//Digest method is present for algorithms like RSA-OAEP.
//See https://www.w3.org/TR/xmlenc-core1/.
//To convey the digest methods an entity supports,
//DigestMethod in extensions element is used.
//See http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-metadata-algsupport.html.
DigestMethod *DigestMethod `xml:",omitempty"`
}
//DigestMethod is a digest type specification
type DigestMethod struct {
Algorithm string `xml:",attr,omitempty"`
}
//Well-known public-key encryption methods
const (
MethodRSAOAEP = "http://www.w3.org/2001/04/xmlenc#rsa-oaep-mgf1p"
MethodRSAOAEP2 = "http://www.w3.org/2009/xmlenc11#rsa-oaep"
MethodRSAv1_5 = "http://www.w3.org/2001/04/xmlenc#rsa-1_5"
)
//Well-known private key encryption methods
const (
MethodAES128GCM = "http://www.w3.org/2009/xmlenc11#aes128-gcm"
MethodAES128CBC = "http://www.w3.org/2001/04/xmlenc#aes128-cbc"
MethodAES256CBC = "http://www.w3.org/2001/04/xmlenc#aes256-cbc"
MethodTripleDESCBC = "http://www.w3.org/2001/04/xmlenc#tripledes-cbc"
)
//Well-known hash methods
const (
MethodSHA1 = "http://www.w3.org/2000/09/xmldsig#sha1"
MethodSHA256 = "http://www.w3.org/2000/09/xmldsig#sha256"
MethodSHA512 = "http://www.w3.org/2000/09/xmldsig#sha512"
)
//SHA-1 is commonly used for certificate fingerprints (openssl -fingerprint and ADFS thumbprint).
//SHA-1 is sufficient for our purposes here (error message).
func debugKeyFp(keyBytes []byte) string {
if len(keyBytes) < 1 {
return ""
}
hashFunc := sha1.New()
hashFunc.Write(keyBytes)
sum := strings.ToLower(hex.EncodeToString(hashFunc.Sum(nil)))
var ret string
for idx := 0; idx+1 < len(sum); idx += 2 {
if idx == 0 {
ret += sum[idx : idx+2]
} else {
ret += ":" + sum[idx:idx+2]
}
}
return ret
}
//DecryptSymmetricKey returns the private key contained in the EncryptedKey document
func (ek *EncryptedKey) DecryptSymmetricKey(cert *tls.Certificate) (cipher.Block, error) {
if len(cert.Certificate) < 1 {
return nil, fmt.Errorf("decryption tls.Certificate has no public certs attached")
}
// The EncryptedKey may or may not include X509Data (certificate).
// If included, the EncryptedKey certificate:
// - is FYI only (fail if it does not match the SP certificate)
// - is NOT used to decrypt CipherData
if ek.X509Data != "" {
if encCert, err := base64.StdEncoding.DecodeString(ek.X509Data); err != nil {
return nil, fmt.Errorf("error decoding EncryptedKey certificate: %v", err)
} else if !bytes.Equal(cert.Certificate[0], encCert) {
return nil, fmt.Errorf("key decryption attempted with mismatched cert, SP cert(%.11s), assertion cert(%.11s)",
debugKeyFp(cert.Certificate[0]), debugKeyFp(encCert))
}
}
cipherText, err := base64.StdEncoding.DecodeString(ek.CipherValue)
if err != nil {
return nil, err
}
switch pk := cert.PrivateKey.(type) {
case *rsa.PrivateKey:
var h hash.Hash
if ek.EncryptionMethod.DigestMethod == nil {
			//if digest method is not present, default to SHA-1.
//Digest method is used by methods like RSA-OAEP.
h = sha1.New()
} else {
switch ek.EncryptionMethod.DigestMethod.Algorithm {
case "", MethodSHA1:
h = sha1.New() // default
case MethodSHA256:
h = sha256.New()
case MethodSHA512:
h = sha512.New()
default:
return nil, fmt.Errorf("unsupported digest algorithm: %v",
ek.EncryptionMethod.DigestMethod.Algorithm)
}
}
switch ek.EncryptionMethod.Algorithm {
case "":
return nil, fmt.Errorf("missing encryption algorithm")
case MethodRSAOAEP, MethodRSAOAEP2:
pt, err := rsa.DecryptOAEP(h, rand.Reader, pk, cipherText, nil)
if err != nil {
return nil, fmt.Errorf("rsa internal error: %v", err)
}
b, err := aes.NewCipher(pt)
if err != nil {
return nil, err
}
return b, nil
case MethodRSAv1_5:
pt, err := rsa.DecryptPKCS1v15(rand.Reader, pk, cipherText)
if err != nil {
return nil, fmt.Errorf("rsa internal error: %v", err)
}
//From https://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf the xml encryption
//methods to be supported are from http://www.w3.org/2001/04/xmlenc#Element.
//https://www.w3.org/TR/2002/REC-xmlenc-core-20021210/Overview.html#Element.
//https://www.w3.org/TR/2002/REC-xmlenc-core-20021210/#sec-Algorithms
//Sec 5.4 Key Transport:
//The RSA v1.5 Key Transport algorithm given below are those used in conjunction with TRIPLEDES
//Please also see https://www.w3.org/TR/xmlenc-core/#sec-Algorithms and
//https://www.w3.org/TR/xmlenc-core/#rsav15note.
b, err := des.NewTripleDESCipher(pt)
if err != nil {
return nil, err
}
return b, nil
default:
return nil, fmt.Errorf("unsupported encryption algorithm: %s", ek.EncryptionMethod.Algorithm)
}
}
return nil, fmt.Errorf("no cipher for decoding symmetric key")
}
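// Illustrative usage sketch (not part of the package): decrypting the
// symmetric key from a parsed EncryptedKey with the SP's TLS key pair.
// The file names and the raw XML input are assumptions.
//
//	cert, err := tls.LoadX509KeyPair("sp-cert.pem", "sp-key.pem")
//	if err != nil {
//		log.Fatal(err)
//	}
//	var ek EncryptedKey
//	if err := xml.Unmarshal(rawEncryptedKey, &ek); err != nil {
//		log.Fatal(err)
//	}
//	block, err := ek.DecryptSymmetricKey(&cert)
//	if err != nil {
//		log.Fatal(err)
//	}
//	_ = block // cipher.Block for decrypting the EncryptedData payload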
bubbleSort.ts
/**
 * Bubble sort, O(n^2)
 * @param arr array to sort (sorted in place)
*/
export default function bubbleSort(arr: number[]) {
const len = arr.length;
let temp: number;
// let loopCount = 0;
// let changeCount = 0;
for (let i = 0; i < len; i++) {
    // Optimization: if a full pass makes no swap, the array is already sorted and the loop can stop early
let complete = true;
for (let j = 0; j < len - 1 - i; j++) {
// loopCount++;
if (arr[j] > arr[j + 1]) {
// changeCount++;
// [arr[j], arr[j + 1]] = [arr[j + 1], arr[j]]; bad performance
temp = arr[j];
arr[j] = arr[j + 1];
arr[j + 1] = temp;
complete = false;
}
}
if (complete) break;
  }
  // console.log(`Loops: ${loopCount}\nSwaps: ${changeCount}`);
} |
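// Illustrative usage sketch (not part of the module; the import path is an
// assumption): bubbleSort sorts in place, so read the mutated input afterwards.
import bubbleSort from "./bubbleSort";

const data = [5, 1, 4, 2, 8];
bubbleSort(data);
console.log(data); // -> [1, 2, 4, 5, 8]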
TelemetryLogger.ts
import { ReportConfiguration } from "../config/ReportConfiguration";
import { PipelineType } from "../config/pipeline/PipelineType";
import { EnumUtils } from "../utils/EnumUtils";
const now = require("performance-now")
export class TelemetryLogger {
public static readonly TELEMETRY_LINE =
"##vso[telemetry.publish area=AgentTasks;feature=EmailReportTask]";
private static instance: TelemetryLogger;
private static reportConfig: ReportConfiguration;
/**
* Formats and sends all telemetry collected to be published
*/
public static LogTaskConfig(reportConfiguration: ReportConfiguration): void {
this.reportConfig = reportConfiguration;
const pipelineConfig = this.reportConfig.$pipelineConfiguration;
const mailConfig = this.reportConfig.$mailConfiguration;
const reportDataConfig = this.reportConfig.$reportDataConfiguration;
let pipelineTypeString: string = "Release";
let environmentId: number = 0;
if (pipelineConfig.$pipelineType == PipelineType.Build) {
pipelineTypeString = "Build";
} else {
environmentId = pipelineConfig.$environmentId;
}
const groupTestSummary: string[] = reportDataConfig.$groupTestSummaryBy.map(g => EnumUtils.GetGroupTestResultsByString(g));
        // Guard the empty case instead of reading groupTestSummary[0] up front
        let groupTestSummaryString = "";
        if (groupTestSummary.length > 0) {
            groupTestSummaryString = groupTestSummary.join(",");
        }
this.logTelemetry({
pipelineId: pipelineConfig.$pipelineId,
pipelineType: pipelineTypeString,
projectId: pipelineConfig.$projectId,
projectName: pipelineConfig.$projectName,
environmentId: environmentId,
taskConfiguration: {
sendMailCondition: EnumUtils.GetMailConditionString(this.reportConfig.$sendMailCondition),
smtpHost: mailConfig.$smtpConfig.$smtpHost,
smtpUserName: mailConfig.$smtpConfig.$userName,
enableTLs: mailConfig.$smtpConfig.$enableTLS,
includeCommits: reportDataConfig.$includeCommits,
includeOthersInTotal: reportDataConfig.$includeOthersInTotal,
groupTestSummaryBy: groupTestSummaryString,
testResultsConfiguration: {
includeFailedTests: reportDataConfig.$testResultsConfig.$includeFailedTests,
includeInconclusiveTests: reportDataConfig.$testResultsConfig.$includeInconclusiveTests,
includeNotExecutedTests: reportDataConfig.$testResultsConfig.$includeNotExecutedTests,
includeOtherTests: reportDataConfig.$testResultsConfig.$includeOtherTests,
includePassedTests: reportDataConfig.$testResultsConfig.$includePassedTests,
maxItemsToShow: reportDataConfig.$testResultsConfig.$maxItemsToShow
}
}
});
    }

    public static LogModulePerf(moduleName: string, timeTaken: number, numRetries: number = 0) {
const timeTakenString = timeTaken.toFixed(2);
if (numRetries < 1) {
this.logTelemetry({
"ModuleName": `${moduleName}`,
"PERF": `${timeTakenString}`
});
} else {
this.logTelemetry({
"ModuleName": `${moduleName}`,
"PERF": `${timeTakenString}`,
"Retries": `${numRetries}`
});
}
}
/**
* Publishes an object as a string as telemetry
* @param telemetryToLog Object to be logged as a string
*/
private static logTelemetry(telemetryToLog: {}) {
console.log(
TelemetryLogger.TELEMETRY_LINE + JSON.stringify(telemetryToLog)
);
}
public static async InvokeWithPerfLogger<T>(executor: () => Promise<T>, executorName: string): Promise<T> {
const perfStart = now();
let returnVal: T;
try {
returnVal = await executor();
}
finally {
// Log time taken by the dataprovider
TelemetryLogger.LogModulePerf(executorName, now() - perfStart);
}
return returnVal;
}
}
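// Illustrative usage sketch (not part of the class): timing an async data
// provider so its duration is published as a PERF telemetry property.
// `fetchTestResults` is a hypothetical async function.
//
//     const results = await TelemetryLogger.InvokeWithPerfLogger(
//         () => fetchTestResults(),
//         "FetchTestResults");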
from_tsv.rs
use crate::commands::from_structured_data::from_structured_data;
use crate::commands::WholeStreamCommand;
use crate::prelude::*;
pub struct FromTSV;
#[derive(Deserialize)]
pub struct FromTSVArgs {
headerless: bool,
}
impl WholeStreamCommand for FromTSV {
fn name(&self) -> &str {
"from-tsv"
}
fn signature(&self) -> Signature {
Signature::build("from-tsv") | fn usage(&self) -> &str {
"Parse text as .tsv and create table."
}
fn run(
&self,
args: CommandArgs,
registry: &CommandRegistry,
) -> Result<OutputStream, ShellError> {
args.process(registry, from_tsv)?.run()
}
}
fn from_tsv(
FromTSVArgs { headerless }: FromTSVArgs,
runnable_context: RunnableContext,
) -> Result<OutputStream, ShellError> {
from_structured_data(headerless, '\t', "TSV", runnable_context)
} | .switch("headerless", "don't treat the first row as column names")
}
|
CONFIG_FIELD.go
/*
* Copyright (c) 2012-2020 MIRACL UK Ltd.
*
* This file is part of MIRACL Core
* (see https://github.com/miracl/core).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package NIST384
// Modulus types
const NOT_SPECIAL int = 0
const PSEUDO_MERSENNE int = 1
const MONTGOMERY_FRIENDLY int = 2
const GENERALISED_MERSENNE int = 3
const NEGATOWER int = 0
const POSITOWER int = 1
// Modulus details
const MODBITS uint = 384 /* Number of bits in Modulus */
const PM1D2 uint = 1 /* Modulus mod 8 */
const RIADZ int = -12 /* hash-to-point Z */
const RIADZG2A int = 0 /* G2 hash-to-point Z */
const RIADZG2B int = 0 /* G2 hash-to-point Z */
const MODTYPE int = NOT_SPECIAL //NOT_SPECIAL
const QNRI int = 0 // Fp2 QNR
const TOWER int = NEGATOWER // Tower type
const FEXCESS int32 = ((int32(1) << 8) - 1)
// Modulus Masks
const OMASK Chunk = ((Chunk(-1)) << (MODBITS % BASEBITS))
const TBITS uint = MODBITS % BASEBITS // Number of active bits in top word
const TMASK Chunk = (Chunk(1) << TBITS) - 1
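// Worked example of the masks (assuming BASEBITS = 56, a typical 64-bit
// build value; BASEBITS is defined elsewhere in this package):
//   TBITS = 384 % 56 = 48            // active bits in the top word
//   TMASK = (Chunk(1) << 48) - 1     // keeps those 48 bits
//   OMASK = Chunk(-1) << 48          // selects the excess bits above them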
const BIG_ENDIAN_SIGN bool = false
app.js
document.getElementById('button1').addEventListener('click', getText);
document.getElementById('button2').addEventListener('click', getJson);
document.getElementById('button3').addEventListener('click', getExternal);
// Get local text file data
function getText() {
fetch('test.txt')
.then(function(res){
return res.text();
})
.then(function(data) {
console.log(data);
document.getElementById('output').innerHTML = data;
})
.catch(function(err){
console.log(err);
});
}
// Get local json data
function getJson() {
fetch('posts.json')
.then(function(res){
return res.json();
})
.then(function(data) {
console.log(data);
let output = '';
data.forEach(function(post) {
output += `<li>${post.title}</li>`;
});
document.getElementById('output').innerHTML = output;
})
.catch(function(err){
console.log(err);
});
}
// Get from external API
function getExternal() {
fetch('https://api.github.com/users')
.then(function(res){
return res.json();
})
.then(function(data) {
console.log(data);
      let output = '';
      data.forEach(function(user) {
        output += `<li>${user.login}</li>`;
      });
      document.getElementById('output').innerHTML = output;
    })
.catch(function(err){
console.log(err);
});
}
terminal.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: terminal.proto
package terminal
import (
context "context"
fmt "fmt"
proto "github.com/golang/protobuf/proto"
_ "google.golang.org/genproto/googleapis/api/annotations"
grpc "google.golang.org/grpc"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type TerminalResize struct {
Columns int32 `protobuf:"varint,1,opt,name=columns,proto3" json:"columns,omitempty"`
Rows int32 `protobuf:"varint,2,opt,name=rows,proto3" json:"rows,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *TerminalResize) Reset() { *m = TerminalResize{} }
func (m *TerminalResize) String() string { return proto.CompactTextString(m) }
func (*TerminalResize) ProtoMessage() {}
func (*TerminalResize) Descriptor() ([]byte, []int) {
return fileDescriptor_ff8b8260c8ef16ad, []int{0}
}
func (m *TerminalResize) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_TerminalResize.Unmarshal(m, b)
}
func (m *TerminalResize) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_TerminalResize.Marshal(b, m, deterministic)
}
func (m *TerminalResize) XXX_Merge(src proto.Message) {
xxx_messageInfo_TerminalResize.Merge(m, src)
}
func (m *TerminalResize) XXX_Size() int {
return xxx_messageInfo_TerminalResize.Size(m)
}
func (m *TerminalResize) XXX_DiscardUnknown() {
xxx_messageInfo_TerminalResize.DiscardUnknown(m)
}
var xxx_messageInfo_TerminalResize proto.InternalMessageInfo
func (m *TerminalResize) GetColumns() int32 {
if m != nil {
return m.Columns
}
return 0
}
func (m *TerminalResize) GetRows() int32 {
if m != nil {
return m.Rows
}
return 0
}
type SessionRequest struct {
// Types that are valid to be assigned to Command:
// *SessionRequest_Message
// *SessionRequest_Resize
Command isSessionRequest_Command `protobuf_oneof:"command"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SessionRequest) Reset() { *m = SessionRequest{} }
func (m *SessionRequest) String() string { return proto.CompactTextString(m) }
func (*SessionRequest) ProtoMessage() {}
func (*SessionRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_ff8b8260c8ef16ad, []int{1}
}
func (m *SessionRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SessionRequest.Unmarshal(m, b)
}
func (m *SessionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SessionRequest.Marshal(b, m, deterministic)
}
func (m *SessionRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_SessionRequest.Merge(m, src)
}
func (m *SessionRequest) XXX_Size() int {
return xxx_messageInfo_SessionRequest.Size(m)
}
func (m *SessionRequest) XXX_DiscardUnknown() {
xxx_messageInfo_SessionRequest.DiscardUnknown(m)
}
var xxx_messageInfo_SessionRequest proto.InternalMessageInfo
type isSessionRequest_Command interface {
isSessionRequest_Command()
}
type SessionRequest_Message struct {
Message string `protobuf:"bytes,1,opt,name=message,proto3,oneof"`
}
type SessionRequest_Resize struct {
Resize *TerminalResize `protobuf:"bytes,2,opt,name=resize,proto3,oneof"`
}
func (*SessionRequest_Message) isSessionRequest_Command() {}
func (*SessionRequest_Resize) isSessionRequest_Command() {}
func (m *SessionRequest) GetCommand() isSessionRequest_Command {
if m != nil {
return m.Command
}
return nil
}
func (m *SessionRequest) GetMessage() string {
if x, ok := m.GetCommand().(*SessionRequest_Message); ok {
return x.Message
}
return ""
}
func (m *SessionRequest) GetResize() *TerminalResize {
if x, ok := m.GetCommand().(*SessionRequest_Resize); ok {
return x.Resize
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*SessionRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _SessionRequest_OneofMarshaler, _SessionRequest_OneofUnmarshaler, _SessionRequest_OneofSizer, []interface{}{
(*SessionRequest_Message)(nil),
(*SessionRequest_Resize)(nil),
}
}
func _SessionRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*SessionRequest)
// command
switch x := m.Command.(type) {
case *SessionRequest_Message:
b.EncodeVarint(1<<3 | proto.WireBytes)
b.EncodeStringBytes(x.Message)
case *SessionRequest_Resize:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Resize); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("SessionRequest.Command has unexpected type %T", x)
}
return nil
}
func _SessionRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*SessionRequest)
switch tag {
case 1: // command.message
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeStringBytes()
m.Command = &SessionRequest_Message{x}
return true, err
case 2: // command.resize
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(TerminalResize)
err := b.DecodeMessage(msg)
m.Command = &SessionRequest_Resize{msg}
return true, err
default:
return false, nil
}
}
func _SessionRequest_OneofSizer(msg proto.Message) (n int) {
m := msg.(*SessionRequest)
// command
switch x := m.Command.(type) {
case *SessionRequest_Message:
n += 1 // tag and wire
n += proto.SizeVarint(uint64(len(x.Message)))
n += len(x.Message)
case *SessionRequest_Resize:
s := proto.Size(x.Resize)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type SessionResponse struct {
Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SessionResponse) Reset() { *m = SessionResponse{} }
func (m *SessionResponse) String() string { return proto.CompactTextString(m) }
func (*SessionResponse) ProtoMessage() {}
func (*SessionResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_ff8b8260c8ef16ad, []int{2}
}
func (m *SessionResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SessionResponse.Unmarshal(m, b)
}
func (m *SessionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SessionResponse.Marshal(b, m, deterministic)
}
func (m *SessionResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_SessionResponse.Merge(m, src)
}
func (m *SessionResponse) XXX_Size() int {
return xxx_messageInfo_SessionResponse.Size(m)
}
func (m *SessionResponse) XXX_DiscardUnknown() {
xxx_messageInfo_SessionResponse.DiscardUnknown(m)
}
var xxx_messageInfo_SessionResponse proto.InternalMessageInfo
func (m *SessionResponse) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
func init() {
proto.RegisterType((*TerminalResize)(nil), "terminal.TerminalResize")
proto.RegisterType((*SessionRequest)(nil), "terminal.SessionRequest")
proto.RegisterType((*SessionResponse)(nil), "terminal.SessionResponse")
}
func init() { proto.RegisterFile("terminal.proto", fileDescriptor_ff8b8260c8ef16ad) }
var fileDescriptor_ff8b8260c8ef16ad = []byte{
// 247 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xcf, 0x4a, 0xc4, 0x40,
0x0c, 0x87, 0xb7, 0x8b, 0x6e, 0x6d, 0x16, 0x2a, 0xe6, 0x54, 0x8b, 0x07, 0xe9, 0x69, 0x41, 0xd8,
0x4a, 0xbd, 0x7b, 0xf0, 0xb4, 0xe7, 0x51, 0x1f, 0x60, 0xd4, 0x50, 0x06, 0x3b, 0x93, 0xda, 0xcc,
0x22, 0x78, 0xf4, 0x15, 0x7c, 0x34, 0x5f, 0xc1, 0x07, 0x11, 0xa7, 0x7f, 0xb0, 0xb0, 0xb7, 0x49,
0xe6, 0x97, 0x7c, 0x1f, 0x81, 0xd4, 0x53, 0x67, 0x8d, 0xd3, 0xcd, 0xb6, 0xed, 0xd8, 0x33, 0x9e,
0x8c, 0x75, 0x7e, 0x51, 0x33, 0xd7, 0x0d, 0x95, 0xba, 0x35, 0xa5, 0x76, 0x8e, 0xbd, 0xf6, 0x86,
0x9d, 0xf4, 0xb9, 0xe2, 0x16, 0xd2, 0x87, 0x21, 0xa9, 0x48, 0xcc, 0x07, 0x61, 0x06, 0xf1, 0x33,
0x37, 0x7b, 0xeb, 0x24, 0x8b, 0x2e, 0xa3, 0xcd, 0xb1, 0x1a, 0x4b, 0x44, 0x38, 0xea, 0xf8, 0x5d,
0xb2, 0x65, 0x68, 0x87, 0x77, 0xf1, 0x0a, 0xe9, 0x3d, 0x89, 0x18, 0x76, 0x8a, 0xde, 0xf6, 0x24,
0x1e, 0x73, 0x88, 0x2d, 0x89, 0xe8, 0x9a, 0xc2, 0x7c, 0xb2, 0x5b, 0xa8, 0xb1, 0x81, 0x15, 0xac,
0xba, 0x40, 0x09, 0x3b, 0xd6, 0x55, 0xb6, 0x9d, 0xb4, 0xe7, 0x16, 0xbb, 0x85, 0x1a, 0x92, 0x77,
0xc9, 0x9f, 0x8f, 0xb5, 0xda, 0xbd, 0x14, 0x57, 0x70, 0x3a, 0xc1, 0xa4, 0x65, 0x27, 0xc1, 0x76,
0x46, 0x9b, 0x58, 0x95, 0x86, 0xe9, 0x06, 0xf8, 0x08, 0xf1, 0x30, 0x88, 0xff, 0x90, 0x73, 0xf1,
0xfc, 0xfc, 0xc0, 0x4f, 0x4f, 0x29, 0xce, 0x3e, 0xbf, 0x7f, 0xbe, 0x96, 0x6b, 0x4c, 0xca, 0x31,
0xb2, 0x89, 0xae, 0xa3, 0xa7, 0x55, 0xb8, 0xe1, 0xcd, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x23,
0x32, 0x28, 0x45, 0x7d, 0x01, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// TerminalClient is the client API for Terminal service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type TerminalClient interface {
Session(ctx context.Context, opts ...grpc.CallOption) (Terminal_SessionClient, error)
}
type terminalClient struct {
cc *grpc.ClientConn
}
func NewTerminalClient(cc *grpc.ClientConn) TerminalClient {
return &terminalClient{cc}
}
func (c *terminalClient) Session(ctx context.Context, opts ...grpc.CallOption) (Terminal_SessionClient, error) {
stream, err := c.cc.NewStream(ctx, &_Terminal_serviceDesc.Streams[0], "/terminal.terminal/Session", opts...)
if err != nil {
return nil, err
}
x := &terminalSessionClient{stream}
return x, nil
}
type Terminal_SessionClient interface {
Send(*SessionRequest) error
Recv() (*SessionResponse, error)
grpc.ClientStream
}
type terminalSessionClient struct {
grpc.ClientStream
}
func (x *terminalSessionClient) Send(m *SessionRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *terminalSessionClient) Recv() (*SessionResponse, error) {
m := new(SessionResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// TerminalServer is the server API for Terminal service.
type TerminalServer interface {
Session(Terminal_SessionServer) error
}
func RegisterTerminalServer(s *grpc.Server, srv TerminalServer) {
s.RegisterService(&_Terminal_serviceDesc, srv)
}
func _Terminal_Session_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(TerminalServer).Session(&terminalSessionServer{stream})
}
type Terminal_SessionServer interface {
Send(*SessionResponse) error
Recv() (*SessionRequest, error)
grpc.ServerStream
}
type terminalSessionServer struct {
grpc.ServerStream
}
func (x *terminalSessionServer) Send(m *SessionResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *terminalSessionServer) Recv() (*SessionRequest, error) {
m := new(SessionRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
var _Terminal_serviceDesc = grpc.ServiceDesc{
ServiceName: "terminal.terminal",
HandlerType: (*TerminalServer)(nil),
Methods: []grpc.MethodDesc{},
Streams: []grpc.StreamDesc{
{
StreamName: "Session",
Handler: _Terminal_Session_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "terminal.proto",
}
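// Illustrative client sketch (not part of the generated file): opening the
// bidirectional Session stream and exchanging one message. The dial target
// and the command string are assumptions.
//
//	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer conn.Close()
//	client := NewTerminalClient(conn)
//	stream, err := client.Session(context.Background())
//	if err != nil {
//		log.Fatal(err)
//	}
//	if err := stream.Send(&SessionRequest{
//		Command: &SessionRequest_Message{Message: "ls\n"},
//	}); err != nil {
//		log.Fatal(err)
//	}
//	resp, err := stream.Recv()
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(resp.GetMessage())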
runtime.rs
// Copyright (c) Microsoft. All rights reserved.
#![deny(rust_2018_idioms, warnings)]
#![deny(clippy::all, clippy::pedantic)]
#![allow(clippy::default_trait_access, clippy::too_many_lines)]
use std::collections::{BTreeMap, HashMap};
use std::str;
use std::sync::{Arc, RwLock};
use std::time::Duration;
use failure::Fail;
use futures::future;
use futures::prelude::*;
use hyper::{Body, Method, Request, Response, StatusCode};
use maplit::btreemap;
use serde_json::{self, json};
use tempfile::NamedTempFile;
use typed_headers::{mime, ContentLength, ContentType, HeaderMapExt};
use url::form_urlencoded::parse as parse_query;
use docker::models::{
AuthConfig, ContainerCreateBody, ContainerHostConfig, ContainerNetworkSettings,
ContainerSummary, HostConfig, HostConfigPortBindings, ImageDeleteResponseItem, NetworkConfig,
};
use edgelet_core::{
ImagePullPolicy, LogOptions, LogTail, MakeModuleRuntime, Module, ModuleRegistry, ModuleRuntime,
ModuleSpec, RegistryOperation, RuntimeOperation,
};
use edgelet_docker::{DockerConfig, DockerModuleRuntime, Settings};
use edgelet_docker::{Error, ErrorKind};
use edgelet_test_utils::web::{
make_req_dispatcher, HttpMethod, RequestHandler, RequestPath, ResponseFuture,
};
use edgelet_test_utils::{routes, run_tcp_server};
use hyper::Error as HyperError;
const IMAGE_NAME: &str = "nginx:latest";
const INVALID_IMAGE_NAME: &str = "invalidname:latest";
const INVALID_IMAGE_HOST: &str = "invalidhost.com/nginx:latest";
fn make_settings(moby_runtime: &str) -> Settings {
use std::io::Write;
lazy_static::lazy_static! {
static ref ENV_LOCK: std::sync::Mutex<()> = Default::default();
}
let _env_lock = ENV_LOCK.lock().expect("env lock poisoned");
let mut config_file = NamedTempFile::new().expect("could not create tempfile for config");
config_file
.write_all(
r#"
hostname = "zoo"
homedir = "/var/lib/aziot/edged"
[agent]
name = "edgeAgent"
type = "docker"
[agent.config]
image = "microsoft/azureiotedge-agent:1.0"
[connect]
workload_uri = "unix:///var/lib/iotedge/workload.sock"
management_uri = "unix:///var/lib/iotedge/mgmt.sock"
[listen]
workload_uri = "unix:///var/lib/iotedge/workload.sock"
management_uri = "unix:///var/lib/iotedge/mgmt.sock"
"#
.as_bytes(),
)
.expect("could not write to config file");
config_file
.write_all(moby_runtime.as_bytes())
.expect("could not write to config file");
std::env::set_var("AZIOT_EDGED_CONFIG", config_file.path());
Settings::new().unwrap()
}
fn make_get_networks_handler(
on_get: impl Fn() -> String + Clone + Send + 'static,
) -> impl Fn(Request<Body>) -> ResponseFuture + Clone {
move |_| {
let response = on_get();
let response_len = response.len();
let mut response = Response::new(response.into());
response
.headers_mut()
.typed_insert(&ContentLength(response_len as u64));
response
.headers_mut()
.typed_insert(&ContentType(mime::APPLICATION_JSON));
Box::new(future::ok(response)) as ResponseFuture
}
}
fn make_create_network_handler(
on_post: impl Fn(Request<Body>) + Clone + Send + 'static,
) -> impl Fn(Request<Body>) -> ResponseFuture + Clone {
move |req| {
on_post(req);
let response = json!({
"Id": "12345",
"Warnings": ""
})
.to_string();
let response_len = response.len();
let mut response = Response::new(response.into());
response
.headers_mut()
.typed_insert(&ContentLength(response_len as u64));
response
.headers_mut()
.typed_insert(&ContentType(mime::APPLICATION_JSON));
Box::new(future::ok(response)) as ResponseFuture
}
}
fn not_found_handler(_: Request<Body>) -> ResponseFuture {
let response = Response::builder()
.status(StatusCode::NOT_FOUND)
.body(Body::default())
.unwrap();
Box::new(future::ok(response))
}
fn make_network_handler(
on_get: impl Fn() -> String + Clone + Send + 'static,
on_post: impl Fn(Request<Body>) + Clone + Send + 'static,
) -> impl Fn(Request<Body>) -> Box<dyn Future<Item = Response<Body>, Error = HyperError> + Send> + Clone
{
let dispatch_table = routes!(
GET "/networks" => make_get_networks_handler(on_get),
POST "/networks/create" => make_create_network_handler(on_post),
);
make_req_dispatcher(dispatch_table, Box::new(not_found_handler))
}
fn default_get_networks_handler() -> impl Fn(Request<Body>) -> ResponseFuture + Clone {
make_get_networks_handler(|| json!([]).to_string())
}
fn default_create_network_handler() -> impl Fn(Request<Body>) -> ResponseFuture + Clone {
make_create_network_handler(|_| ())
}
fn default_network_handler(
) -> impl Fn(Request<Body>) -> Box<dyn Future<Item = Response<Body>, Error = HyperError> + Send> + Clone
{
let dispatch_table = routes!(
GET "/networks" => default_get_networks_handler(),
POST "/networks/create" => default_create_network_handler(),
);
make_req_dispatcher(dispatch_table, Box::new(not_found_handler))
}
#[allow(clippy::needless_pass_by_value)]
fn invalid_image_name_pull_handler(req: Request<Body>) -> ResponseFuture {
// verify that path is /images/create and that the "fromImage" query
// parameter has the image name we expect
assert_eq!(req.uri().path(), "/images/create");
let query_map: HashMap<String, String> = parse_query(req.uri().query().unwrap().as_bytes())
.into_owned()
.collect();
assert!(query_map.contains_key("fromImage"));
assert_eq!(
query_map.get("fromImage").map(AsRef::as_ref),
Some(INVALID_IMAGE_NAME)
);
let response = format!(
r#"{{
"message": "manifest for {} not found"
}}
"#,
INVALID_IMAGE_NAME
);
let response_len = response.len();
let mut response = Response::new(response.into());
response
.headers_mut()
.typed_insert(&ContentLength(response_len as u64));
response
.headers_mut()
.typed_insert(&ContentType(mime::APPLICATION_JSON));
*response.status_mut() = hyper::StatusCode::NOT_FOUND;
Box::new(future::ok(response))
}
#[test]
fn image_pull_with_invalid_image_name_fails() {
let dispatch_table = routes!(
GET "/networks" => default_get_networks_handler(),
POST "/networks/create" => default_create_network_handler(),
POST "/images/create" => invalid_image_name_pull_handler,
);
let (server, port) = run_tcp_server(
"127.0.0.1",
make_req_dispatcher(dispatch_table, Box::new(not_found_handler)),
);
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let task = DockerModuleRuntime::make_runtime(settings).and_then(|runtime| {
let auth = AuthConfig::new()
.with_username("u1".to_string())
.with_password("bleh".to_string())
.with_email("[email protected]".to_string())
.with_serveraddress("svr1".to_string());
let config = DockerConfig::new(
INVALID_IMAGE_NAME.to_string(),
ContainerCreateBody::new(),
None,
Some(auth),
)
.unwrap();
runtime.pull(&config)
});
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
// Assert
let err = runtime
.block_on(task)
.expect_err("Expected runtime pull method to fail due to invalid image name.");
match (err.kind(), err.cause().and_then(Fail::downcast_ref)) {
(
edgelet_docker::ErrorKind::RegistryOperation(
edgelet_core::RegistryOperation::PullImage(name),
),
Some(edgelet_docker::ErrorKind::NotFound(message)),
) if name == INVALID_IMAGE_NAME => {
assert_eq!(
&format!("manifest for {} not found", INVALID_IMAGE_NAME),
message
);
}
_ => panic!(
"Specific docker runtime message is expected for invalid image name. Got {:?}",
err.kind()
),
}
}
#[allow(clippy::needless_pass_by_value)]
fn invalid_image_host_pull_handler(req: Request<Body>) -> ResponseFuture {
// verify that path is /images/create and that the "fromImage" query
// parameter has the image name we expect
assert_eq!(req.uri().path(), "/images/create");
let query_map: HashMap<String, String> = parse_query(req.uri().query().unwrap().as_bytes())
.into_owned()
.collect();
assert!(query_map.contains_key("fromImage"));
assert_eq!(
query_map.get("fromImage").map(AsRef::as_ref),
Some(INVALID_IMAGE_HOST)
);
let response = format!(
r#"
{{
"message":"Get https://invalidhost.com: dial tcp: lookup {} on X.X.X.X: no such host"
}}
"#,
INVALID_IMAGE_HOST
);
let response_len = response.len();
let mut response = Response::new(response.into());
response
.headers_mut()
.typed_insert(&ContentLength(response_len as u64));
response
.headers_mut()
.typed_insert(&ContentType(mime::APPLICATION_JSON));
*response.status_mut() = hyper::StatusCode::INTERNAL_SERVER_ERROR;
Box::new(future::ok(response))
}
#[test]
fn image_pull_with_invalid_image_host_fails() {
let dispatch_table = routes!(
GET "/networks" => default_get_networks_handler(),
POST "/networks/create" => default_create_network_handler(),
POST "/images/create" => invalid_image_host_pull_handler,
);
let (server, port) = run_tcp_server(
"127.0.0.1",
make_req_dispatcher(dispatch_table, Box::new(not_found_handler)),
);
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let task = DockerModuleRuntime::make_runtime(settings).and_then(|runtime| {
let auth = AuthConfig::new()
.with_username("u1".to_string())
.with_password("bleh".to_string())
.with_email("[email protected]".to_string())
.with_serveraddress("svr1".to_string());
let config = DockerConfig::new(
INVALID_IMAGE_HOST.to_string(),
ContainerCreateBody::new(),
None,
Some(auth),
)
.unwrap();
runtime.pull(&config)
});
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
// Assert
let err = runtime
.block_on(task)
.expect_err("Expected runtime pull method to fail due to invalid image host.");
match (err.kind(), err.cause().and_then(Fail::downcast_ref)) {
(
edgelet_docker::ErrorKind::RegistryOperation(
edgelet_core::RegistryOperation::PullImage(name),
),
Some(edgelet_docker::ErrorKind::FormattedDockerRuntime(message)),
) if name == INVALID_IMAGE_HOST => {
assert_eq!(
&format!(
"Get https://invalidhost.com: dial tcp: lookup {} on X.X.X.X: no such host",
INVALID_IMAGE_HOST
),
message
);
}
_ => panic!(
"Specific docker runtime message is expected for invalid image host. Got {:?}",
err.kind()
),
}
}
#[allow(clippy::needless_pass_by_value)]
fn image_pull_with_invalid_creds_handler(req: Request<Body>) -> ResponseFuture {
// verify that path is /images/create and that the "fromImage" query
// parameter has the image name we expect
assert_eq!(req.uri().path(), "/images/create");
let query_map: HashMap<String, String> = parse_query(req.uri().query().unwrap().as_bytes())
.into_owned()
.collect();
assert!(query_map.contains_key("fromImage"));
assert_eq!(query_map.get("fromImage"), Some(&IMAGE_NAME.to_string()));
// verify registry creds
let auth_str = req
.headers()
.get_all("X-Registry-Auth")
.into_iter()
.map(|bytes| base64::decode_config(bytes, base64::URL_SAFE).unwrap())
.map(|raw| str::from_utf8(&raw).unwrap().to_owned())
.collect::<String>();
let auth_config: AuthConfig = serde_json::from_str(&auth_str).unwrap();
assert_eq!(auth_config.username(), Some("us1"));
assert_eq!(auth_config.password(), Some("ac?ac~aaac???"));
assert_eq!(auth_config.email(), Some("[email protected]"));
assert_eq!(auth_config.serveraddress(), Some("svr1"));
let response = format!(
r#"
{{
"message":"Get {}: unauthorized: authentication required"
}}
"#,
IMAGE_NAME
);
let response_len = response.len();
let mut response = Response::new(response.into());
response
.headers_mut()
.typed_insert(&ContentLength(response_len as u64));
response
.headers_mut()
.typed_insert(&ContentType(mime::APPLICATION_JSON));
*response.status_mut() = hyper::StatusCode::INTERNAL_SERVER_ERROR;
Box::new(future::ok(response))
}
#[test]
fn image_pull_with_invalid_creds_fails() {
let dispatch_table = routes!(
GET "/networks" => default_get_networks_handler(),
POST "/networks/create" => default_create_network_handler(),
POST "/images/create" => image_pull_with_invalid_creds_handler,
);
let (server, port) = run_tcp_server(
"127.0.0.1",
make_req_dispatcher(dispatch_table, Box::new(not_found_handler)),
);
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let task = DockerModuleRuntime::make_runtime(settings).and_then(|runtime| {
// password is written to guarantee base64 encoding has '-' and/or '_'
let auth = AuthConfig::new()
.with_username("us1".to_string())
.with_password("ac?ac~aaac???".to_string())
.with_email("[email protected]".to_string())
.with_serveraddress("svr1".to_string());
let config = DockerConfig::new(
IMAGE_NAME.to_string(),
ContainerCreateBody::new(),
None,
Some(auth),
)
.unwrap();
runtime.pull(&config)
});
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
// Assert
let err = runtime
.block_on(task)
.expect_err("Expected runtime pull method to fail due to unauthentication.");
match (err.kind(), err.cause().and_then(Fail::downcast_ref)) {
(
edgelet_docker::ErrorKind::RegistryOperation(
edgelet_core::RegistryOperation::PullImage(name),
),
Some(edgelet_docker::ErrorKind::FormattedDockerRuntime(message)),
) if name == IMAGE_NAME => {
assert_eq!(
&format!(
"Get {}: unauthorized: authentication required",
&IMAGE_NAME.to_string()
),
message
);
}
_ => panic!(
"Specific docker runtime message is expected for unauthentication. Got {:?}",
err.kind()
),
}
}
#[allow(clippy::needless_pass_by_value)]
fn image_pull_handler(req: Request<Body>) -> ResponseFuture {
// verify that path is /images/create and that the "fromImage" query
// parameter has the image name we expect
assert_eq!(req.uri().path(), "/images/create");
let query_map: HashMap<String, String> = parse_query(req.uri().query().unwrap().as_bytes())
.into_owned()
.collect();
assert!(query_map.contains_key("fromImage"));
assert_eq!(query_map.get("fromImage"), Some(&IMAGE_NAME.to_string()));
let response = r#"
{
"Id": "img1",
"Warnings": []
}
"#;
let response_len = response.len();
let mut response = Response::new(response.into());
response
.headers_mut()
.typed_insert(&ContentLength(response_len as u64));
response
.headers_mut()
.typed_insert(&ContentType(mime::APPLICATION_JSON));
Box::new(future::ok(response))
}
#[test]
fn image_pull_succeeds() {
let dispatch_table = routes!(
GET "/networks" => default_get_networks_handler(),
POST "/networks/create" => default_create_network_handler(),
POST "/images/create" => image_pull_handler,
);
let (server, port) = run_tcp_server(
"127.0.0.1",
make_req_dispatcher(dispatch_table, Box::new(not_found_handler)),
);
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let task = DockerModuleRuntime::make_runtime(settings).and_then(|runtime| {
let auth = AuthConfig::new()
.with_username("u1".to_string())
.with_password("bleh".to_string())
.with_email("[email protected]".to_string())
.with_serveraddress("svr1".to_string());
let config = DockerConfig::new(
IMAGE_NAME.to_string(),
ContainerCreateBody::new(),
None,
Some(auth),
)
.unwrap();
runtime.pull(&config)
});
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
runtime.block_on(task).unwrap();
}
#[allow(clippy::needless_pass_by_value)]
fn image_pull_with_creds_handler(req: Request<Body>) -> ResponseFuture {
// verify that path is /images/create and that the "fromImage" query
// parameter has the image name we expect
assert_eq!(req.uri().path(), "/images/create");
let query_map: HashMap<String, String> = parse_query(req.uri().query().unwrap().as_bytes())
.into_owned()
.collect();
assert!(query_map.contains_key("fromImage"));
assert_eq!(query_map.get("fromImage"), Some(&IMAGE_NAME.to_string()));
// verify registry creds
let auth_str = req
.headers()
.get_all("X-Registry-Auth")
.into_iter()
.map(|bytes| base64::decode_config(bytes, base64::URL_SAFE).unwrap())
.map(|raw| str::from_utf8(&raw).unwrap().to_owned())
.collect::<String>();
let auth_config: AuthConfig = serde_json::from_str(&auth_str).unwrap();
assert_eq!(auth_config.username(), Some("u1"));
assert_eq!(auth_config.password(), Some("bleh"));
assert_eq!(auth_config.email(), Some("[email protected]"));
assert_eq!(auth_config.serveraddress(), Some("svr1"));
let response = r#"
{
"Id": "img1",
"Warnings": []
}
"#;
let response_len = response.len();
let mut response = Response::new(response.into());
response
.headers_mut()
.typed_insert(&ContentLength(response_len as u64));
response
.headers_mut()
.typed_insert(&ContentType(mime::APPLICATION_JSON));
Box::new(future::ok(response))
}
#[test]
fn image_pull_with_creds_succeeds() {
let dispatch_table = routes!(
GET "/networks" => default_get_networks_handler(),
POST "/networks/create" => default_create_network_handler(),
POST "/images/create" => image_pull_with_creds_handler,
);
let (server, port) = run_tcp_server(
"127.0.0.1",
make_req_dispatcher(dispatch_table, Box::new(not_found_handler)),
);
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let task = DockerModuleRuntime::make_runtime(settings).and_then(|runtime| {
let auth = AuthConfig::new()
.with_username("u1".to_string())
.with_password("bleh".to_string())
.with_email("[email protected]".to_string())
.with_serveraddress("svr1".to_string());
let config = DockerConfig::new(
IMAGE_NAME.to_string(),
ContainerCreateBody::new(),
None,
Some(auth),
)
.unwrap();
runtime.pull(&config)
});
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
runtime.block_on(task).unwrap();
}
#[allow(clippy::needless_pass_by_value)]
fn image_remove_handler(req: Request<Body>) -> ResponseFuture {
assert_eq!(req.method(), &Method::DELETE);
assert_eq!(req.uri().path(), &format!("/images/{}", IMAGE_NAME));
let response = serde_json::to_string(&vec![
ImageDeleteResponseItem::new().with_deleted(IMAGE_NAME.to_string())
])
.unwrap();
let response_len = response.len();
let mut response = Response::new(response.into());
response
.headers_mut()
.typed_insert(&ContentLength(response_len as u64));
response
.headers_mut()
.typed_insert(&ContentType(mime::APPLICATION_JSON));
Box::new(future::ok(response))
}
#[test]
fn image_remove_succeeds() {
let dispatch_table = routes!(
GET "/networks" => default_get_networks_handler(),
POST "/networks/create" => default_create_network_handler(),
DELETE format!("/images/{}", IMAGE_NAME) => image_remove_handler,
);
let (server, port) = run_tcp_server(
"127.0.0.1",
make_req_dispatcher(dispatch_table, Box::new(not_found_handler)),
);
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let task = DockerModuleRuntime::make_runtime(settings)
.and_then(|runtime| ModuleRegistry::remove(&runtime, IMAGE_NAME));
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
runtime.block_on(task).unwrap();
}
fn container_create_handler(req: Request<Body>) -> ResponseFuture {
assert_eq!(req.method(), &Method::POST);
assert_eq!(req.uri().path(), "/containers/create");
let response = json!({
"Id": "12345",
"Warnings": []
})
.to_string();
let response_len = response.len();
Box::new(
req.into_body()
.concat2()
.and_then(|body| {
let create_options: ContainerCreateBody =
serde_json::from_slice(body.as_ref()).unwrap();
assert_eq!("nginx:latest", create_options.image().unwrap());
for &v in &["/do/the/custom/command", "with these args"] {
assert!(create_options.cmd().unwrap().contains(&v.to_string()));
}
for &v in &["/also/do/the/entrypoint", "and this"] {
assert!(create_options
.entrypoint()
.unwrap()
.contains(&v.to_string()));
}
for &v in &["k1=v1", "k2=v2", "k3=v3", "k4=v4", "k5=v5"] {
assert!(create_options.env().unwrap().contains(&v.to_string()));
}
let port_bindings = create_options
.host_config()
.unwrap()
.port_bindings()
.unwrap();
assert_eq!(
"8080",
port_bindings
.get("80/tcp")
.unwrap()
.iter()
.next()
.unwrap()
.host_port()
.unwrap()
);
assert_eq!(
"11022",
port_bindings
.get("22/tcp")
.unwrap()
.iter()
.next()
.unwrap()
.host_port()
.unwrap()
);
let volumes = create_options.volumes().unwrap();
let mut expected = ::std::collections::BTreeMap::new();
expected.insert("test1".to_string(), json!({}));
assert_eq!(*volumes, expected);
Ok(())
})
.map(move |_| {
let mut response = Response::new(response.into());
response
.headers_mut()
.typed_insert(&ContentLength(response_len as u64));
response
.headers_mut()
.typed_insert(&ContentType(mime::APPLICATION_JSON));
response
}),
)
}
#[test]
fn container_create_succeeds() {
let dispatch_table = routes!(
GET "/networks" => default_get_networks_handler(),
POST "/networks/create" => default_create_network_handler(),
POST "/containers/create" => container_create_handler,
);
let (server, port) = run_tcp_server(
"127.0.0.1",
make_req_dispatcher(dispatch_table, Box::new(not_found_handler)),
);
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let task = DockerModuleRuntime::make_runtime(settings).and_then(|runtime| {
let mut env = BTreeMap::new();
env.insert("k1".to_string(), "v1".to_string());
env.insert("k2".to_string(), "v2".to_string());
env.insert("k3".to_string(), "v3".to_string());
// add some create options
let mut port_bindings = BTreeMap::new();
port_bindings.insert(
"22/tcp".to_string(),
vec![HostConfigPortBindings::new().with_host_port("11022".to_string())],
);
port_bindings.insert(
"80/tcp".to_string(),
vec![HostConfigPortBindings::new().with_host_port("8080".to_string())],
);
let memory: i64 = 3_221_225_472;
let mut volumes = ::std::collections::BTreeMap::new();
volumes.insert("test1".to_string(), json!({}));
let create_options = ContainerCreateBody::new()
.with_host_config(
HostConfig::new()
.with_port_bindings(port_bindings)
.with_memory(memory),
)
.with_cmd(vec![
"/do/the/custom/command".to_string(),
"with these args".to_string(),
])
.with_entrypoint(vec![
"/also/do/the/entrypoint".to_string(),
"and this".to_string(),
])
.with_env(vec!["k4=v4".to_string(), "k5=v5".to_string()])
.with_volumes(volumes);
let module_config = ModuleSpec::new(
"m1".to_string(),
"docker".to_string(),
DockerConfig::new("nginx:latest".to_string(), create_options, None, None).unwrap(),
env,
ImagePullPolicy::default(),
)
.unwrap();
runtime.create(module_config)
});
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
runtime.block_on(task).unwrap();
}
#[allow(clippy::needless_pass_by_value)]
fn container_start_handler(req: Request<Body>) -> ResponseFuture {
assert_eq!(req.method(), &Method::POST);
assert_eq!(req.uri().path(), "/containers/m1/start");
Box::new(future::ok(Response::new(Body::empty())))
}
#[test]
fn container_start_succeeds() {
let dispatch_table = routes!(
GET "/networks" => default_get_networks_handler(),
POST "/networks/create" => default_create_network_handler(),
POST "/containers/m1/start" => container_start_handler,
);
let (server, port) = run_tcp_server(
"127.0.0.1",
make_req_dispatcher(dispatch_table, Box::new(not_found_handler)),
);
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let task = DockerModuleRuntime::make_runtime(settings).and_then(|runtime| runtime.start("m1"));
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
runtime.block_on(task).unwrap();
}
#[allow(clippy::needless_pass_by_value)]
fn container_stop_handler(req: Request<Body>) -> ResponseFuture {
assert_eq!(req.method(), &Method::POST);
assert_eq!(req.uri().path(), "/containers/m1/stop");
Box::new(future::ok(Response::new(Body::empty())))
}
#[test]
fn container_stop_succeeds() {
let dispatch_table = routes!(
GET "/networks" => default_get_networks_handler(),
POST "/networks/create" => default_create_network_handler(),
POST "/containers/m1/stop" => container_stop_handler,
);
let (server, port) = run_tcp_server(
"127.0.0.1",
make_req_dispatcher(dispatch_table, Box::new(not_found_handler)),
);
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let task =
DockerModuleRuntime::make_runtime(settings).and_then(|runtime| runtime.stop("m1", None));
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
runtime.block_on(task).unwrap();
}
#[allow(clippy::needless_pass_by_value)]
fn container_stop_with_timeout_handler(req: Request<Body>) -> ResponseFuture {
assert_eq!(req.method(), &Method::POST);
assert_eq!(req.uri().path(), "/containers/m1/stop");
assert_eq!(req.uri().query().unwrap(), "t=600");
Box::new(future::ok(Response::new(Body::empty())))
}
#[test]
fn container_stop_with_timeout_succeeds() |
#[allow(clippy::needless_pass_by_value)]
fn container_remove_handler(req: Request<Body>) -> ResponseFuture {
assert_eq!(req.method(), &Method::DELETE);
assert_eq!(req.uri().path(), "/containers/m1");
Box::new(future::ok(Response::new(Body::empty())))
}
#[test]
fn container_remove_succeeds() {
let dispatch_table = routes!(
GET "/networks" => default_get_networks_handler(),
POST "/networks/create" => default_create_network_handler(),
DELETE "/containers/m1" => container_remove_handler,
);
let (server, port) = run_tcp_server(
"127.0.0.1",
make_req_dispatcher(dispatch_table, Box::new(not_found_handler)),
);
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let task = DockerModuleRuntime::make_runtime(settings)
.and_then(|runtime| ModuleRuntime::remove(&runtime, "m1"));
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
runtime.block_on(task).unwrap();
}
#[allow(clippy::needless_pass_by_value)]
fn container_list_handler(req: Request<Body>) -> ResponseFuture {
assert_eq!(req.method(), &Method::GET);
assert_eq!(req.uri().path(), "/containers/json");
let query_map: HashMap<String, String> = parse_query(req.uri().query().unwrap().as_bytes())
.into_owned()
.collect();
assert!(query_map.contains_key("filters"));
assert_eq!(
query_map.get("filters"),
Some(
&json!({
"label": vec!["net.azure-devices.edge.owner=Microsoft.Azure.Devices.Edge.Agent"]
})
.to_string()
)
);
let mut labels = HashMap::new();
labels.insert("l1".to_string(), "v1".to_string());
labels.insert("l2".to_string(), "v2".to_string());
labels.insert("l3".to_string(), "v3".to_string());
let modules = vec![
ContainerSummary::new(
"m1".to_string(),
vec!["/m1".to_string()],
"nginx:latest".to_string(),
"img1".to_string(),
"".to_string(),
10,
vec![],
10,
10,
labels.clone(),
"".to_string(),
"".to_string(),
ContainerHostConfig::new(""),
ContainerNetworkSettings::new(HashMap::new()),
vec![],
),
ContainerSummary::new(
"m2".to_string(),
vec!["/m2".to_string()],
"ubuntu:latest".to_string(),
"img2".to_string(),
"".to_string(),
10,
vec![],
10,
10,
labels.clone(),
"".to_string(),
"".to_string(),
ContainerHostConfig::new(""),
ContainerNetworkSettings::new(HashMap::new()),
vec![],
),
ContainerSummary::new(
"m3".to_string(),
vec!["/m3".to_string()],
"mongo:latest".to_string(),
"img3".to_string(),
"".to_string(),
10,
vec![],
10,
10,
labels,
"".to_string(),
"".to_string(),
ContainerHostConfig::new(""),
ContainerNetworkSettings::new(HashMap::new()),
vec![],
),
];
let response = serde_json::to_string(&modules).unwrap();
let response_len = response.len();
let mut response = Response::new(response.into());
response
.headers_mut()
.typed_insert(&ContentLength(response_len as u64));
response
.headers_mut()
.typed_insert(&ContentType(mime::APPLICATION_JSON));
Box::new(future::ok(response))
}
#[test]
fn container_list_succeeds() {
let dispatch_table = routes!(
GET "/networks" => default_get_networks_handler(),
POST "/networks/create" => default_create_network_handler(),
GET "/containers/json" => container_list_handler,
);
let (server, port) = run_tcp_server(
"127.0.0.1",
make_req_dispatcher(dispatch_table, Box::new(not_found_handler)),
);
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let task = DockerModuleRuntime::make_runtime(settings).and_then(|runtime| runtime.list());
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
let modules = runtime.block_on(task).unwrap();
assert_eq!(3, modules.len());
assert_eq!("m1", modules[0].name());
assert_eq!("m2", modules[1].name());
assert_eq!("m3", modules[2].name());
assert_eq!("img1", modules[0].config().image_id().unwrap());
assert_eq!("img2", modules[1].config().image_id().unwrap());
assert_eq!("img3", modules[2].config().image_id().unwrap());
assert_eq!("nginx:latest", modules[0].config().image());
assert_eq!("ubuntu:latest", modules[1].config().image());
assert_eq!("mongo:latest", modules[2].config().image());
for module in modules {
for i in 0..3 {
assert_eq!(
module
.config()
.create_options()
.labels()
.unwrap()
.get(&format!("l{}", i + 1)),
Some(&format!("v{}", i + 1))
);
}
}
}
#[allow(clippy::needless_pass_by_value)]
fn container_logs_handler(req: Request<Body>) -> ResponseFuture {
assert_eq!(req.method(), &Method::GET);
assert_eq!(req.uri().path(), "/containers/mod1/logs");
let query_map: HashMap<String, String> = parse_query(req.uri().query().unwrap().as_bytes())
.into_owned()
.collect();
assert!(query_map.contains_key("stdout"));
assert!(query_map.contains_key("stderr"));
assert!(query_map.contains_key("follow"));
assert!(query_map.contains_key("tail"));
assert_eq!("true", query_map["follow"]);
assert_eq!("all", query_map["tail"]);
assert_eq!("100000", query_map["since"]);
let body = vec![
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x52, 0x6f, 0x73, 0x65, 0x73, 0x20, 0x61,
0x72, 0x65, 0x20, 0x72, 0x65, 0x64, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x76,
0x69, 0x6f, 0x6c, 0x65, 0x74, 0x73, 0x20, 0x61, 0x72, 0x65, 0x20, 0x62, 0x6c, 0x75, 0x65,
];
Box::new(future::ok(Response::new(body.into())))
}
#[test]
fn container_logs_succeeds() {
let dispatch_table = routes!(
GET "/networks" => default_get_networks_handler(),
POST "/networks/create" => default_create_network_handler(),
GET "/containers/mod1/logs" => container_logs_handler,
);
let (server, port) = run_tcp_server(
"127.0.0.1",
make_req_dispatcher(dispatch_table, Box::new(not_found_handler)),
);
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let task = DockerModuleRuntime::make_runtime(settings).and_then(|runtime| {
let options = LogOptions::new()
.with_follow(true)
.with_tail(LogTail::All)
.with_since(100_000)
.with_until(200_000);
runtime.logs("mod1", &options)
});
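    // The runtime is expected to pass the log stream through unmodified, so the
    // expected bytes are the same two stream frames the handler above returns, concatenated.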
let expected_body = [
0x01_u8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x52, 0x6f, 0x73, 0x65, 0x73, 0x20,
0x61, 0x72, 0x65, 0x20, 0x72, 0x65, 0x64, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
0x76, 0x69, 0x6f, 0x6c, 0x65, 0x74, 0x73, 0x20, 0x61, 0x72, 0x65, 0x20, 0x62, 0x6c, 0x75,
0x65,
];
let assert = task.and_then(Stream::concat2).and_then(|b| {
assert_eq!(&expected_body[..], b.as_ref());
Ok(())
});
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
runtime.block_on(assert).unwrap();
}
#[test]
fn image_remove_with_white_space_name_fails() {
let (server, port) = run_tcp_server("127.0.0.1", default_network_handler());
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let image_name = " ";
let task = DockerModuleRuntime::make_runtime(settings)
.and_then(|runtime| ModuleRegistry::remove(&runtime, image_name))
.then(|res| match res {
Ok(_) => Err("Expected error but got a result.".to_string()),
Err(err) => match err.kind() {
ErrorKind::RegistryOperation(RegistryOperation::RemoveImage(s))
if s == image_name =>
{
Ok(())
}
kind => panic!(
"Expected `RegistryOperation(RemoveImage)` error but got {:?}.",
kind
),
},
});
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
runtime.block_on(task).unwrap();
}
#[test]
fn create_fails_for_non_docker_type() {
let (server, port) = run_tcp_server("127.0.0.1", default_network_handler());
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let name = "not_docker";
let task = DockerModuleRuntime::make_runtime(settings)
.and_then(|runtime| {
let module_config = ModuleSpec::new(
"m1".to_string(),
name.to_string(),
DockerConfig::new(
"nginx:latest".to_string(),
ContainerCreateBody::new(),
None,
None,
)
.unwrap(),
BTreeMap::new(),
ImagePullPolicy::default(),
)
.unwrap();
runtime.create(module_config)
})
.then(|result| match result {
Ok(_) => panic!("Expected test to fail but it didn't!"),
Err(err) => match err.kind() {
ErrorKind::InvalidModuleType(s) if s == name => Ok::<_, Error>(()),
kind => panic!("Expected `InvalidModuleType` error but got {:?}.", kind),
},
});
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
runtime.block_on(task).unwrap();
}
#[test]
fn start_fails_for_empty_id() {
let (server, port) = run_tcp_server("127.0.0.1", default_network_handler());
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let name = "";
let task = DockerModuleRuntime::make_runtime(settings)
.and_then(|runtime| runtime.start(name))
.then(|result| match result {
Ok(_) => panic!("Expected test to fail but it didn't!"),
Err(err) => match err.kind() {
ErrorKind::RuntimeOperation(RuntimeOperation::StartModule(s)) if s == name => {
Ok::<_, Error>(())
}
kind => panic!(
"Expected `RuntimeOperation(StartModule)` error but got {:?}.",
kind
),
},
});
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
runtime.block_on(task).unwrap();
}
#[test]
fn start_fails_for_white_space_id() {
let (server, port) = run_tcp_server("127.0.0.1", default_network_handler());
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let name = " ";
let task = DockerModuleRuntime::make_runtime(settings)
.and_then(|runtime| runtime.start(name))
.then(|result| match result {
Ok(_) => panic!("Expected test to fail but it didn't!"),
Err(err) => match err.kind() {
ErrorKind::RuntimeOperation(RuntimeOperation::StartModule(s)) if s == name => {
Ok::<_, Error>(())
}
kind => panic!(
"Expected `RuntimeOperation(StartModule)` error but got {:?}.",
kind
),
},
});
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
runtime.block_on(task).unwrap();
}
#[test]
fn stop_fails_for_empty_id() {
let (server, port) = run_tcp_server("127.0.0.1", default_network_handler());
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let name = "";
let task = DockerModuleRuntime::make_runtime(settings)
.and_then(|runtime| runtime.stop(name, None))
.then(|result| match result {
Ok(_) => panic!("Expected test to fail but it didn't!"),
Err(err) => match err.kind() {
ErrorKind::RuntimeOperation(RuntimeOperation::StopModule(s)) if s == name => {
Ok::<_, Error>(())
}
kind => panic!(
"Expected `RuntimeOperation(StopModule)` error but got {:?}.",
kind
),
},
});
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
runtime.block_on(task).unwrap();
}
#[test]
fn stop_fails_for_white_space_id() {
let (server, port) = run_tcp_server("127.0.0.1", default_network_handler());
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let name = " ";
let task = DockerModuleRuntime::make_runtime(settings)
.and_then(|runtime| runtime.stop(name, None))
.then(|result| match result {
Ok(_) => panic!("Expected test to fail but it didn't!"),
Err(err) => match err.kind() {
ErrorKind::RuntimeOperation(RuntimeOperation::StopModule(s)) if s == name => {
Ok::<_, Error>(())
}
kind => panic!(
"Expected `RuntimeOperation(StopModule)` error but got {:?}.",
kind
),
},
});
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
runtime.block_on(task).unwrap();
}
#[test]
fn restart_fails_for_empty_id() {
let (server, port) = run_tcp_server("127.0.0.1", default_network_handler());
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let name = "";
let task = DockerModuleRuntime::make_runtime(settings)
.and_then(|runtime| runtime.restart(name))
.then(|result| match result {
Ok(_) => panic!("Expected test to fail but it didn't!"),
Err(err) => match err.kind() {
ErrorKind::RuntimeOperation(RuntimeOperation::RestartModule(s)) if s == name => {
Ok::<_, Error>(())
}
kind => panic!(
"Expected `RuntimeOperation(RestartModule)` error but got {:?}.",
kind
),
},
});
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
runtime.block_on(task).unwrap();
}
#[test]
fn restart_fails_for_white_space_id() {
let (server, port) = run_tcp_server("127.0.0.1", default_network_handler());
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let name = " ";
let task = DockerModuleRuntime::make_runtime(settings)
.and_then(|runtime| runtime.restart(name))
.then(|result| match result {
Ok(_) => panic!("Expected test to fail but it didn't!"),
Err(err) => match err.kind() {
ErrorKind::RuntimeOperation(RuntimeOperation::RestartModule(s)) if s == name => {
Ok::<_, Error>(())
}
kind => panic!(
"Expected `RuntimeOperation(RestartModule)` error but got {:?}.",
kind
),
},
});
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
runtime.block_on(task).unwrap();
}
#[test]
fn remove_fails_for_empty_id() {
let (server, port) = run_tcp_server("127.0.0.1", default_network_handler());
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let name = "";
let task = DockerModuleRuntime::make_runtime(settings)
.and_then(|runtime| ModuleRuntime::remove(&runtime, name))
.then(|result| match result {
Ok(_) => panic!("Expected test to fail but it didn't!"),
Err(err) => match err.kind() {
ErrorKind::RuntimeOperation(RuntimeOperation::RemoveModule(s)) if s == name => {
Ok::<_, Error>(())
}
kind => panic!(
"Expected `RuntimeOperation(RemoveModule)` error but got {:?}.",
kind
),
},
});
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
runtime.block_on(task).unwrap();
}
#[test]
fn remove_fails_for_white_space_id() {
let (server, port) = run_tcp_server("127.0.0.1", default_network_handler());
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let name = " ";
let task = DockerModuleRuntime::make_runtime(settings)
.and_then(|runtime| ModuleRuntime::remove(&runtime, name))
.then(|result| match result {
Ok(_) => panic!("Expected test to fail but it didn't!"),
Err(err) => match err.kind() {
ErrorKind::RuntimeOperation(RuntimeOperation::RemoveModule(s)) if s == name => {
Ok::<_, Error>(())
}
kind => panic!(
"Expected `RuntimeOperation(RemoveModule)` error but got {:?}.",
kind
),
},
});
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
runtime.block_on(task).unwrap();
}
#[test]
fn get_fails_for_empty_id() {
let (server, port) = run_tcp_server("127.0.0.1", default_network_handler());
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let name = "";
let task = DockerModuleRuntime::make_runtime(settings)
.and_then(|runtime| runtime.get(name))
.then(|result| match result {
Ok(_) => panic!("Expected test to fail but it didn't!"),
Err(err) => match err.kind() {
ErrorKind::RuntimeOperation(RuntimeOperation::GetModule(s)) if s == name => {
Ok::<_, Error>(())
}
kind => panic!(
"Expected `RuntimeOperation(GetModule)` error but got {:?}.",
kind
),
},
});
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
runtime.block_on(task).unwrap();
}
#[test]
fn get_fails_for_white_space_id() {
let (server, port) = run_tcp_server("127.0.0.1", default_network_handler());
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let name = " ";
let task = DockerModuleRuntime::make_runtime(settings)
.and_then(|runtime| runtime.get(name))
.then(|result| match result {
Ok(_) => panic!("Expected test to fail but it didn't!"),
Err(err) => match err.kind() {
ErrorKind::RuntimeOperation(RuntimeOperation::GetModule(s)) if s == name => {
Ok::<_, Error>(())
}
kind => panic!(
"Expected `RuntimeOperation(GetModule)` error but got {:?}.",
kind
),
},
});
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
runtime.block_on(task).unwrap();
}
#[test]
fn runtime_init_network_does_not_exist_create() {
let list_got_called_lock = Arc::new(RwLock::new(false));
let list_got_called_lock_cloned = list_got_called_lock.clone();
let create_got_called_lock = Arc::new(RwLock::new(false));
let create_got_called_lock_cloned = create_got_called_lock.clone();
let network_handler = make_network_handler(
move || {
let mut list_got_called_w = list_got_called_lock.write().unwrap();
*list_got_called_w = true;
json!([]).to_string()
},
move |_| {
let mut create_got_called_w = create_got_called_lock.write().unwrap();
*create_got_called_w = true;
},
);
let (server, port) = run_tcp_server("127.0.0.1", network_handler);
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
//act
let task = DockerModuleRuntime::make_runtime(settings);
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
runtime.block_on(task).unwrap();
//assert
assert_eq!(true, *list_got_called_lock_cloned.read().unwrap());
assert_eq!(true, *create_got_called_lock_cloned.read().unwrap());
}
#[test]
fn network_ipv6_create() {
let list_got_called_lock = Arc::new(RwLock::new(false));
let list_got_called_lock_cloned = list_got_called_lock.clone();
let create_got_called_lock = Arc::new(RwLock::new(false));
let create_got_called_lock_cloned = create_got_called_lock.clone();
let network_handler = make_network_handler(
move || {
let mut list_got_called_w = list_got_called_lock.write().unwrap();
*list_got_called_w = true;
json!([]).to_string()
},
move |req| {
let mut create_got_called_w = create_got_called_lock.write().unwrap();
*create_got_called_w = true;
let task = req
.into_body()
.concat2()
.map(|body| {
let network: NetworkConfig = serde_json::from_slice(&body).unwrap();
assert_eq!("my-network", network.name().as_str());
let ipam_config = network.IPAM().unwrap().config().unwrap();
let ipam_config_0 = ipam_config.get(0).unwrap();
assert_eq!(ipam_config_0["Gateway"], "172.18.0.1");
assert_eq!(ipam_config_0["Subnet"], "172.18.0.0/16");
assert_eq!(ipam_config_0["IPRange"], "172.18.0.0/16");
let ipam_config_1 = ipam_config.get(1).unwrap();
assert_eq!(ipam_config_1["Gateway"], "172.20.0.1");
assert_eq!(ipam_config_1["Subnet"], "172.20.0.0/16");
assert_eq!(ipam_config_1["IPRange"], "172.20.0.0/24");
})
.map_err(|err| panic!("{:?}", err));
tokio::spawn(task).into_future().wait().unwrap();
},
);
let (server, port) = run_tcp_server("127.0.0.1", network_handler);
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
[moby_runtime.network]
name = "my-network"
ipv6 = true
[[moby_runtime.network.ipam.config]]
gateway = "172.18.0.1"
subnet = "172.18.0.0/16"
ip_range = "172.18.0.0/16"
[[moby_runtime.network.ipam.config]]
gateway = "172.20.0.1"
subnet = "172.20.0.0/16"
ip_range = "172.20.0.0/24"
"#,
port
));
//act
let task = DockerModuleRuntime::make_runtime(settings);
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
runtime.block_on(task).unwrap();
//assert
assert_eq!(true, *list_got_called_lock_cloned.read().unwrap());
assert_eq!(true, *create_got_called_lock_cloned.read().unwrap());
}
#[test]
fn runtime_init_network_exist_do_not_create() {
let list_got_called_lock = Arc::new(RwLock::new(false));
let list_got_called_lock_cloned = list_got_called_lock.clone();
let create_got_called_lock = Arc::new(RwLock::new(false));
let create_got_called_lock_cloned = create_got_called_lock.clone();
let network_handler = make_network_handler(
move || {
let mut list_got_called_w = list_got_called_lock.write().unwrap();
*list_got_called_w = true;
json!([
{
"Name": "azure-iot-edge",
"Id": "8e3209d08ed5e73d1c9c8e7580ddad232b6dceb5bf0c6d74cadbed75422eef0e",
"Created": "0001-01-01T00:00:00Z",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"Internal": false,
"Attachable": false,
"Ingress": false,
"IPAM": {
"Driver": "bridge",
"Config": []
},
"Containers": {},
"Options": {}
}
])
.to_string()
},
move |_| {
let mut create_got_called_w = create_got_called_lock.write().unwrap();
*create_got_called_w = true;
},
);
let (server, port) = run_tcp_server("127.0.0.1", network_handler);
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
//act
let task = DockerModuleRuntime::make_runtime(settings);
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
runtime.block_on(task).unwrap();
//assert
assert_eq!(true, *list_got_called_lock_cloned.read().unwrap());
assert_eq!(false, *create_got_called_lock_cloned.read().unwrap());
}
#[test]
fn runtime_system_info_succeeds() {
let system_info_got_called_lock = Arc::new(RwLock::new(false));
let system_info_got_called_lock_cloned = system_info_got_called_lock.clone();
let on_system_info = move |req: Request<Body>| {
let mut system_info_got_called_w = system_info_got_called_lock.write().unwrap();
*system_info_got_called_w = true;
assert_eq!(req.uri().path(), "/info");
let response = json!(
{
"OSType": "linux",
"Architecture": "x86_64",
}
)
.to_string();
let response_len = response.len();
let mut response = Response::new(response.into());
response
.headers_mut()
.typed_insert(&ContentLength(response_len as u64));
response
.headers_mut()
.typed_insert(&ContentType(mime::APPLICATION_JSON));
Box::new(future::ok(response)) as ResponseFuture
};
let dispatch_table = routes!(
GET "/networks" => default_get_networks_handler(),
POST "/networks/create" => default_create_network_handler(),
GET "/info" => on_system_info,
);
//act
let (server, port) = run_tcp_server(
"127.0.0.1",
make_req_dispatcher(dispatch_table, Box::new(not_found_handler)),
);
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let task =
DockerModuleRuntime::make_runtime(settings).and_then(|runtime| runtime.system_info());
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
let system_info = runtime.block_on(task).unwrap();
//assert
assert_eq!(true, *system_info_got_called_lock_cloned.read().unwrap());
assert_eq!("linux", system_info.os_type);
assert_eq!("x86_64", system_info.architecture);
}
#[test]
fn runtime_system_info_none_returns_unknown() {
let system_info_got_called_lock = Arc::new(RwLock::new(false));
let system_info_got_called_lock_cloned = system_info_got_called_lock.clone();
let on_system_info = move |req: Request<Body>| {
let mut system_info_got_called_w = system_info_got_called_lock.write().unwrap();
*system_info_got_called_w = true;
assert_eq!(req.uri().path(), "/info");
let response = json!({}).to_string();
let response_len = response.len();
let mut response = Response::new(response.into());
response
.headers_mut()
.typed_insert(&ContentLength(response_len as u64));
response
.headers_mut()
.typed_insert(&ContentType(mime::APPLICATION_JSON));
Box::new(future::ok(response)) as ResponseFuture
};
let dispatch_table = routes!(
GET "/networks" => default_get_networks_handler(),
POST "/networks/create" => default_create_network_handler(),
GET "/info" => on_system_info,
);
//act
let (server, port) = run_tcp_server(
"127.0.0.1",
make_req_dispatcher(dispatch_table, Box::new(not_found_handler)),
);
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let task =
DockerModuleRuntime::make_runtime(settings).and_then(|runtime| runtime.system_info());
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
let system_info = runtime.block_on(task).unwrap();
//assert
assert_eq!(true, *system_info_got_called_lock_cloned.read().unwrap());
assert_eq!("Unknown", system_info.os_type);
assert_eq!("Unknown", system_info.architecture);
}
| {
let dispatch_table = routes!(
GET "/networks" => default_get_networks_handler(),
POST "/networks/create" => default_create_network_handler(),
POST "/containers/m1/stop" => container_stop_with_timeout_handler,
);
let (server, port) = run_tcp_server(
"127.0.0.1",
make_req_dispatcher(dispatch_table, Box::new(not_found_handler)),
);
let server = server.map_err(|err| panic!(err));
let settings = make_settings(&format!(
r#"
[moby_runtime]
uri = "http://localhost:{}"
network = "azure-iot-edge"
"#,
port
));
let task = DockerModuleRuntime::make_runtime(settings)
.and_then(|runtime| runtime.stop("m1", Some(Duration::from_secs(600))));
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
runtime.spawn(server);
runtime.block_on(task).unwrap();
} |
opensmile.py | import os
import csv
import sys
import time
import pandas as pd
from sklearn.preprocessing import StandardScaler
from typing import Tuple
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split
# Number of features in each feature set
FEATURE_NUM = {
'IS09_emotion': 384,
'IS10_paraling': 1582,
'IS11_speaker_state': 4368,
'IS12_speaker_trait': 6125,
'IS13_ComParE': 6373,
'ComParE_2016': 6373
}
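# These keys match the standard INTERSPEECH-challenge / ComParE configuration
# files shipped under openSMILE's config/ directory.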
'''
get_feature_opensmile(): use openSMILE to extract the features of a single audio file
Input:
    config(Class)
    filepath: path of the audio file
Output:
    the feature vector of this audio file
'''
def get_feature_opensmile(config, filepath: str):
    # Project root path
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
    # Path of single_feature.csv
single_feat_path = os.path.join(BASE_DIR, config.feature_path, 'single_feature.csv')
    # Path of the openSMILE config file
opensmile_config_path = os.path.join(config.opensmile_path, 'config', config.opensmile_config + '.conf')
    # openSMILE command
cmd = 'cd ' + config.opensmile_path + ' && ./SMILExtract -C ' + opensmile_config_path + ' -I ' + filepath + ' -O ' + single_feat_path
print("Opensmile cmd: ", cmd)
os.system(cmd)
reader = csv.reader(open(single_feat_path,'r'))
rows = [row for row in reader]
last_line = rows[-1]
return last_line[1: FEATURE_NUM[config.opensmile_config] + 1]
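# A minimal usage sketch for get_feature_opensmile(). `DemoConfig` and the paths
# below are hypothetical stand-ins for the project's real config object, and the
# SMILExtract binary is assumed to live under opensmile_path:
#
#     class DemoConfig:
#         opensmile_path = '/opt/opensmile'      # assumed install location
#         opensmile_config = 'IS09_emotion'      # any key of FEATURE_NUM
#         feature_path = 'features'
#
#     vec = get_feature_opensmile(DemoConfig(), '/path/to/audio.wav')
#     assert len(vec) == FEATURE_NUM['IS09_emotion']   # 384 values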
'''
load_feature(): load feature data from a .csv file
Input:
    config(Class)
    feature_path: path of the feature file
    train: whether this is training data
Output:
    training data, test data and the corresponding labels
'''
def load_feature(config, feature_path: str, train: bool):
    # Load the feature data
df = pd.read_csv(feature_path)
features = [str(i) for i in range(1, FEATURE_NUM[config.opensmile_config] + 1) | f.loc[:,features].values
Y = df.loc[:,'label'].values
    # Path of the standardization (scaler) model
scaler_path = os.path.join(config.checkpoint_path, 'SCALER_OPENSMILE.m')
if train == True:
        # Standardize the data
scaler = StandardScaler().fit(X)
        # Save the standardization model
joblib.dump(scaler, scaler_path)
X = scaler.transform(X)
        # Split into training and test sets
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 42)
return x_train, x_test, y_train, y_test
else:
        # Standardize the data
        # Load the standardization model
scaler = joblib.load(scaler_path)
X = scaler.transform(X)
return X
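# Usage sketch (hypothetical csv paths; get_data() below is what writes the csv
# consumed here):
#
#     x_train, x_test, y_train, y_test = get_data(config, 'datasets/', 'features/train.csv', train=True)
#     X_pred = load_feature(config, 'features/predict.csv', train=False)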
'''
get_data():
    extract the features of all audio files: walk through every folder, read the audio files in each folder, extract the features of each one, and save all features to feature_path
Input:
    config(Class)
    data_path: dataset folder / path of the file to test
    feature_path: path where the features are saved
    train: whether this is training data
Output:
    train = True: training and test data features and the corresponding labels
    train = False: features of the data to predict
'''
# Extract features with openSMILE
def get_data(config, data_path, feature_path: str, train: bool):
writer = csv.writer(open(feature_path, 'w'))
first_row = ['label']
for i in range(1, FEATURE_NUM[config.opensmile_config] + 1):
first_row.append(str(i))
writer.writerow(first_row)
writer = csv.writer(open(feature_path, 'a+'))
print('Opensmile extracting...')
if train == True:
cur_dir = os.getcwd()
sys.stderr.write('Curdir: %s\n' % cur_dir)
os.chdir(data_path)
        # Walk through the class folders
for i, directory in enumerate(config.class_labels):
sys.stderr.write("Started reading folder %s\n" % directory)
os.chdir(directory)
# label_name = directory
label = config.class_labels.index(directory)
            # Read the audio files in this folder
for filename in os.listdir('.'):
if not filename.endswith('wav'):
continue
filepath = os.path.join(os.getcwd(), filename)
                # Extract the features of this audio file
feature_vector = get_feature_opensmile(config, filepath)
feature_vector.insert(0, label)
                # Collect the features of every audio file into one csv file
writer.writerow(feature_vector)
sys.stderr.write("Ended reading folder %s\n" % directory)
os.chdir('..')
os.chdir(cur_dir)
else:
feature_vector = get_feature_opensmile(config, data_path)
feature_vector.insert(0, '-1')
writer.writerow(feature_vector)
print('Opensmile extract done.')
    # Temporary workaround for a puzzling bug
    # For feature sets other than IS10_paraling, the prediction-data features cannot be loaded directly here; the cause is still unclear
if(train == True):
return load_feature(config, feature_path, train = train) | ]
X = d |
__init__.py | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 CORTIER Benoît
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'decrire'"""
from primaires.interpreteur.commande.commande import Commande
class CmdDecrire(Commande):
"""Commande 'decrire'.
"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "décrire", "describe")
self.groupe = "joueur"
self.aide_courte = "Ouvre un éditeur pour se décrire."
self.aide_longue = \
"Cette commande permet de manipuler votre description. " \
"Elle ouvre un éditeur dans lequel vous pouvez modifier " \
"cette description. La description doit d'abord être validée " \
"par un administrateur avant d'être visible à tous."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
editeur = type(self).importeur.interpreteur.construire_editeur(
"descedit", personnage, personnage)
personnage.contextes.ajouter(editeur)
editeur.actualiser() | |
test_WiFiServer.py | from mock_decorators import setup, teardown
from threading import Thread
import socket
import time
stop_client_thread = False
client_thread = None
@setup('Simple echo server')
def setup_echo_server(e):
global stop_client_thread
global client_thread
def echo_client_thread(): | server_address = socket.gethostbyname('esp8266-wfs-test.local')
count = 0
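        # Each pass opens a TCP connection to the device's echo server on port
        # 5000, sends one 1024-byte line, and expects the same bytes echoed back.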
while count < 5 and not stop_client_thread:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((server_address, 5000))
sock.settimeout(1.0)
buf = 'a' * 1023 + '\n'
sock.sendall(buf)
data = ''
retries = 0
while len(data) < 1024 and retries < 3:
data += sock.recv(1024)
retries += 1
print('Received {} bytes'.format(len(data)))
if len(data) != 1024:
raise RuntimeError('client failed to receive response')
count += 1
stop_client_thread = False
client_thread = Thread(target=echo_client_thread)
client_thread.start()
@teardown('Simple echo server')
def teardown_echo_server(e):
global stop_client_thread
stop_client_thread = True
client_thread.join() | |
main.py | import sys
import numpy as np | import matplotlib.pyplot as plt
from ppa.ppa import prey_predator_algorithm
from ppa.config import Config
from acs.objective import fitness, fitness_population
from acs.instance import Instance, print_instance
def read_files(instance_config_filename, config_filename):
if instance_config_filename is None:
instance = Instance.load_test()
else:
instance = Instance.load_from_file(instance_config_filename)
# print_instance(instance)
# print("")
if config_filename is None:
config = Config.load_test()
else:
config = Config.load_from_file(config_filename)
return (instance, config)
# assert(len(sys.argv) >= 2)
instance_config_filename = None
if (len(sys.argv) >= 2):
instance_config_filename = sys.argv[1]
config_filename = None
if (len(sys.argv) >= 3):
config_filename = sys.argv[2]
num_repetitions = 10
(instance, config) = read_files(instance_config_filename, config_filename)
best_fitness = np.zeros((config.num_iterations + 1, num_repetitions)) # One extra slot to store the initial values
perf_counter = np.zeros((config.num_iterations + 1, num_repetitions))
process_time = np.zeros((config.num_iterations + 1, num_repetitions))
for i in range(num_repetitions):
(population, survival_values) = prey_predator_algorithm(instance, config, fitness_population, best_fitness=best_fitness[:,i], perf_counter=perf_counter[:,i], process_time=process_time[:,i])
print('#{}\n'.format(i))
print('Survival values:\n{}\n'.format(survival_values))
print('Best Individual:\n{}\n'.format(population[0]))
mean_best_fitness = np.mean(best_fitness, axis=1)
mean_perf_counter = np.mean(perf_counter, axis=1)
mean_process_time = np.mean(process_time, axis=1)
print('Statistics:')
print('Fitness:\n{}\n'.format(mean_best_fitness))
print('perf_counter:\n{}\n'.format(mean_perf_counter))
print('process_time:\n{}\n'.format(mean_process_time))
fig = plt.figure()
fig.suptitle('PPA: perf_counter vs. process_time')
plt.plot(mean_perf_counter, 'r.')
plt.plot(mean_process_time, 'b.')
plt.show()
fig = plt.figure()
fig.suptitle('PPA: best fitness')
plt.plot(mean_best_fitness, 'r')
plt.show() | |
pythonrun.rs | use crate::ffi::object::*;
#[cfg(not(Py_LIMITED_API))]
use crate::ffi::pyarena::PyArena;
#[cfg(not(Py_LIMITED_API))]
#[cfg(not(feature = "libc"))]
#[derive(Clone)]
pub enum FILE {}
#[cfg(feature = "libc")]
use libc::FILE;
use std::os::raw::{c_char, c_int};
// TODO: PyCF_MASK etc. constants
#[repr(C)]
#[derive(Copy, Clone)]
#[cfg(not(Py_LIMITED_API))]
pub struct PyCompilerFlags {
pub cf_flags: c_int,
}
#[cfg(Py_LIMITED_API)]
opaque_struct!(PyCompilerFlags);
#[cfg(not(Py_LIMITED_API))]
opaque_struct!(_mod);
#[cfg(not(Py_LIMITED_API))]
extern "C" {
pub fn PyRun_SimpleStringFlags(arg1: *const c_char, arg2: *mut PyCompilerFlags) -> c_int;
pub fn PyRun_AnyFileFlags(
arg1: *mut FILE,
arg2: *const c_char,
arg3: *mut PyCompilerFlags,
) -> c_int;
pub fn PyRun_AnyFileExFlags(
fp: *mut FILE,
filename: *const c_char,
closeit: c_int,
flags: *mut PyCompilerFlags,
) -> c_int;
pub fn PyRun_SimpleFileExFlags(
fp: *mut FILE,
filename: *const c_char,
closeit: c_int,
flags: *mut PyCompilerFlags,
) -> c_int;
pub fn PyRun_InteractiveOneFlags(
fp: *mut FILE,
filename: *const c_char,
flags: *mut PyCompilerFlags,
) -> c_int;
pub fn PyRun_InteractiveOneObject(
fp: *mut FILE,
filename: *mut PyObject,
flags: *mut PyCompilerFlags,
) -> c_int;
pub fn PyRun_InteractiveLoopFlags(
fp: *mut FILE,
filename: *const c_char,
flags: *mut PyCompilerFlags,
) -> c_int;
pub fn PyParser_ASTFromString(
s: *const c_char,
filename: *const c_char,
start: c_int,
flags: *mut PyCompilerFlags,
arena: *mut PyArena,
) -> *mut _mod;
pub fn PyParser_ASTFromStringObject(
s: *const c_char,
filename: *mut PyObject,
start: c_int,
flags: *mut PyCompilerFlags,
arena: *mut PyArena,
) -> *mut _mod;
pub fn PyParser_ASTFromFile(
fp: *mut FILE,
filename: *const c_char,
enc: *const c_char,
start: c_int,
ps1: *const c_char,
ps2: *const c_char,
flags: *mut PyCompilerFlags,
errcode: *mut c_int,
arena: *mut PyArena,
) -> *mut _mod;
pub fn PyParser_ASTFromFileObject(
fp: *mut FILE,
filename: *mut PyObject,
enc: *const c_char,
start: c_int,
ps1: *const c_char,
ps2: *const c_char,
flags: *mut PyCompilerFlags,
errcode: *mut c_int,
arena: *mut PyArena,
) -> *mut _mod;
}
opaque_struct!(symtable);
opaque_struct!(_node);
#[inline]
pub unsafe fn | (s: *const c_char, b: c_int) -> *mut _node {
PyParser_SimpleParseStringFlags(s, b, 0)
}
#[cfg(not(Py_LIMITED_API))]
#[inline]
pub unsafe fn PyParser_SimpleParseFile(fp: *mut FILE, s: *const c_char, b: c_int) -> *mut _node {
PyParser_SimpleParseFileFlags(fp, s, b, 0)
}
extern "C" {
pub fn PyParser_SimpleParseStringFlags(
arg1: *const c_char,
arg2: c_int,
arg3: c_int,
) -> *mut _node;
pub fn PyParser_SimpleParseStringFlagsFilename(
arg1: *const c_char,
arg2: *const c_char,
arg3: c_int,
arg4: c_int,
) -> *mut _node;
#[cfg(not(Py_LIMITED_API))]
pub fn PyParser_SimpleParseFileFlags(
arg1: *mut FILE,
arg2: *const c_char,
arg3: c_int,
arg4: c_int,
) -> *mut _node;
#[cfg(not(Py_LIMITED_API))]
#[cfg_attr(PyPy, link_name = "PyPyRun_StringFlags")]
pub fn PyRun_StringFlags(
arg1: *const c_char,
arg2: c_int,
arg3: *mut PyObject,
arg4: *mut PyObject,
arg5: *mut PyCompilerFlags,
) -> *mut PyObject;
#[cfg(not(Py_LIMITED_API))]
pub fn PyRun_FileExFlags(
fp: *mut FILE,
filename: *const c_char,
start: c_int,
globals: *mut PyObject,
locals: *mut PyObject,
closeit: c_int,
flags: *mut PyCompilerFlags,
) -> *mut PyObject;
#[cfg(Py_LIMITED_API)]
#[cfg(not(PyPy))]
pub fn Py_CompileString(string: *const c_char, p: *const c_char, s: c_int) -> *mut PyObject;
#[cfg(any(PyPy, not(Py_LIMITED_API)))]
#[cfg_attr(PyPy, link_name = "PyPy_CompileStringFlags")]
pub fn Py_CompileStringFlags(
string: *const c_char,
p: *const c_char,
s: c_int,
f: *mut PyCompilerFlags,
) -> *mut PyObject;
}
#[inline]
#[cfg(any(not(Py_LIMITED_API), PyPy))]
pub unsafe fn Py_CompileString(string: *const c_char, p: *const c_char, s: c_int) -> *mut PyObject {
#[cfg(not(PyPy))]
return Py_CompileStringExFlags(string, p, s, std::ptr::null_mut(), -1);
#[cfg(PyPy)]
Py_CompileStringFlags(string, p, s, std::ptr::null_mut())
}
extern "C" {
#[cfg(not(Py_LIMITED_API))]
#[cfg(not(PyPy))]
pub fn Py_CompileStringExFlags(
str: *const c_char,
filename: *const c_char,
start: c_int,
flags: *mut PyCompilerFlags,
optimize: c_int,
) -> *mut PyObject;
#[cfg(not(Py_LIMITED_API))]
pub fn Py_CompileStringObject(
str: *const c_char,
filename: *mut PyObject,
start: c_int,
flags: *mut PyCompilerFlags,
optimize: c_int,
) -> *mut PyObject;
pub fn Py_SymtableString(
str: *const c_char,
filename: *const c_char,
start: c_int,
) -> *mut symtable;
#[cfg(not(Py_LIMITED_API))]
pub fn Py_SymtableStringObject(
str: *const c_char,
filename: *mut PyObject,
start: c_int,
) -> *mut symtable;
#[cfg_attr(PyPy, link_name = "PyPyErr_Print")]
pub fn PyErr_Print();
#[cfg_attr(PyPy, link_name = "PyPyErr_PrintEx")]
pub fn PyErr_PrintEx(arg1: c_int);
#[cfg_attr(PyPy, link_name = "PyPyErr_Display")]
pub fn PyErr_Display(arg1: *mut PyObject, arg2: *mut PyObject, arg3: *mut PyObject);
}
| PyParser_SimpleParseString |
test_lsm01.py | #!/usr/bin/env python
#
# Public Domain 2014-2016 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import wiredtiger, wtscenario, wttest
from wtdataset import SimpleDataSet
# test_lsm01.py
# Test LSM tree configuration options.
class test_lsm01(wttest.WiredTigerTestCase):
K = 1024
M = 1024 * K
G = 1024 * M
uri = "lsm:test_lsm01"
chunk_size_scenarios = wtscenario.quick_scenarios('s_chunk_size',
[1*M,20*M,None], [0.6,0.6,0.6])
merge_max_scenarios = wtscenario.quick_scenarios('s_merge_max',
[2,10,20,None], None)
bloom_scenarios = wtscenario.quick_scenarios('s_bloom',
[True,False,None], None)
bloom_bit_scenarios = wtscenario.quick_scenarios('s_bloom_bit_count',
[2,8,20,None], None)
bloom_hash_scenarios = wtscenario.quick_scenarios('s_bloom_hash_count',
[2,10,20,None], None)
# Occasionally add a lot of records, so that merges (and bloom) happen.
record_count_scenarios = wtscenario.quick_scenarios(
'nrecs', [10, 10000], [0.9, 0.1])
config_vars = [ 'chunk_size', 'merge_max', 'bloom',
'bloom_bit_count', 'bloom_hash_count' ]
scenarios = wtscenario.make_scenarios(
chunk_size_scenarios, merge_max_scenarios, bloom_scenarios,
bloom_bit_scenarios, bloom_hash_scenarios, record_count_scenarios,
prune=100, prunelong=500)
# Test drop of an object.
def | (self):
args = 'key_format=S'
args += ',lsm=(' # Start the LSM configuration options.
# add names to args, e.g. args += ',session_max=30'
for var in self.config_vars:
value = getattr(self, 's_' + var)
if value != None:
if var == 'verbose':
value = '[' + str(value) + ']'
if value == True:
value = 'true'
if value == False:
value = 'false'
args += ',' + var + '=' + str(value)
args += ')' # Close the LSM configuration option group
self.verbose(3,
'Test LSM with config: ' + args + ' count: ' + str(self.nrecs))
SimpleDataSet(self, self.uri, self.nrecs).populate()
# TODO: Adding an explicit drop here can cause deadlocks, if a merge
# is still happening. See issue #349.
# self.session.drop(self.uri)
if __name__ == '__main__':
wttest.run()
| test_lsm |
0003_ingredient.py | # Generated by Django 3.1.7 on 2021-03-10 08:00
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
| dependencies = [
('core', '0002_tag'),
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
] |
|
calendar.js | function calendar(booking) {
let result = ""
let notEmpty = {}
const afterDay = today()
afterDay.setDate(afterDay.getDate() + 32)
booking.forEach((b) => {
for (let d = new Date(b.date_min); d <= new Date(b.date_max); d.setDate(d.getDate() + 1)) {
notEmpty[d.getTime()] = true
}
})
result += '<div class="calendar">'
for (let d = today(); d < afterDay; d.setDate(d.getDate() + 1)) {
result += '<div class="calendar__item' + (notEmpty[d.getTime()] === undefined ? ' calendar__item--booking' : '') + '">'
result += d.getDate().toString()
result += '</div>'
}
result += '</div>'
return result
}
function | () {
const d = new Date(new Date().toISOString().slice(0, 10))
d.setHours(0,0,0,0)
return d
} | today |
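// Usage sketch (hypothetical booking data): builds a 32-day HTML strip in which
// days not covered by any [date_min, date_max] interval get the
// 'calendar__item--booking' modifier class:
//     document.body.innerHTML = calendar([{date_min: '2021-03-01', date_max: '2021-03-05'}])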
config.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by Tong He and CBIG under MIT license:
https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
"""
import os
import numpy as np
class config:
BASE_DIR = '../../../../../../data/fmri_predict_behavior'
CUR_DIR = os.getcwd()
INTER_DIR = os.path.join(BASE_DIR, 'He2019_data')
GRAPH_FOLDER = os.path.join(INTER_DIR, 'graph')
RAMDOM_SEED = 42
OUT_PATH = 'log'
| HCP_SUBJECT_LIST = 'He2019_hcp_953_split.mat'
HCP_ORIG_DIR = os.path.join(BASE_DIR, 'original_data_953')
HCP_INTER_DIR = os.path.join(INTER_DIR, 'HCP')
HCP_MEASURE_SETS = ['Cognitive', 'Personality_Task', 'Social_Emotion']
HCP_NUM_FOLD = 20
HCP_NUM_SUBJECT = 953
HCP_N_DIMENSION = 419
HCP_BATCH_SIZE = 128
HCP_MEASURE_SETS_NUM = [13, 22, 23]
HCP_N_MEASURE = int(np.sum(HCP_MEASURE_SETS_NUM))
# Config for UKBB
UKBB_CORR_MAT = 'ukbb_ht_180205_FC_55.mat'
UKBB_SUBJECT_LIST = 'ukbb_subject_split.mat'
UKBB_ORIG_DIR = os.path.join(BASE_DIR, 'original_data_ukbb_8868')
UKBB_INTER_DIR = os.path.join(INTER_DIR, 'UKBB')
UKBB_MEASURE_SETS = ['1802_8868']
UKBB_NUM_SUBJECT = 8868
UKBB_RUNS = 5
UKBB_BATCH_SIZE = 128
UKBB_EPOCHS = 200
UKBB_EPOCHS_GCNN = 2000
UKBB_N_DIM = 55
# Config for example
EXAMPLE_N_SUBJECT = 40
EXAMPLE_N_FOLDS = 4 | # Config for HCP
HCP_CORR_MAT = 'FC_subject_953.mat' |
Redirect.js | "use strict";
/**
* Encapsulates a redirect to the given route.
*/
function | (to, params, query) {
this.to = to;
this.params = params;
this.query = query;
}
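// Usage sketch (hypothetical route name, params and query):
//     var redirect = new Redirect('user.profile', { id: 42 }, { tab: 'posts' });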
module.exports = Redirect; | Redirect |
pair.rs | // Copyright (c) 2015-2017 Contributors as noted in the AUTHORS file.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed except according to those terms.
pub use std::time::Duration;
pub use std::thread;
pub use std::io;
pub use scaproust::*;
pub use super::{urls, make_session, make_timeout, sleep_some};
fn before_each() -> (Session, Socket, Socket, String) {
let _ = ::env_logger::init();
let mut session = make_session();
let mut left = session.create_socket::<Pair>().expect("Failed to create socket !");
let mut right = session.create_socket::<Pair>().expect("Failed to create socket !");
let url = urls::ipc::get();
let timeout = make_timeout();
left.set_send_timeout(timeout).expect("Failed to set send timeout !");
left.set_recv_timeout(timeout).expect("Failed to set recv timeout !");
right.set_send_timeout(timeout).expect("Failed to set send timeout !");
right.set_recv_timeout(timeout).expect("Failed to set recv timeout !");
(session, left, right, url)
}
#[test]
fn send_a_message_through_local_endpoint() {
let (session, mut left, mut right, url) = before_each();
left.bind(&url).unwrap();
sleep_some();
right.connect(&url).unwrap();
sleep_some();
let sent = vec![65, 66, 67];
left.send(sent).unwrap();
let received = right.recv().unwrap();
assert_eq!(vec![65, 66, 67], received);
drop(session);
}
#[test]
fn send_a_message_through_remote_endpoint() {
let (session, mut left, mut right, url) = before_each();
right.bind(&url).unwrap();
sleep_some();
left.connect(&url).unwrap();
sleep_some();
let sent = vec![65, 66, 67];
left.send(sent).unwrap();
let received = right.recv().unwrap();
assert_eq!(vec![65, 66, 67], received);
drop(session);
}
#[test]
fn | () {
let (session, mut left, mut right, url) = before_each();
left.bind(&url).unwrap();
sleep_some();
right.connect(&url).unwrap();
sleep_some();
let sent_ltr = vec![65, 66, 67];
left.send(sent_ltr).unwrap();
let received_ltr = right.recv().unwrap();
assert_eq!(vec![65, 66, 67], received_ltr);
let sent_rtl = vec![67, 66, 65];
right.send(sent_rtl).unwrap();
let received_rtl = left.recv().unwrap();
assert_eq!(vec![67, 66, 65], received_rtl);
drop(session);
}
| send_a_message_back_and_forth |
imgFlip.js | /* Unrecoverable content: a Dean Edwards packer (eval/unpack) blob bundling jQuery 1.4.2, truncated mid-stream in the source — minified payload omitted. */
b,d,f,e;a=1s[0]=c.1m.71(a||A.1m);a.50=6;b=a.1k.1P(".")<0&&!a.9P;7(!b){d=a.1k.2A(".");a.1k=d.3a();f=2F 4P("(^|\\\\.)"+d.27(0).3V().3D("\\\\.(?:.*\\\\.)?")+"(\\\\.|$)")}e=c.1h(6,"2u");d=e[a.1k];7(e&&d){d=d.27(0);e=0;19(17 j=d.18;e<j;e++){17 i=d[e];7(b||f.1e(i.4O)){a.2v=i.2v;a.1h=i.1h;a.41=i;i=i.2v.1E(6,1s);7(i!==w){a.4X=i;7(i===1d){a.3k();a.3W()}}7(a.6K())25}}}14 a.4X},4y:"bg bh bj bi 2C bt 4L 6x 9A 7y 50 1h bu bH 63 2v 4d bB bw 6B c0 bz bA by 6H 9z bx bv 4D bC bG bF 9F 2d 9C bD bE 4T".2A(" "),71:12(a){7(a[G])14 a;17 b=a;a=c.3U(b);19(17 d=6.4y.18,f;d;){f=6.4y[--d];a[f]=b[f]}7(!a.2d)a.2d=a.9F||s;7(a.2d.1f===3)a.2d=a.2d.1p;7(!a.4D&&a.63)a.4D=a.63===a.2d?a.9C:a.63;7(a.6H==1c&&a.6x!=1c){b=s.1T;d=s.1M;a.6H=a.6x+(b&&b.3i||d&&d.3i||0)-(b&&b.6n||d&&d.6n||0);a.9z=a.9A+(b&&b.3n||d&&d.3n||0)-(b&&b.5v||d&&d.5v||0)}7(!a.4T&&(a.4L||a.4L===0?a.4L:a.4d))a.4T=a.4L||a.4d;7(!a.6B&&a.7y)a.6B=a.7y;7(!a.4T&&a.2C!==w)a.4T=a.2C&1?1:a.2C&2?3:a.2C&4?2:0;14 a},1Z:bl,46:c.46,31:{2K:{3e:c.7c,3q:c.4W},3b:{2l:12(a){c.1m.2l(6,a.4n,c.1w({},a,{2v:7d}))},2b:12(a){17 b=1a,d=a.4n.1q(O,"");c.1i(c.1h(6,"2u").3b||[],12(){7(d===6.4n.1q(O,""))14 b=1d});b&&c.1m.2b(6,a.4n,7d)}},bp:{3e:12(a,b,d){7(6.5e)6.6I=d;14 1d},3q:12(a,b){7(6.6I===b)6.6I=1c}}}};17 6C=s.5Q?12(a,b,d){a.5Q(b,d,1d)}:12(a,b,d){a.7r("2W"+b,d)};c.3U=12(a){7(!6.3k)14 2F c.3U(a);7(a&&a.1k){6.78=a;6.1k=a.1k}1b 6.1k=a;6.bI=J();6[G]=1a};c.3U.2h={3k:12(){6.6P=Z;17 a=6.78;7(a){a.3k&&a.3k();a.bJ=1d}},3W:12(){6.6O=Z;17 a=6.78;7(a){a.3W&&a.3W();a.c4=1a}},c1:12(){6.6K=Z;6.3W()},6P:Y,6O:Y,6K:Y};17 6V=12(a){17 b=a.4D;24{19(;b&&b!==6;)b=b.1p;7(b!==6){a.1k=a.1h;c.1m.29.1E(6,1s)}}23(d){}},6M=12(a){a.1k=a.1h;c.1m.29.1E(6,1s)};c.1i({4q:"6A",4r:"6G"},12(a,b){c.1m.31[a]={3e:12(d){c.1m.2l(6,b,d&&d.1J?6M:6V,a)},3q:12(d){c.1m.2b(6,b,d&&d.1J?6M:6V)}}});7(!c.1x.8y)c.1m.31.3v={3e:12(){7(6.1n.1F()!=="4V"){c.1m.2l(6,"4c.74",12(a){17 b=a.2d,d=b.1k;7((d==="3v"||d==="6L")&&c(b).57("4V").18)14 6W("3v",6,1s)});c.1m.2l(6,"9D.74",12(a){17 b=a.2d,d=b.1k;7((d==="1K"||d==="4J")&&c(b).57("4V").18&&a.4d===13)14 6W("3v",6,1s)})}1b 14 1d},3q:12(){c.1m.2b(6,".74")}};7(!c.1x.9k){17 da=/52|38|2j/i,ea,6Z=12(a){17 b=a.1k,d=a.2D;7(b==="3f"||b==="3Y")d=a.2V;1b 7(b==="2j-69")d=a.5r>-1?c.2f(a.1C,12(f){14 f.4g}).3D("-"):"";1b 7(a.1n.1F()==="2j")d=a.5r;14 d},66=12(a,b){17 d=a.2d,f,e;7(!(!da.1e(d.1n)||d.9n)){f=c.1h(d,"6Y");e=6Z(d);7(a.1k!=="5c"||d.1k!=="3f")c.1h(d,"6Y",e);7(!(f===w||e===f))7(f!=1c||e){a.1k="4u";14 c.1m.2k(a,b,d)}}};c.1m.31.4u={2T:{5c:66,4c:12(a){17 b=a.2d,d=b.1k;7(d==="3f"||d==="3Y"||b.1n.1F()==="2j")14 66.1j(6,a)},af:12(a){17 b=a.2d,d=b.1k;7(a.4d===13&&b.1n.1F()!=="52"||a.4d===32&&(d==="3Y"||d==="3f")||d==="2j-69")14 66.1j(6,a)},cc:12(a){a=a.2d;c.1h(a,"6Y",6Z(a))}},3e:12(){7(6.1k==="4S")14 1d;19(17 a 1r ea)c.1m.2l(6,a+".9c",ea[a]);14 da.1e(6.1n)},3q:12(){c.1m.2b(6,".9c");14 da.1e(6.1n)}};ea=c.1m.31.4u.2T}s.37&&c.1i({5y:"7J",6m:"5c"},12(a,b){12 d(f){f=c.1m.71(f);f.1k=b;14 c.1m.29.1j(6,f)}c.1m.31[b]={3e:12(){6.37(a,d,1a)},3q:12(){6.5Q(a,d,1a)}}});c.1i(["8f","5i"],12(a,b){c.1v[b]=12(d,f,e){7(1g d==="1S"){19(17 j 1r d)6[b](j,f,d[j],e);14 6}7(c.1u(f)){e=f;f=w}17 i=b==="5i"?c.46(e,12(k){c(6).4z(k,i);14 e.1E(6,1s)}):e;7(d==="a1"&&b!=="5i")6.5i(d,f,e);1b{j=0;19(17 o=6.18;j<o;j++)c.1m.2l(6[j],d,i,f)}14 6}});c.1v.1w({4z:12(a,b){7(1g a==="1S"&&!a.3k)19(17 d 1r a)6.4z(d,a[d]);1b{d=0;19(17 f=6.18;d<f;d++)c.1m.2b(6[d],a,b)}14 6},ce:12(a,b,d,f){14 6.3b(b,d,f,a)},b4:12(a,b,d){14 1s.18===0?6.4z("3b"):6.8H(b,1c,d,a)},2k:12(a,b){14 6.1i(12(){c.1m.2k(a,b,6)})},6l:12(a,b){7(6[0]){a=c.3U(a);a.3k();a.3W();c.1m.2k(a,b,6[0]);14 
a.4X}},3N:12(a){19(17 b=1s,d=1;d<b.18;)c.46(a,b[d++]);14 6.4c(c.46(a,12(f){17 e=(c.1h(6,"8u"+a.1Z)||0)%d;c.1h(6,"8u"+a.1Z,e+1);f.3k();14 b[e].1E(6,1s)||1d}))},av:12(a,b){14 6.4q(a).4r(b||a)}});17 7l={5y:"7J",6m:"5c",4q:"6A",4r:"6G"};c.1i(["3b","8H"],12(a,b){c.1v[b]=12(d,f,e,j){17 i,o=0,k,n,r=j||6.1J,u=j?6:c(6.26);7(c.1u(f)){e=f;f=w}19(d=(d||"").2A(" ");(i=d[o++])!=1c;){j=O.1N(i);k="";7(j){k=j[0];i=i.1q(O,"")}7(i==="av")d.1t("4q"+k,"4r"+k);1b{n=i;7(i==="5y"||i==="6m"){d.1t(7l[i]+k);i+=k}1b i=(7l[i]||i)+k;b==="3b"?u.1i(12(){c.1m.2l(6,6y(i,r),{1h:f,1J:r,2v:e,4n:i,9x:e,6w:n})}):u.4z(6y(i,r),e)}}14 6}});c.1i("6m 5y 7J 5c 5t ci 68 a1 4c dS dT dR dQ 6A 6G 4q 4r 4u 2j 3v af 9D dO 2o".2A(" "),12(a,b){c.1v[b]=12(d){14 d?6.8f(b,d):6.2k(b)};7(c.5w)c.5w[b]=1a});A.33&&!A.37&&A.33("dP",12(){19(17 a 1r c.2O)7(c.2O[a].29)24{c.1m.2b(c.2O[a].29.1o)}23(b){}});(12(){12 a(g){19(17 h="",l,m=0;g[m];m++){l=g[m];7(l.1f===3||l.1f===4)h+=l.5L;1b 7(l.1f!==8)h+=a(l.2B)}14 h}12 b(g,h,l,m,q,p){q=0;19(17 v=m.18;q<v;q++){17 t=m[q];7(t){t=t[g];19(17 y=1d;t;){7(t.4f===l){y=m[t.5S];25}7(t.1f===1&&!p){t.4f=l;t.5S=q}7(t.1n.1F()===h){y=t;25}t=t[g]}m[q]=y}}}12 d(g,h,l,m,q,p){q=0;19(17 v=m.18;q<v;q++){17 t=m[q];7(t){t=t[g];19(17 y=1d;t;){7(t.4f===l){y=m[t.5S];25}7(t.1f===1){7(!p){t.4f=l;t.5S=q}7(1g h!=="1y"){7(t===h){y=1a;25}}1b 7(k.1z(h,[t]).18>0){y=t;25}}t=t[g]}m[q]=y}}}17 f=/((?:\\((?:\\([^()]+\\)|[^()]+)+\\)|\\[(?:\\[[^[\\]]*\\]|[\'"][^\'"]*[\'"]|[^[\\]\'"]+)+\\]|\\\\.|[^ >+~,(\\[\\\\]+)+|[>+~])(\\s*,\\s*)?((?:.|\\r|\\n)*)/g,e=0,j=5P.2h.aR,i=1d,o=1a;[0,0].3V(12(){o=1d;14 0});17 k=12(g,h,l,m){l=l||[];17 q=h=h||s;7(h.1f!==1&&h.1f!==9)14[];7(!g||1g g!=="1y")14 l;19(17 p=[],v,t,y,S,H=1a,M=x(h),I=g;(f.1N(""),v=f.1N(I))!==1c;){I=v[3];p.1t(v[1]);7(v[2]){S=v[3];25}}7(p.18>1&&r.1N(g))7(p.18===2&&n.2Y[p[0]])t=5p(p[0]+p[1],h);1b 19(t=n.2Y[p[0]]?[h]:k(p.3a(),h);p.18;){g=p.3a();7(n.2Y[g])g+=p.3a();t=5p(g,t)}1b{7(!m&&p.18>1&&h.1f===9&&!M&&n.2c.39.1e(p[0])&&!n.2c.39.1e(p[p.18-1])){v=k.1V(p.3a(),h,M);h=v.21?k.1z(v.21,v.4s)[0]:v.4s[0]}7(h){v=m?{21:p.5C(),4s:z(m)}:k.1V(p.5C(),p.18===1&&(p[0]==="~"||p[0]==="+")&&h.1p?h.1p:h,M);t=v.21?k.1z(v.21,v.4s):v.4s;7(p.18>0)y=z(t);1b H=1d;19(;p.18;){17 D=p.5C();v=D;7(n.2Y[D])v=p.5C();1b D="";7(v==1c)v=h;n.2Y[D](y,v,M)}}1b y=[]}y||(y=t);y||k.2o(D||g);7(j.1j(y)==="[1S 2Z]")7(H)7(h&&h.1f===1)19(g=0;y[g]!=1c;g++){7(y[g]&&(y[g]===1a||y[g].1f===1&&E(h,y[g])))l.1t(t[g])}1b 19(g=0;y[g]!=1c;g++)y[g]&&y[g].1f===1&&l.1t(t[g]);1b l.1t.1E(l,y);1b z(y,l);7(S){k(S,q,l,m);k.6X(l)}14 l};k.6X=12(g){7(B){i=o;g.3V(B);7(i)19(17 h=1;h<g.18;h++)g[h]===g[h-1]&&g.2y(h--,1)}14 g};k.8T=12(g,h){14 k(g,1c,1c,h)};k.1V=12(g,h,l){17 m,q;7(!g)14[];19(17 p=0,v=n.5k.18;p<v;p++){17 t=n.5k[p];7(q=n.5X[t].1N(g)){17 y=q[1];q.2y(1,1);7(y.6s(y.18-1)!=="\\\\"){q[1]=(q[1]||"").1q(/\\\\/g,"");m=n.1V[t](q,h,l);7(m!=1c){g=g.1q(n.2c[t],"");25}}}}m||(m=h.28("*"));14{4s:m,21:g}};k.1z=12(g,h,l,m){19(17 q=g,p=[],v=h,t,y,S=h&&h[0]&&x(h[0]);g&&h.18;){19(17 H 1r n.1z)7((t=n.5X[H].1N(g))!=1c&&t[2]){17 M=n.1z[H],I,D;D=t[1];y=1d;t.2y(1,1);7(D.6s(D.18-1)!=="\\\\"){7(v===p)p=[];7(n.7q[H])7(t=n.7q[H](t,v,l,p,m,S)){7(t===1a)dU}1b y=I=1a;7(t)19(17 U=0;(D=v[U])!=1c;U++)7(D){I=M(D,t,U,v);17 7m=m^!!I;7(l&&I!=1c)7(7m)y=1a;1b v[U]=1d;1b 7(7m){p.1t(D);y=1a}}7(I!==w){l||(v=p);g=g.1q(n.2c[H],"");7(!y)14[];25}}}7(g===q)7(y==1c)k.2o(g);1b 25;q=g}14 v};k.2o=12(g){8C"8D 2o, 8E 8K: "+g};17 
n=k.93={5k:["39","7p","4a"],2c:{39:/#((?:[\\w\\3Z-\\40-]|\\\\.)+)/,59:/\\.((?:[\\w\\3Z-\\40-]|\\\\.)+)/,7p:/\\[34=[\'"]*((?:[\\w\\3Z-\\40-]|\\\\.)+)[\'"]*\\]/,77:/\\[\\s*((?:[\\w\\3Z-\\40-]|\\\\.)+)\\s*(?:(\\S?=)\\s*([\'"]*)(.*?)\\3|)\\s*\\]/,4a:/^((?:[\\w\\3Z-\\40\\*-]|\\\\.)+)/,5E:/:(8M|3h|5g|4e)-dV(?:\\((5V|5U|[\\dn+-]*)\\))?/,3A:/:(3h|48|8I|az|4e|5g|5V|5U)(?:\\((\\d*)\\))?(?=[^-]|$)/,53:/:((?:[\\w\\3Z-\\40-]|\\\\.)+)(?:\\(([\'"]?)((?:\\([^\\)]+\\)|[^\\(\\)]*)+)\\2\\))?/},5X:{},6T:{"3c":"1U","19":"9s"},5Z:{2Q:12(g){14 g.2w("2Q")}},2Y:{"+":12(g,h){17 l=1g h==="1y",m=l&&!/\\W/.1e(h);l=l&&!m;7(m)h=h.1F();m=0;19(17 q=g.18,p;m<q;m++)7(p=g[m]){19(;(p=p.4b)&&p.1f!==1;);g[m]=l||p&&p.1n.1F()===h?p||1d:p===h}l&&k.1z(h,g,1a)},">":12(g,h){17 l=1g h==="1y";7(l&&!/\\W/.1e(h)){h=h.1F();19(17 m=0,q=g.18;m<q;m++){17 p=g[m];7(p){l=p.1p;g[m]=l.1n.1F()===h?l:1d}}}1b{m=0;19(q=g.18;m<q;m++)7(p=g[m])g[m]=l?p.1p:p.1p===h;l&&k.1z(h,g,1a)}},"":12(g,h,l){17 m=e++,q=d;7(1g h==="1y"&&!/\\W/.1e(h)){17 p=h=h.1F();q=b}q("1p",h,m,g,p,l)},"~":12(g,h,l){17 m=e++,q=d;7(1g h==="1y"&&!/\\W/.1e(h)){17 p=h=h.1F();q=b}q("4b",h,m,g,p,l)}},1V:{39:12(g,h,l){7(1g h.4l!=="2M"&&!l)14(g=h.4l(g[1]))?[g]:[]},7p:12(g,h){7(1g h.9q!=="2M"){17 l=[];h=h.9q(g[1]);19(17 m=0,q=h.18;m<q;m++)h[m].2w("34")===g[1]&&l.1t(h[m]);14 l.18===0?1c:l}},4a:12(g,h){14 h.28(g[1])}},7q:{59:12(g,h,l,m,q,p){g=" "+g[1].1q(/\\\\/g,"")+" ";7(p)14 g;p=0;19(17 v;(v=h[p])!=1c;p++)7(v)7(q^(v.1U&&(" "+v.1U+" ").1q(/[\\t\\n]/g," ").1P(g)>=0))l||m.1t(v);1b 7(l)h[p]=1d;14 1d},39:12(g){14 g[1].1q(/\\\\/g,"")},4a:12(g){14 g[1].1F()},5E:12(g){7(g[1]==="3h"){17 h=/(-?)(\\d*)n((?:\\+|-)?\\d*)/.1N(g[2]==="5V"&&"2n"||g[2]==="5U"&&"2n+1"||!/\\D/.1e(g[2])&&"e0+"+g[2]||g[2]);g[2]=h[1]+(h[2]||1)-0;g[3]=h[3]-0}g[0]=e++;14 g},77:12(g,h,l,m,q,p){h=g[1].1q(/\\\\/g,"");7(!p&&n.6T[h])g[1]=n.6T[h];7(g[2]==="~=")g[4]=" "+g[4]+" ";14 g},53:12(g,h,l,m,q){7(g[1]==="4E")7((f.1N(g[3])||"").18>1||/^\\w/.1e(g[3]))g[3]=k(g[3],1c,1c,h);1b{g=k.1z(g[3],h,l,1a^q);l||m.1t.1E(m,g);14 1d}1b 7(n.2c.3A.1e(g[0])||n.2c.5E.1e(g[0]))14 1a;14 g},3A:12(g){g.6U(1a);14 g}},2T:{dZ:12(g){14 g.5K===1d&&g.1k!=="2N"},5K:12(g){14 g.5K===1a},2V:12(g){14 g.2V===1a},4g:12(g){14 g.4g===1a},6o:12(g){14!!g.1A},4j:12(g){14!g.1A},8Y:12(g,h,l){14!!k(l[3],g).18},dY:12(g){14/h\\d/i.1e(g.1n)},1K:12(g){14"1K"===g.1k},3f:12(g){14"3f"===g.1k},3Y:12(g){14"3Y"===g.1k},4S:12(g){14"4S"===g.1k},4J:12(g){14"4J"===g.1k},3v:12(g){14"3v"===g.1k},6L:12(g){14"6L"===g.1k},8v:12(g){14"8v"===g.1k},2C:12(g){14"2C"===g.1k||g.1n.1F()==="2C"},38:12(g){14/38|2j|52|2C/i.1e(g.1n)}},8q:{4e:12(g,h){14 h===0},5g:12(g,h,l,m){14 h===m.18-1},5V:12(g,h){14 h%2===0},5U:12(g,h){14 h%2===1},az:12(g,h,l){14 h<l[3]-0},8I:12(g,h,l){14 h>l[3]-0},3h:12(g,h,l){14 l[3]-0===h},48:12(g,h,l){14 l[3]-0===h}},1z:{53:12(g,h,l,m){17 q=h[1],p=n.2T[q];7(p)14 p(g,l,h,m);1b 7(q==="54")14(g.8G||g.dW||a([g])||"").1P(h[3])>=0;1b 7(q==="4E"){h=h[3];l=0;19(m=h.18;l<m;l++)7(h[l]===g)14 1d;14 1a}1b k.2o("8D 2o, 8E 8K: "+q)},5E:12(g,h){17 l=h[1],m=g;dX(l){61"8M":61"4e":19(;m=m.4b;)7(m.1f===1)14 1d;7(l==="4e")14 1a;m=g;61"5g":19(;m=m.3d;)7(m.1f===1)14 1d;14 1a;61"3h":l=h[2];17 q=h[3];7(l===1&&q===0)14 1a;h=h[0];17 p=g.1p;7(p&&(p.4f!==h||!g.76)){17 v=0;19(m=p.1A;m;m=m.3d)7(m.1f===1)m.76=++v;p.4f=h}g=g.76-q;14 l===0?g===0:g%l===0&&g/l>=0}},39:12(g,h){14 g.1f===1&&g.2w("5a")===h},4a:12(g,h){14 h==="*"&&g.1f===1||g.1n.1F()===h},59:12(g,h){14(" "+(g.1U||g.2w("3c"))+" ").1P(h)>-1},77:12(g,h){17 l=h[1];g=n.5Z[l]?n.5Z[l](g):g[l]!=1c?g[l]:g.2w(l);l=g+"";17 m=h[2];h=h[4];14 
g==1c?m==="!=":m==="="?l===h:m==="*="?l.1P(h)>=0:m==="~="?(" "+l+" ").1P(h)>=0:!h?l&&g!==1d:m==="!="?l!==h:m==="^="?l.1P(h)===0:m==="$="?l.6s(l.18-h.18)===h:m==="|="?l===h||l.6s(0,h.18+1)===h+"-":1d},3A:12(g,h,l,m){17 q=n.8q[h[2]];7(q)14 q(g,l,h,m)}}},r=n.2c.3A;19(17 u 1r n.2c){n.2c[u]=2F 4P(n.2c[u].5A+/(?![^\\[]*\\])(?![^\\(]*\\))/.5A);n.5X[u]=2F 4P(/(^(?:.|\\r|\\n)*?)/.5A+n.2c[u].5A.1q(/\\\\(\\d+)/g,12(g,h){14"\\\\"+(h-0+1)}))}17 z=12(g,h){g=2Z.2h.27.1j(g,0);7(h){h.1t.1E(h,g);14 h}14 g};24{2Z.2h.27.1j(s.1T.2B,0)}23(C){z=12(g,h){h=h||[];7(j.1j(g)==="[1S 2Z]")2Z.2h.1t.1E(h,g);1b 7(1g g.18==="3M")19(17 l=0,m=g.18;l<m;l++)h.1t(g[l]);1b 19(l=0;g[l];l++)h.1t(g[l]);14 h}}17 B;7(s.1T.3P)B=12(g,h){7(!g.3P||!h.3P){7(g==h)i=1a;14 g.3P?-1:1}g=g.3P(h)&4?-1:g===h?0:1;7(g===0)i=1a;14 g};1b 7("4i"1r s.1T)B=12(g,h){7(!g.4i||!h.4i){7(g==h)i=1a;14 g.4i?-1:1}g=g.4i-h.4i;7(g===0)i=1a;14 g};1b 7(s.70)B=12(g,h){7(!g.1H||!h.1H){7(g==h)i=1a;14 g.1H?-1:1}17 l=g.1H.70(),m=h.1H.70();l.8U(g,0);l.9l(g,0);m.8U(h,0);m.9l(h,0);g=l.dN(e2.dC,m);7(g===0)i=1a;14 g};(12(){17 g=s.1O("1G"),h="2a"+(2F 9p).9e();g.2I="<a 34=\'"+h+"\'/>";17 l=s.1T;l.2R(g,l.1A);7(s.4l(h)){n.1V.39=12(m,q,p){7(1g q.4l!=="2M"&&!p)14(q=q.4l(m[1]))?q.5a===m[1]||1g q.3X!=="2M"&&q.3X("5a").5L===m[1]?[q]:w:[]};n.1z.39=12(m,q){17 p=1g m.3X!=="2M"&&m.3X("5a");14 m.1f===1&&p&&p.5L===q}}l.2s(g);l=g=1c})();(12(){17 g=s.1O("1G");g.2P(s.dD(""));7(g.28("*").18>0)n.1V.4a=12(h,l){l=l.28(h[1]);7(h[1]==="*"){h=[];19(17 m=0;l[m];m++)l[m].1f===1&&h.1t(l[m]);l=h}14 l};g.2I="<a 2Q=\'#\'></a>";7(g.1A&&1g g.1A.2w!=="2M"&&g.1A.2w("2Q")!=="#")n.5Z.2Q=12(h){14 h.2w("2Q",2)};g=1c})();s.5N&&12(){17 g=k,h=s.1O("1G");h.2I="<p 3c=\'97\'></p>";7(!(h.5N&&h.5N(".97").18===0)){k=12(m,q,p,v){q=q||s;7(!v&&q.1f===9&&!x(q))24{14 z(q.5N(m),p)}23(t){}14 g(m,q,p,v)};19(17 l 1r g)k[l]=g[l];h=1c}}();(12(){17 g=s.1O("1G");g.2I="<1G 3c=\'1e e\'></1G><1G 3c=\'1e\'></1G>";7(!(!g.58||g.58("e").18===0)){g.7i.1U="e";7(g.58("e").18!==1){n.5k.2y(1,0,"59");n.1V.59=12(h,l,m){7(1g l.58!=="2M"&&!m)14 l.58(h[1])};g=1c}}})();17 E=s.3P?12(g,h){14!!(g.3P(h)&16)}:12(g,h){14 g!==h&&(g.54?g.54(h):1a)},x=12(g){14(g=(g?g.1H||g:0).1T)?g.1n!=="dB":1d},5p=12(g,h){17 l=[],m="",q;19(h=h.1f?[h]:h;q=n.2c.53.1N(g);){m+=q[0];g=g.1q(n.2c.53,"")}g=n.2Y[g]?g+"*":g;q=0;19(17 p=h.18;q<p;q++)k(g,h[q],l);14 k.1z(m,l)};c.1V=k;c.21=k.93;c.21[":"]=c.21.2T;c.6N=k.6X;c.1K=a;c.7o=x;c.54=E})();17 eb=/dA$/,8A=/^(?:9r|9i|9m)/,8s=/,/;R=2Z.2h.27;17 72=12(a,b,d){7(c.1u(b))14 c.45(a,12(e,j){14!!b.1j(e,j,e)===d});1b 7(b.1f)14 c.45(a,12(e){14 e===b===d});1b 7(1g b==="1y"){17 f=c.45(a,12(e){14 e.1f===1});7(8V.1e(b))14 c.1z(b,f,!d);1b b=c.1z(b,f)}14 c.45(a,12(e){14 c.3I(e,b)>=0===d})};c.1v.1w({1V:12(a){19(17 b=6.2J("","1V",a),d=0,f=0,e=6.18;f<e;f++){d=b.18;c.1V(a,6[f],b);7(f>0)19(17 j=d;j<b.18;j++)19(17 i=0;i<d;i++)7(b[i]===b[j]){b.2y(j--,1);25}}14 b},8Y:12(a){17 b=c(a);14 6.1z(12(){19(17 d=0,f=b.18;d<f;d++)7(c.54(6,b[d]))14 1a})},4E:12(a){14 6.2J(72(6,a,1d),"4E",a)},1z:12(a){14 6.2J(72(6,a,1a),"1z",a)},4h:12(a){14!!a&&c.1z(a,6).18>0},57:12(a,b){7(c.2t(a)){17 d=[],f=6[0],e,j={},i;7(f&&a.18){e=0;19(17 o=a.18;e<o;e++){i=a[e];j[i]||(j[i]=c.21.2c.3A.1e(i)?c(i,b||6.26):i)}19(;f&&f.1H&&f!==b;){19(i 1r j){e=j[i];7(e.4H?e.5x(f)>-1:c(f).4h(e)){d.1t({1J:i,1o:f});2q j[i]}}f=f.1p}}14 d}17 k=c.21.2c.3A.1e(a)?c(a,b||6.26):1c;14 6.2f(12(n,r){19(;r&&r.1H&&r!==b;){7(k?k.5x(r)>-1:c(r).4h(a))14 r;r=r.1p}14 1c})},5x:12(a){7(!a||1g a==="1y")14 c.3I(6[0],a?c(a):6.6o().9v());14 c.3I(a.4H?a[0]:a,6)},2l:12(a,b){a=1g a==="1y"?c(a,b||6.26):c.36(a);b=c.3C(6.3L(),a);14 
6.2J(73(a[0])||73(b[0])?b:c.6N(b))},dy:12(){14 6.2l(6.75)}});c.1i({6o:12(a){14(a=a.1p)&&a.1f!==11?a:1c},9r:12(a){14 c.3B(a,"1p")},dz:12(a,b,d){14 c.3B(a,"1p",d)},dE:12(a){14 c.3h(a,2,"3d")},dF:12(a){14 c.3h(a,2,"4b")},dK:12(a){14 c.3B(a,"3d")},9m:12(a){14 c.3B(a,"4b")},dL:12(a,b,d){14 c.3B(a,"3d",d)},9i:12(a,b,d){14 c.3B(a,"4b",d)},dJ:12(a){14 c.6J(a.1p.1A,a)},9v:12(a){14 c.6J(a.1A)},aB:12(a){14 c.1n(a,"dI")?a.dG||a.dH.35:c.36(a.2B)}},12(a,b){c.1v[a]=12(d,f){17 e=c.2f(6,b,d);eb.1e(a)||(f=d);7(f&&1g f==="1y")e=c.1z(f,e);e=6.18>1?c.6N(e):e;7((6.18>1||8s.1e(f))&&8A.1e(a))e=e.e1();14 6.2J(e,a,R.1j(1s).3D(","))}});c.1w({1z:12(a,b,d){7(d)a=":4E("+a+")";14 c.1V.8T(a,b)},3B:12(a,b,d){17 f=[];19(a=a[b];a&&a.1f!==9&&(d===w||a.1f!==1||!c(a).4h(d));){a.1f===1&&f.1t(a);a=a[b]}14 f},3h:12(a,b,d){b=b||1;19(17 f=0;a;a=a[d])7(a.1f===1&&++f===b)25;14 a},6J:12(a,b){19(17 d=[];a;a=a.3d)a.1f===1&&a!==b&&d.1t(a);14 d}});17 7t=/ 5h\\d+="(?:\\d+|1c)"/g,V=/^\\s+/,7g=/(<([\\w:]+)[^>]*?)\\/>/g,8J=/^(?:6R|br|9K|6Q|eo|em|38|6S|e9|4I)$/i,7j=/<([\\w:]+)/,aJ=/<2r/i,aX=/<|&#?\\w+;/,7x=/<2a|<1S|<6Q|<49|<1l/i,7w=/2V\\s*(?:[^=]|=\\s*.2V.)/i,7k=12(a,b,d){14 8J.1e(d)?a:b+"></"+d+">"},F={49:[1,"<2j 69=\'69\'>","</2j>"],e7:[1,"<91>","</91>"],ay:[1,"<2i>","</2i>"],3T:[2,"<2i><2r>","</2r></2i>"],5m:[3,"<2i><2r><3T>","</3T></2r></2i>"],9K:[2,"<2i><2r></2r><79>","</79></2i>"],6R:[1,"<2f>","</2f>"],2L:[0,"",""]};F.e6=F.49;F.2r=F.e3=F.79=F.e4=F.ay;F.el=F.5m;7(!c.1x.9w)F.2L=[1,"1G<1G>","</1G>"];c.1v.1w({1K:12(a){7(c.1u(a))14 6.1i(12(b){17 d=c(6);d.1K(a.1j(6,b,d.1K()))});7(1g a!=="1S"&&a!==w)14 6.4j().2X((6[0]&&6[0].1H||s).4v(a));14 c.1K(6)},65:12(a){7(c.1u(a))14 6.1i(12(d){c(6).65(a.1j(6,d))});7(6[0]){17 b=c(a,6[0].1H).48(0).7b(1a);6[0].1p&&b.2R(6[0]);b.2f(12(){19(17 d=6;d.1A&&d.1A.1f===1;)d=d.1A;14 d}).2X(6)}14 6},aC:12(a){7(c.1u(a))14 6.1i(12(b){c(6).aC(a.1j(6,b))});14 6.1i(12(){17 b=c(6),d=b.aB();d.18?d.65(a):b.2X(a)})},e5:12(a){14 6.1i(12(){c(6).65(a)})},ec:12(){14 6.6o().1i(12(){c.1n(6,"1M")||c(6).4U(6.2B)}).42()},2X:12(){14 6.3x(1s,1a,12(a){6.1f===1&&6.2P(a)})},aG:12(){14 6.3x(1s,1a,12(a){6.1f===1&&6.2R(a,6.1A)})},5q:12(){7(6[0]&&6[0].1p)14 6.3x(1s,1d,12(b){6.1p.2R(b,6)});1b 7(1s.18){17 a=c(1s[0]);a.1t.1E(a,6.6r());14 6.2J(a,"5q",1s)}},7a:12(){7(6[0]&&6[0].1p)14 6.3x(1s,1d,12(b){6.1p.2R(b,6.3d)});1b 7(1s.18){17 a=6.2J(6,"7a",1s);a.1t.1E(a,c(1s[0]).6r());14 a}},2b:12(a,b){19(17 d=0,f;(f=6[d])!=1c;d++)7(!a||c.1z(a,[f]).18){7(!b&&f.1f===1){c.4A(f.28("*"));c.4A([f])}f.1p&&f.1p.2s(f)}14 6},4j:12(){19(17 a=0,b;(b=6[a])!=1c;a++)19(b.1f===1&&c.4A(b.28("*"));b.1A;)b.2s(b.1A);14 6},7b:12(a){17 b=6.2f(12(){7(!c.1x.7n&&!c.7o(6)){17 d=6.ed,f=6.1H;7(!d){d=f.1O("1G");d.2P(6.3F(1a));d=d.2I}14 c.7f([d.1q(7t,"").1q(/=([^="\'>\\s]+\\/)>/g,\'="$1">\').1q(V,"")],f)[0]}1b 14 6.3F(1a)});7(a===1a){7s(6,b);7s(6.1V("*"),b.1V("*"))}14 b},2U:12(a){7(a===w)14 6[0]&&6[0].1f===1?6[0].2I.1q(7t,""):1c;1b 7(1g a==="1y"&&!7x.1e(a)&&(c.1x.7h||!V.1e(a))&&!F[(7j.1N(a)||["",""])[1].1F()]){a=a.1q(7g,7k);24{19(17 b=0,d=6.18;b<d;b++)7(6[b].1f===1){c.4A(6[b].28("*"));6[b].2I=a}}23(f){6.4j().2X(a)}}1b c.1u(a)?6.1i(12(e){17 j=c(6),i=j.2U();j.4j().2X(12(){14 a.1j(6,e,i)})}):6.4j().2X(a);14 6},4U:12(a){7(6[0]&&6[0].1p){7(c.1u(a))14 6.1i(12(b){17 d=c(6),f=d.2U();d.4U(a.1j(6,b,f))});7(1g a!=="1y")a=c(a).ai();14 6.1i(12(){17 b=6.3d,d=6.1p;c(6).2b();b?c(b).5q(a):c(d).2X(a)})}1b 14 6.2J(c(c.1u(a)?a():a),"4U",a)},ai:12(a){14 6.2b(a,1a)},3x:12(a,b,d){12 f(u){14 c.1n(u,"2i")?u.28("2r")[0]||u.2P(u.1H.1O("2r")):u}17 e,j,i=a[0],o=[],k;7(!c.1x.5Y&&1s.18===3&&1g i==="1y"&&7w.1e(i))14 
6.1i(12(){c(6).3x(a,b,d,1a)});7(c.1u(i))14 6.1i(12(u){17 z=c(6);a[0]=i.1j(6,u,b?z.2U():w);z.3x(a,b,d)});7(6[0]){e=i&&i.1p;e=c.1x.1p&&e&&e.1f===11&&e.2B.18===6.18?{4Y:e}:7v(a,6,o);k=e.4Y;7(j=k.2B.18===1?(k=k.1A):k.1A){b=b&&c.1n(j,"3T");19(17 n=0,r=6.18;n<r;n++)d.1j(b?f(6[n],j):6[n],n>0||e.7u||6.18>1?k.3F(1a):k)}o.18&&c.1i(o,aF)}14 6}});c.7e={};c.1i({9j:"2X",ei:"aG",2R:"5q",eg:"7a",ee:"4U"},12(a,b){c.1v[a]=12(d){17 f=[];d=c(d);17 e=6.18===1&&6[0].1p;7(e&&e.1f===11&&e.2B.18===1&&d.18===1){d[b](6[0]);14 6}1b{e=0;19(17 j=d.18;e<j;e++){17 i=(e>0?6.7b(1a):6).3L();c.1v[b].1E(c(d[e]),i);f=f.5J(i)}14 6.2J(f,a,d.1J)}}});c.1w({7f:12(a,b,d,f){b=b||s;7(1g b.1O==="2M")b=b.1H||b[0]&&b[0].1H||s;19(17 e=[],j=0,i;(i=a[j])!=1c;j++){7(1g i==="3M")i+="";7(i){7(1g i==="1y"&&!aX.1e(i))i=b.4v(i);1b 7(1g i==="1y"){i=i.1q(7g,7k);17 o=(7j.1N(i)||["",""])[1].1F(),k=F[o]||F.2L,n=k[0],r=b.1O("1G");19(r.2I=k[1]+i+k[2];n--;)r=r.7i;7(!c.1x.2r){n=aJ.1e(i);o=o==="2i"&&!n?r.1A&&r.1A.2B:k[1]==="<2i>"&&!n?r.2B:[];19(k=o.18-1;k>=0;--k)c.1n(o[k],"2r")&&!o[k].2B.18&&o[k].1p.2s(o[k])}!c.1x.7h&&V.1e(i)&&r.2R(b.4v(V.1N(i)[0]),r.1A);i=r.2B}7(i.1f)e.1t(i);1b e=c.3C(e,i)}}7(d)19(j=0;e[j];j++)7(f&&c.1n(e[j],"2a")&&(!e[j].1k||e[j].1k.1F()==="1K/3G"))f.1t(e[j].1p?e[j].1p.2s(e[j]):e[j]);1b{e[j].1f===1&&e.2y.1E(e,[j+1,0].5J(c.36(e[j].28("2a"))));d.2P(e[j])}14 e},4A:12(a){19(17 b,d,f=c.2O,e=c.1m.31,j=c.1x.5T,i=0,o;(o=a[i])!=1c;i++)7(d=o[c.44]){b=f[d];7(b.2u)19(17 k 1r b.2u)e[k]?c.1m.2b(o,k):6C(o,k,b.29);7(j)2q o[c.44];1b o.4F&&o.4F(c.44);2q f[d]}}});17 9B=/z-?5x|ef-?cf|1X|9G|eh-?2H/i,6v=/9E\\([^)]*\\)/,6F=/1X=([^)]*)/,60=/6D/i,6k=/-([a-z])/ej,ad=/([A-Z])/g,a4=/^-?\\d+(?:3R)?$/i,a3=/^-?\\d/,a8={2g:"62",8Q:"2N",22:"5I"},9S=["aA","e8"],9T=["at","ek"],ac=s.56&&s.56.64,6E=c.1x.6p?"6p":"en",6h=12(a,b){14 b.5z()};c.1v.2e=12(a,b){14 X(6,a,b,1a,12(d,f,e){7(e===w)14 c.1Y(d,f);7(1g e==="3M"&&!9B.1e(f))e+="3R";c.1l(d,f,e)})};c.1w({1l:12(a,b,d){7(!a||a.1f===3||a.1f===8)14 w;7((b==="2p"||b==="2H")&&1Q(d)<0)d=w;17 f=a.1l||a,e=d!==w;7(!c.1x.1X&&b==="1X"){7(e){f.9G=1;b=7B(d,10)+""==="dM"?"":"9E(1X="+d*6u+")";a=f.1z||c.1Y(a,"1z")||"";f.1z=6v.1e(a)?a.1q(6v,b):b}14 f.1z&&f.1z.1P("1X=")>=0?1Q(6F.1N(f.1z)[1])/6u+"":""}7(60.1e(b))b=6E;b=b.1q(6k,6h);7(e)f[b]=d;14 f[b]},2e:12(a,b,d,f){7(b==="2p"||b==="2H"){17 e,j=b==="2p"?9S:9T;12 i(){e=b==="2p"?a.5D:a.9Y;f!=="4o"&&c.1i(j,12(){f||(e-=1Q(c.1Y(a,"5F"+6,1a))||0);7(f==="4p")e+=1Q(c.1Y(a,"4p"+6,1a))||0;1b e-=1Q(c.1Y(a,"4o"+6+"aQ",1a))||0})}a.5D!==0?i():c.9U(a,a8,i);14 3t.4C(0,3t.dw(e))}14 c.1Y(a,b,d)},1Y:12(a,b,d){17 f,e=a.1l;7(!c.1x.1X&&b==="1X"&&a.3o){f=6F.1e(a.3o.1z||"")?1Q(4P.$1)/6u+"":"";14 f===""?"1":f}7(60.1e(b))b=6E;7(!d&&e&&e[b])f=e[b];1b 7(ac){7(60.1e(b))b="6D";b=b.1q(ad,"-$1").1F();e=a.1H.56;7(!e)14 1c;7(a=e.64(a,1c))f=a.cF(b);7(b==="1X"&&f==="")f="1"}1b 7(a.3o){d=b.1q(6k,6h);f=a.3o[b]||a.3o[d];7(!a4.1e(f)&&a3.1e(f)){b=e.1D;17 j=a.6z.1D;a.6z.1D=a.3o.1D;e.1D=d==="cG"?"cH":f||0;f=e.cE+"3R";e.1D=b;a.6z.1D=j}}14 f},9U:12(a,b,d){17 f={};19(17 e 1r b){f[e]=a.1l[e];a.1l[e]=b[e]}d.1j(a);19(e 1r b)a.1l[e]=f[e]}});7(c.21&&c.21.2T){c.21.2T.2N=12(a){17 b=a.5D,d=a.9Y,f=a.1n.1F()==="3T";14 b===0&&d===0&&!f?1a:b>0&&d>0&&!f?1d:c.1Y(a,"22")==="3g"};c.21.2T.8r=12(a){14!c.21.2T.2N(a)}}17 9J=J(),ae=/<2a(.|\\s)*?\\/2a>/cD,a6=/2j|52/i,a7=/9Z|cA|cB|cC|2N|cI|3M|4J|cJ|cP|cQ|1K|cO|1B|cN/i,N=/=\\?(&|$)/,5O=/\\?/,ah=/(\\?|&)9L=.*?(&|$)/,ag=/^(\\w+:)?\\/\\/([^\\/?#]+)/,8B=/%20/g,9V=c.1v.5t;c.1v.1w({5t:12(a,b,d){7(1g a!=="1y")14 9V.1j(6,a);1b 7(!6.18)14 6;17 f=a.1P(" ");7(f>=0){17 e=a.27(f,a.18);a=a.27(0,f)}f="3H";7(b)7(c.1u(b)){d=b;b=1c}1b 
7(1g b==="1S"){b=c.4I(b,c.4B.82);f="84"}17 j=6;c.4Q({1B:a,1k:f,2m:"2U",1h:b,2E:12(i,o){7(o==="3p"||o==="7S")j.2U(e?c("<1G />").2X(i.6e.1q(ae,"")).1V(e):i.6e);d&&j.1i(d,[i.6e,o,i])}});14 6},cK:12(){14 c.4I(6.a9())},a9:12(){14 6.2f(12(){14 6.a5?c.36(6.a5):6}).1z(12(){14 6.34&&!6.5K&&(6.2V||a6.1e(6.1n)||a7.1e(6.1k))}).2f(12(a,b){a=c(6).3K();14 a==1c?1c:c.2t(a)?c.2f(a,12(d){14{34:b.34,2D:d}}):{34:b.34,2D:a}}).3L()}});c.1i("9M 7G 9R al 9Q aE".2A(" "),12(a,b){c.1v[b]=12(d){14 6.8f(b,d)}});c.1w({3L:12(a,b,d,f){7(c.1u(b)){f=f||d;d=b;b=1c}14 c.4Q({1k:"3H",1B:a,1h:b,3p:d,2m:f})},cL:12(a,b){14 c.3L(a,1c,b,"2a")},cM:12(a,b,d){14 c.3L(a,b,d,"3E")},cz:12(a,b,d,f){7(c.1u(b)){f=f||d;d=b;b={}}14 c.4Q({1k:"84",1B:a,1h:b,3p:d,2m:f})},cy:12(a){c.1w(c.4B,a)},4B:{1B:4M.2Q,2S:1a,1k:"3H",7C:"6g/x-cl-4V-cm",9O:1a,3J:1a,aK:A.7E&&(A.4M.83!=="4S:"||!A.9H)?12(){14 2F A.7E}:12(){24{14 2F A.9H("cn.ck")}23(a){}},5s:{4Z:"6g/4Z, 1K/4Z",2U:"1K/2U",2a:"1K/3G, 6g/3G",3E:"6g/3E, 1K/3G",1K:"1K/cj",2L:"*/*"}},6c:{},6d:{},4Q:12(a){12 b(){e.3p&&e.3p.1j(k,o,i,x);e.2S&&f("9Q",[x,e])}12 d(){e.2E&&e.2E.1j(k,x,i);e.2S&&f("9R",[x,e]);e.2S&&!--c.6q&&c.1m.2k("7G")}12 f(q,p){(e.26?c(e.26):c.1m).2k(q,p)}17 e=c.1w(1a,{},c.4B,a),j,i,o,k=a&&a.26||e,n=e.1k.5z();7(e.1h&&e.9O&&1g e.1h!=="1y")e.1h=c.4I(e.1h,e.82);7(e.2m==="5o"){7(n==="3H")N.1e(e.1B)||(e.1B+=(5O.1e(e.1B)?"&":"?")+(e.5o||"9N")+"=?");1b 7(!e.1h||!N.1e(e.1h))e.1h=(e.1h?e.1h+"&":"")+(e.5o||"9N")+"=?";e.2m="3E"}7(e.2m==="3E"&&(e.1h&&N.1e(e.1h)||N.1e(e.1B))){j=e.cg||"5o"+9J++;7(e.1h)e.1h=(e.1h+"").1q(N,"="+j+"$1");e.1B=e.1B.1q(N,"="+j+"$1");e.2m="2a";A[j]=A[j]||12(q){o=q;b();d();A[j]=w;24{2q A[j]}23(p){}z&&z.2s(C)}}7(e.2m==="2a"&&e.2O===1c)e.2O=1d;7(e.2O===1d&&n==="3H"){17 r=J(),u=e.1B.1q(ah,"$ch="+r+"$2");e.1B=u+(u===e.1B?(5O.1e(e.1B)?"&":"?")+"9L="+r:"")}7(e.1h&&n==="3H")e.1B+=(5O.1e(e.1B)?"&":"?")+e.1h;e.2S&&!c.6q++&&c.1m.2k("9M");r=(r=ag.1N(e.1B))&&(r[1]&&r[1]!==4M.83||r[2]!==4M.dx);7(e.2m==="2a"&&n==="3H"&&r){17 z=s.28("aI")[0]||s.1T,C=s.1O("2a");C.5M=e.1B;7(e.aM)C.co=e.aM;7(!j){17 B=1d;C.7z=C.3y=12(){7(!B&&(!6.3z||6.3z==="cp"||6.3z==="2E")){B=1a;b();d();C.7z=C.3y=1c;z&&C.1p&&z.2s(C)}}}z.2R(C,z.1A);14 w}17 E=1d,x=e.aK();7(x){e.aH?x.aP(n,e.1B,e.3J,e.aH,e.4J):x.aP(n,e.1B,e.3J);24{7(e.1h||a&&a.7C)x.4K("cv-cw",e.7C);7(e.aq){c.6c[e.1B]&&x.4K("aV-au-cx",c.6c[e.1B]);c.6d[e.1B]&&x.4K("aV-cu-ct",c.6d[e.1B])}r||x.4K("X-cq-cr","7E");x.4K("cs",e.2m&&e.5s[e.2m]?e.5s[e.2m]+", */*":e.5s.2L)}23(5p){}7(e.aT&&e.aT.1j(k,x,e)===1d){e.2S&&!--c.6q&&c.1m.2k("7G");x.43();14 1d}e.2S&&f("aE",[x,e]);17 g=x.3y=12(q){7(!x||x.3z===0||q==="43"){E||d();E=1a;7(x)x.3y=c.4W}1b 7(!E&&x&&(x.3z===4||q==="3O")){E=1a;x.3y=c.4W;i=q==="3O"?"3O":!c.as(x)?"2o":e.aq&&c.aD(x,e.1B)?"7S":"3p";17 p;7(i==="3p")24{o=c.ax(x,e.2m,e)}23(v){i="7V";p=v}7(i==="3p"||i==="7S")j||b();1b c.89(e,x,i,p);d();q==="3O"&&x.43();7(e.3J)x=1c}};24{17 h=x.43;x.43=12(){x&&h.1j(x);g("43")}}23(l){}e.3J&&e.3O>0&&5u(12(){x&&!E&&g("3O")},e.3O);24{x.cR(n==="84"||n==="cS"||n==="dj"?e.1h:1c)}23(m){c.89(e,x,1c,m);d()}e.3J||g();14 x}},89:12(a,b,d,f){7(a.2o)a.2o.1j(a.26||a,b,d,f);7(a.2S)(a.26?c(a.26):c.1m).2k("al",[b,a,f])},6q:0,as:12(a){24{14!a.3r&&4M.83==="4S:"||a.3r>=9h&&a.3r<dk||a.3r===aw||a.3r===dl||a.3r===0}23(b){}14 1d},aD:12(a,b){17 d=a.7Z("di-au"),f=a.7Z("dh");7(d)c.6c[b]=d;7(f)c.6d[b]=f;14 a.3r===aw||a.3r===0},ax:12(a,b,d){17 f=a.7Z("de-1k")||"",e=b==="4Z"||!b&&f.1P("4Z")>=0;a=e?a.df:a.6e;e&&a.1T.1n==="7V"&&c.2o("7V");7(d&&d.8R)a=d.8R(a,b);7(1g a==="1y")7(b==="3E"||!b&&f.1P("3E")>=0)a=c.8S(a);1b 7(b==="2a"||!b&&f.1P("3G")>=0)c.7W(a);14 a},4I:12(a,b){12 
d(i,o){7(c.2t(o))c.1i(o,12(k,n){b||/\\[\\]$/.1e(i)?f(i,n):d(i+"["+(1g n==="1S"||c.2t(n)?k:"")+"]",n)});1b!b&&o!=1c&&1g o==="1S"?c.1i(o,12(k,n){d(i+"["+k+"]",n)}):f(i,o)}12 f(i,o){o=c.1u(o)?o():o;e[e.18]=8P(i)+"="+8P(o)}17 e=[];7(b===w)b=c.4B.82;7(c.2t(a)||a.4H)c.1i(a,12(){f(6.34,6.2D)});1b 19(17 j 1r a)d(j,a[j]);14 e.3D("&").1q(8B,"+")}});17 5W={},9u=/3N|2x|2z/,9o=/^([+-]=)?([\\d+-.]+)(.*)$/,W,8b=[["2H","5B","dg","dm","do"],["2p","7F","du","8w","dv"],["1X"]];c.1v.1w({2x:12(a,b){7(a||a===0)14 6.47(K("2x",3),a,b);1b{a=0;19(b=6.18;a<b;a++){17 d=c.1h(6[a],"4m");6[a].1l.22=d||"";7(c.2e(6[a],"22")==="3g"){d=6[a].1n;17 f;7(5W[d])f=5W[d];1b{17 e=c("<"+d+" />").9j("1M");f=e.2e("22");7(f==="3g")f="5I";e.2b();5W[d]=f}c.1h(6[a],"4m",f)}}a=0;19(b=6.18;a<b;a++)6[a].1l.22=c.1h(6[a],"4m")||"";14 6}},2z:12(a,b){7(a||a===0)14 6.47(K("2z",3),a,b);1b{a=0;19(b=6.18;a<b;a++){17 d=c.1h(6[a],"4m");!d&&d!=="3g"&&c.1h(6[a],"4m",c.2e(6[a],"22"))}a=0;19(b=6.18;a<b;a++)6[a].1l.22="3g";14 6}},9f:c.1v.3N,3N:12(a,b){17 d=1g a==="5l";7(c.1u(a)&&c.1u(b))6.9f.1E(6,1s);1b a==1c||d?6.1i(12(){17 f=d?a:c(6).4h(":2N");c(6)[f?"2x":"2z"]()}):6.47(K("3N",3),a,b);14 6},dt:12(a,b,d){14 6.1z(":2N").2e("1X",0).2x().42().47({1X:b},a,d)},47:12(a,b,d,f){17 e=c.94(b,d,f);7(c.5j(a))14 6.1i(e.2E);14 6[e.2G===1d?"1i":"2G"](12(){17 j=c.1w({},e),i,o=6.1f===1&&c(6).4h(":2N"),k=6;19(i 1r a){17 n=i.1q(6k,6h);7(i!==n){a[n]=a[i];2q a[i];i=n}7(a[i]==="2z"&&o||a[i]==="2x"&&!o)14 j.2E.1j(6);7((i==="2H"||i==="2p")&&6.1l){j.22=c.2e(6,"22");j.3m=6.1l.3m}7(c.2t(a[i])){(j.6b=j.6b||{})[i]=a[i][1];a[i]=a[i][0]}}7(j.3m!=1c)6.1l.3m="2N";j.5b=c.1w({},a);c.1i(a,12(r,u){17 z=2F c.1R(k,j,r);7(9u.1e(u))z[u==="3N"?o?"2x":"2z":u](a);1b{17 C=9o.1N(u),B=z.5d(1a)||0;7(C){u=1Q(C[2]);17 E=C[3]||"3R";7(E!=="3R"){k.1l[r]=(u||1)+E;B=(u||1)/z.5d(1a)*B;k.1l[r]=B+E}7(C[1])u=(C[1]==="-="?-1:1)*u+B;z.5f(B,u,E)}1b z.5f(B,u,"")}});14 1a})},8i:12(a,b){17 d=c.51;a&&6.2G([]);6.1i(12(){19(17 f=d.18-1;f>=0;f--)7(d[f].1o===6){b&&d[f](1a);d.2y(f,1)}});b||6.3l();14 6}});c.1i({ds:K("2x",1),dp:K("2z",1),dq:K("3N",1),dr:{1X:"2x"},dd:{1X:"2z"}},12(a,b){c.1v[a]=12(d,f){14 6.47(b,d,f)}});c.1w({94:12(a,b,d){17 f=a&&1g a==="1S"?a:{2E:d||!d&&b||c.1u(a)&&a,3u:a,4w:d&&b||b&&!c.1u(b)&&b};f.3u=c.1R.dc?0:1g f.3u==="3M"?f.3u:c.1R.5H[f.3u]||c.1R.5H.2L;f.7N=f.2E;f.2E=12(){f.2G!==1d&&c(6).3l();c.1u(f.7N)&&f.7N.1j(6)};14 f},4w:{96:12(a,b,d,f){14 d+f*a},8j:12(a,b,d,f){14(-3t.cY(a*3t.cZ)/2+0.5)*f+d}},51:[],1R:12(a,b,d){6.1C=b;6.1o=a;6.1I=d;7(!b.4G)b.4G={}}});c.1R.2h={8h:12(){6.1C.3S&&6.1C.3S.1j(6.1o,6.3j,6);(c.1R.3S[6.1I]||c.1R.3S.2L)(6);7((6.1I==="2H"||6.1I==="2p")&&6.1o.1l)6.1o.1l.22="5I"},5d:12(a){7(6.1o[6.1I]!=1c&&(!6.1o.1l||6.1o.1l[6.1I]==1c))14 6.1o[6.1I];14(a=1Q(c.2e(6.1o,6.1I,a)))&&a>-d0?a:1Q(c.1Y(6.1o,6.1I))||0},5f:12(a,b,d){12 f(j){14 e.3S(j)}6.8c=J();6.5n=a;6.42=b;6.8k=d||6.8k||"3R";6.3j=6.5n;6.5R=6.6a=0;17 e=6;f.1o=6.1o;7(f()&&c.51.1t(f)&&!W)W=5e(c.1R.99,13)},2x:12(){6.1C.4G[6.1I]=c.1l(6.1o,6.1I);6.1C.2x=1a;6.5f(6.1I==="2p"||6.1I==="2H"?1:0,6.5d());c(6.1o).2x()},2z:12(){6.1C.4G[6.1I]=c.1l(6.1o,6.1I);6.1C.2z=1a;6.5f(6.5d(),0)},3S:12(a){17 b=J(),d=1a;7(a||b>=6.1C.3u+6.8c){6.3j=6.42;6.5R=6.6a=1;6.8h();6.1C.5b[6.1I]=1a;19(17 f 1r 6.1C.5b)7(6.1C.5b[f]!==1a)d=1d;7(d){7(6.1C.22!=1c){6.1o.1l.3m=6.1C.3m;a=c.1h(6.1o,"4m");6.1o.1l.22=a?a:6.1C.22;7(c.2e(6.1o,"22")==="3g")6.1o.1l.22="5I"}6.1C.2z&&c(6.1o).2z();7(6.1C.2z||6.1C.2x)19(17 e 1r 6.1C.5b)c.1l(6.1o,e,6.1C.4G[e]);6.1C.2E.1j(6.1o)}14 
1d}1b{e=b-6.8c;6.6a=e/6.1C.3u;a=6.1C.4w||(c.4w.8j?"8j":"96");6.5R=c.4w[6.1C.6b&&6.1C.6b[6.1I]||a](6.6a,e,0,1,6.1C.3u);6.3j=6.5n+(6.42-6.5n)*6.5R;6.8h()}14 1a}};c.1w(c.1R,{99:12(){19(17 a=c.51,b=0;b<a.18;b++)a[b]()||a.2y(b--,1);a.18||c.1R.8i()},8i:12(){cX(W);W=1c},5H:{cW:cT,cU:9h,2L:cV},3S:{1X:12(a){c.1l(a.1o,"1X",a.3j)},2L:12(a){7(a.1o.1l&&a.1o.1l[a.1I]!=1c)a.1o.1l[a.1I]=(a.1I==="2p"||a.1I==="2H"?3t.4C(0,a.3j):a.3j)+a.8k;1b a.1o[a.1I]=a.3j}}});7(c.21&&c.21.2T)c.21.2T.d1=12(a){14 c.45(c.51,12(b){14 a===b.1o}).18};c.1v.1L="8t"1r s.1T?12(a){17 b=6[0];7(a)14 6.1i(12(e){c.1L.7I(6,a,e)});7(!b||!b.1H)14 1c;7(b===b.1H.1M)14 c.1L.7P(b);17 d=b.8t(),f=b.1H;b=f.1M;f=f.1T;14{1W:d.1W+(8z.ak||c.1x.4t&&f.3n||b.3n)-(f.5v||b.5v||0),1D:d.1D+(8z.7T||c.1x.4t&&f.3i||b.3i)-(f.6n||b.6n||0)}}:12(a){17 b=6[0];7(a)14 6.1i(12(r){c.1L.7I(6,a,r)});7(!b||!b.1H)14 1c;7(b===b.1H.1M)14 c.1L.7P(b);c.1L.6t();17 d=b.4k,f=b,e=b.1H,j,i=e.1T,o=e.1M;f=(e=e.56)?e.64(b,1c):b.3o;19(17 k=b.30,n=b.6f;(b=b.1p)&&b!==o&&b!==i;){7(c.1L.7Q&&f.2g==="7M")25;j=e?e.64(b,1c):b.3o;k-=b.3n;n-=b.3i;7(b===d){k+=b.30;n+=b.6f;7(c.1L.9t&&!(c.1L.90&&/^t(d2|d|h)$/i.1e(b.1n))){k+=1Q(j.85)||0;n+=1Q(j.80)||0}f=d;d=b.4k}7(c.1L.92&&j.3m!=="8r"){k+=1Q(j.85)||0;n+=1Q(j.80)||0}f=j}7(f.2g==="2Y"||f.2g==="7Y"){k+=o.30;n+=o.6f}7(c.1L.7Q&&f.2g==="7M"){k+=3t.4C(i.3n,o.3n);n+=3t.4C(i.3i,o.3i)}14{1W:k,1D:n}};c.1L={6t:12(){17 a=s.1M,b=s.1O("1G"),d,f,e,j=1Q(c.1Y(a,"5B",1a))||0;c.1w(b.1l,{2g:"62",1W:0,1D:0,4p:0,4o:0,2p:"3Q",2H:"3Q",8Q:"2N"});b.2I="<1G 1l=\'2g:62;1W:0;1D:0;4p:0;4o:8L 8F #8p;5F:0;2p:3Q;2H:3Q;\'><1G></1G></1G><2i 1l=\'2g:62;1W:0;1D:0;4p:0;4o:8L 8F #8p;5F:0;2p:3Q;2H:3Q;\' d8=\'0\' 9g=\'0\'><3T><5m></5m></3T></2i>";a.2R(b,a.1A);d=b.1A;f=d.1A;e=d.3d.1A.1A;6.9t=f.30!==5;6.90=e.30===5;f.1l.2g="7M";f.1l.1W="d9";6.7Q=f.30===20||f.30===15;f.1l.2g=f.1l.1W="";d.1l.3m="2N";d.1l.2g="2Y";6.92=f.30===-5;6.9d=a.30!==j;a.2s(b);c.1L.6t=c.4W},7P:12(a){17 b=a.30,d=a.6f;c.1L.6t();7(c.1L.9d){b+=1Q(c.1Y(a,"5B",1a))||0;d+=1Q(c.1Y(a,"7F",1a))||0}14{1W:b,1D:d}},7I:12(a,b,d){7(/7Y/.1e(c.1Y(a,"2g")))a.1l.2g="2Y";17 f=c(a),e=f.1L(),j=7B(c.1Y(a,"1W",1a),10)||0,i=7B(c.1Y(a,"1D",1a),10)||0;7(c.1u(b))b=b.1j(a,d,e);d={1W:b.1W-e.1W+j,1D:b.1D-e.1D+i};"8O"1r b?b.8O.1j(a,d):f.2e(d)}};c.1v.1w({2g:12(){7(!6[0])14 1c;17 a=6[0],b=6.4k(),d=6.1L(),f=/^1M|2U$/i.1e(b[0].1n)?{1W:0,1D:0}:b.1L();d.1W-=1Q(c.1Y(a,"5B",1a))||0;d.1D-=1Q(c.1Y(a,"7F",1a))||0;f.1W+=1Q(c.1Y(b[0],"85",1a))||0;f.1D+=1Q(c.1Y(b[0],"80",1a))||0;14{1W:d.1W-f.1W,1D:d.1D-f.1D}},4k:12(){14 6.2f(12(){19(17 a=6.4k||s.1M;a&&!/^1M|2U$/i.1e(a.1n)&&c.2e(a,"2g")==="7Y";)a=a.4k;14 a})}});c.1i(["aA","at"],12(a,b){17 d="68"+b;c.1v[d]=12(f){17 e=6[0],j;7(!e)14 1c;7(f!==w)14 6.1i(12(){7(j=8a(6))j.8e(!a?f:c(j).3i(),a?f:c(j).3n());1b 6[d]=f});1b 14(j=8a(e))?"7T"1r j?j[a?"ak":"7T"]:c.1x.4t&&j.35.1T[d]||j.35.1M[d]:e[d]}});c.1i(["d7","aQ"],12(a,b){17 d=b.1F();c.1v["d6"+b]=12(){14 6[0]?c.2e(6[0],d,1d,"5F"):1c};c.1v["d3"+b]=12(f){14 6[0]?c.2e(6[0],d,1d,f?"4p":"4o"):1c};c.1v[d]=12(f){17 e=6[0];7(!e)14 f==1c?1c:6;7(c.1u(f))14 6.1i(12(j){17 i=c(6);i[d](f.1j(6,j,i[d]()))});14"8e"1r e&&e.35?e.35.d4==="d5"&&e.35.1T["8g"+b]||e.35.1M["8g"+b]:e.1f===9?3t.4C(e.1T["8g"+b],e.1M["68"+b],e.1T["68"+b],e.1M["1L"+b],e.1T["1L"+b]):f===w?c.2e(e,d):6.2e(d,1g 
f==="1y"?f:f+"3R")}});A.5h=A.$=c})(9I);',62,893,'||||||this|if|||||||||||||||||||||||||||||||||||||||||||||||||||||||||function||return|||var|length|for|true|else|null|false|test|nodeType|typeof|data|each|call|type|style|event|nodeName|elem|parentNode|replace|in|arguments|push|isFunction|fn|extend|support|string|filter|firstChild|url|options|left|apply|toLowerCase|div|ownerDocument|prop|selector|text|offset|body|exec|createElement|indexOf|parseFloat|fx|object|documentElement|className|find|top|opacity|curCSS|guid||expr|display|catch|try|break|context|slice|getElementsByTagName|handle|script|remove|match|target|css|map|position|prototype|table|select|trigger|add|dataType||error|width|delete|tbody|removeChild|isArray|events|handler|getAttribute|show|splice|hide|split|childNodes|button|value|complete|new|queue|height|innerHTML|pushStack|ready|_default|undefined|hidden|cache|appendChild|href|insertBefore|global|filters|html|checked|on|append|relative|Array|offsetTop|special||attachEvent|name|document|makeArray|addEventListener|input|ID|shift|live|class|nextSibling|setup|radio|none|nth|scrollLeft|now|preventDefault|dequeue|overflow|scrollTop|currentStyle|success|teardown|status|attr|Math|duration|submit|browser|domManip|onreadystatechange|readyState|POS|dir|merge|join|json|cloneNode|javascript|GET|inArray|async|val|get|number|toggle|timeout|compareDocumentPosition|1px|px|step|tr|Event|sort|stopPropagation|getAttributeNode|checkbox|u00c0|uFFFF|handleObj|end|abort|expando|grep|proxy|animate|eq|option|TAG|previousSibling|click|keyCode|first|sizcache|selected|is|sourceIndex|empty|offsetParent|getElementById|olddisplay|origType|border|margin|mouseenter|mouseleave|set|boxModel|change|createTextNode|easing|noData|props|unbind|cleanData|ajaxSettings|max|relatedTarget|not|removeAttribute|orig|jquery|param|password|setRequestHeader|charCode|location|isReady|namespace|RegExp|ajax|removeData|file|which|replaceWith|form|noop|result|fragment|xml|currentTarget|timers|textarea|PSEUDO|contains||defaultView|closest|getElementsByClassName|CLASS|id|curAnim|focusout|cur|setInterval|custom|last|jQuery|one|isEmptyObject|order|boolean|td|start|jsonp|ga|before|selectedIndex|accepts|load|setTimeout|clientTop|attrFn|index|focus|toUpperCase|source|marginTop|pop|offsetWidth|CHILD|padding|version|speeds|block|concat|disabled|nodeValue|src|querySelectorAll|ka|Object|removeEventListener|pos|sizset|deleteExpando|odd|even|la|leftMatch|checkClone|attrHandle|ha|case|absolute|fromElement|getComputedStyle|wrapAll|fa|JSON|scroll|multiple|state|specialEasing|lastModified|etag|responseText|offsetLeft|application|ja|trim|isPlainObject|ia|triggerHandler|blur|clientLeft|parent|cssFloat|active|toArray|substr|initialize|100|Na|preType|clientX|pa|runtimeStyle|mouseover|metaKey|Ca|float|Pa|Oa|mouseout|pageX|onbeforeunload|sibling|isImmediatePropagationStopped|image|Ea|unique|isPropagationStopped|isDefaultPrevented|embed|area|link|attrMap|unshift|Da|na|uniqueSort|_change_data|Fa|createRange|fix|Ia|qa|specialSubmit|prevObject|nodeIndex|ATTR|originalEvent|colgroup|after|clone|bindReady|oa|fragments|clean|Ka|leadingWhitespace|lastChild|La|Ma|Ga|Ha|noCloneEvent|isXMLDoc|NAME|preFilter|detachEvent|ra|Ja|cacheable|sa|ua|ta|ctrlKey|onload|tabIndex|parseInt|contentType|xa|XMLHttpRequest|marginLeft|ajaxStop|Ba|setOffset|focusin|scriptEval|ya|fixed|old|triggered|bodyOffset|supportsFixedPosition|onclick|notmodified|pageXOffset|inprogress|parsererror|globalEval|za|static|getResponseHeader|borderLeftWidth|ma|traditional|protocol|POST|borderTopWidth|Aa|remo
veClass|addClass|handleError|wa|va|startTime|constructor|scrollTo|bind|client|update|stop|swing|unit|init|Wa|Va|Ya|000|setFilters|visible|gb|getBoundingClientRect|lastToggle|reset|paddingLeft|Function|submitBubbles|self|fb|yb|throw|Syntax|unrecognized|solid|textContent|die|gt|hb|expression|5px|only|parse|using|encodeURIComponent|visibility|dataFilter|parseJSON|matches|setStart|Ua|createDocumentFragment|webkit|has|uaMatch|doesAddBorderForTableAndCells|fieldset|subtractsBorderForOverflowNotVisible|selectors|speed|DOMContentLoaded|linear|TEST|red|tick||fireEvent|specialChange|doesNotIncludeMarginInBodyOffset|getTime|_toggle|cellspacing|200|prevUntil|appendTo|changeBubbles|setEnd|prevAll|readOnly|Bb|Date|getElementsByName|parents|htmlFor|doesNotAddBorder|Ab|children|htmlSerialize|origHandler|Sa|pageY|clientY|kb|toElement|keypress|alpha|srcElement|zoom|ActiveXObject|window|sb|col|_|ajaxStart|callback|processData|exclusive|ajaxSuccess|ajaxComplete|pb|qb|swap|zb|Ta|Xa|offsetHeight|color|liveFired|unload|u00A0|nb|mb|elements|ub|vb|ob|serializeArray|||rb|lb|tb|keydown|xb|wb|detach|toggleClass|pageYOffset|ajaxError|__className__|hasClass|Za|Ra|ifModified|checkOn|httpSuccess|Top|Modified|hover|304|httpData|thead|lt|Left|contents|wrapInner|httpNotModified|ajaxSend|Qa|prepend|username|head|ib|xhr|frameElement|scriptCharset|hrefNormalized|setAttribute|open|Width|toString|doScroll|beforeSend|cssText|If|specified|jb|parentWindow|fA|512|bfnrt|isPrototypeOf|eE|undelegate|noConflict|size|hasOwnProperty|userAgent|navigator|||frameBorder|charAt||changed|altKey|attrChange|bubbles|attrName|can|1E8|clearQueue|delay|removeAttr|beforeunload|property||attributes|cancelable|detail|relatedNode|layerY|prevValue|originalTarget|offsetX|offsetY|layerX|screenX|view|wheelDelta|shiftKey|screenY|eventPhase|timeStamp|returnValue|radiotest|optSelected|readonly|maxlength|cellSpacing|maxLength|safari|rv|opera|Invalid|msie|compatible|mozilla|rowspan|rowSpan|applet|newValue|stopImmediatePropagation|getData|setData|cancelBubble|frameborder|useMap|colSpan|colspan|tabindex|||beforeactivate|usemap|delegate|weight|jsonpCallback|1_|resize|plain|XMLHTTP|www|urlencoded|Microsoft|charset|loaded|Requested|With|Accept|Match|None|Content|Type|Since|ajaxSetup|post|date|datetime|email|gi|pixelLeft|getPropertyValue|fontSize|1em|month|range|serialize|getScript|getJSON|week|time|search|tel|send|PUT|600|fast|400|slow|clearInterval|cos|PI|10000|animated|able|outer|compatMode|CSS1Compat|inner|Height|cellpadding|20px|||off|fadeOut|content|responseXML|marginBottom|Etag|Last|DELETE|300|1223|paddingTop||paddingBottom|slideUp|slideToggle|fadeIn|slideDown|fadeTo|marginRight|paddingRight|round|host|andSelf|parentsUntil|Until|HTML|START_TO_END|createComment|next|prev|contentDocument|contentWindow|iframe|siblings|nextAll|nextUntil|NaN|compareBoundaryPoints|keyup|onunload|mousemove|mouseup|dblclick|mousedown|continue|child|innerText|switch|header|enabled|0n|reverse|Range|tfoot|caption|wrap|optgroup|legend|Right|meta|||unwrap|outerHTML|replaceAll|font|insertAfter|line|prependTo|ig|Bottom|th|img|styleFloat|hr'.split('|'),0,{})) | $(document).ready(function(){
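// Slide the image up 110px on hover and back to 0 on mouse-out (220 ms each way).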
$('.box a').mouseover(function(){
$(this).stop().animate({"top":"-110px"}, 220);
});
$('.box a').mouseout(function(){
$(this).stop().animate({"top":"0"}, 220);
});
}) | |
test_hacsbase_data.py | """Data Test Suite."""
import pytest
from custom_components.hacs.base import HacsRepositories
from custom_components.hacs.enums import HacsGitHubRepo
from custom_components.hacs.utils.data import HacsData
from tests.async_mock import patch
@pytest.mark.asyncio
async def test_hacs_data_async_write1(hacs, repository):
data = HacsData(hacs)
repository.data.installed = True
repository.data.installed_version = "1"
hacs.repositories.register(repository)
await data.async_write()
@pytest.mark.asyncio
async def test_hacs_data_async_write2(hacs):
data = HacsData(hacs)
hacs.system.disabled_reason = None
hacs.repositories = HacsRepositories()
await data.async_write()
@pytest.mark.asyncio
async def test_hacs_data_restore_write_new(hacs, caplog):
data = HacsData(hacs)
await data.restore()
with patch("custom_components.hacs.utils.data.async_save_to_store") as mock_async_save_to_store:
await data.async_write()
assert mock_async_save_to_store.called
assert "Loading base repository information" in caplog.text
| @pytest.mark.asyncio
async def test_hacs_data_restore_write_not_new(hacs, caplog):
data = HacsData(hacs)
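    # Mocked store contents keyed by repository id: HACS itself (installed)
    # and one other repository, simulating a pre-existing data store.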
async def _mocked_loads(hass, key):
if key == "repositories":
return {
"172733314": {
"category": "integration",
"full_name": "hacs/integration",
"installed": True,
"show_beta": True,
},
"202226247": {
"category": "integration",
"full_name": "shbatm/hacs-isy994",
"installed": False,
},
}
elif key == "hacs":
return {}
elif key == "renamed_repositories":
return {}
else:
raise ValueError(f"No mock for {key}")
with patch("os.path.exists", return_value=True), patch(
"custom_components.hacs.utils.data.async_load_from_store",
side_effect=_mocked_loads,
):
await data.restore()
assert hacs.repositories.get_by_id("202226247")
assert hacs.repositories.get_by_full_name("shbatm/hacs-isy994")
assert hacs.repositories.get_by_id("172733314")
assert hacs.repositories.get_by_full_name(HacsGitHubRepo.INTEGRATION)
assert hacs.repositories.get_by_id("172733314").data.show_beta is True
assert hacs.repositories.get_by_id("172733314").data.installed is True
with patch("custom_components.hacs.utils.data.async_save_to_store") as mock_async_save_to_store:
await data.async_write()
assert mock_async_save_to_store.called
assert "Loading base repository information" not in caplog.text | |
test_mpc.py | import unittest
import os
import numpy as np
import pandas as pd
from scipy.signal import StateSpace
import matplotlib.pyplot as plt
import mshoot
def cfun(xdf, ydf):
"""
    :param xdf: DataFrame, model states
:param ydf: DataFrame, model outputs
:return: float
"""
qout = ydf['qout'].values
c = np.sum(qout ** 2) / qout.size
return c
class TestMPC(unittest.TestCase):
def setUp(self):
fmupath = os.path.join('resources', 'fmus', 'R1C1', 'R1C1.fmu')
parameters = {'C': 1e6, 'R': 0.01}
self.model = mshoot.SimFMU(
fmupath,
outputs=['qout', 'Tr'],
states=['heatCapacitor.T'],
parameters=parameters,
verbose=False)
def tearDown(self):
pass
def test_mpc(self):
# Inputs
t = np.arange(0, 3600 * 10, 3600)
inp = pd.DataFrame(index=pd.Index(t, name='time'), columns=['q', 'Tout'])
inp['q'] = np.full(t.size, 0)
inp['Tout'] = np.full(t.size, 273.15)
# Bounds
ubounds = [(0., 4000.)]
xbounds = [(293.15, 296.15)]
# Initial state
x0 = [293.65]
# Optimization
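        # step=1 control interval, horizon=3 prediction steps; the same FMU
        # serves as both the controller model and the emulator here.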
mpc = mshoot.MPCEmulation(emumod=self.model, cfun=cfun)
u, xctr, xemu, yemu, uhist = mpc.optimize(
model=self.model,
inp_ctr=inp.copy(),
inp_emu=inp.copy(),
free=['q'],
ubounds=ubounds,
xbounds=xbounds,
x0=x0,
ynominal=[4000., 293.15],
step=1,
horizon=3
)
# ax = u.plot(title='u')
# ax.set_ylim(0, 4000)
# ax = xemu.plot(title='xemu')
# ax.set_ylim(292.15, 296.15)
# plt.show()
# Assert the solution is correct
self.assertLess(abs(xemu['heatCapacitor.T'].iloc[-1] - 293.15), 0.3) # Ideally, should be even closer
# Validate emulation with optimized control
inp['q'] = u['q']
yvld, xvld = self.model.simulate(inp, x0)
# self.assertTrue(((yvld - yemu).abs() < 1e-3).all().all()) # Might not be true for FMUs *
self.assertTrue(((xvld - xemu).abs() < 1e-3).all().all()) # Might not be true for FMUs *
# * FMU results might be shifted in time by one time step.
# The reason is unknown, but FMU- or pyFMI-specific.
def test_mpc_inp_clb(self):
# Inputs
t = np.arange(0, 3600 * 10, 3600)
inp = pd.DataFrame(index=pd.Index(t, name='time'), columns=['q', 'Tout'])
inp['q'] = np.full(t.size, 0)
inp['Tout'] = np.full(t.size, 273.15)
# Bounds
ubounds = [(0., 4000.)]
xbounds = [(293.15, 296.15)]
# Initial state
x0 = [293.65]
# Input callback function
def inp_clb(index):
return inp.loc[index]
# Optimization
mpc = mshoot.MPCEmulation(emumod=self.model, cfun=cfun)
u, xctr, xemu, yemu, uhist = mpc.optimize(
model=self.model,
inp_ctr=None,
inp_clb=inp_clb,
inp_emu=inp.copy(),
free=['q'],
ubounds=ubounds,
xbounds=xbounds,
x0=x0,
ynominal=[4000., 293.15],
step=1,
horizon=3
)
# Assert the solution is correct
self.assertLess(abs(xemu['heatCapacitor.T'].iloc[-1] - 293.15), 0.3) # Ideally, should be even closer
# Validate emulation with optimized control
inp['q'] = u['q']
yvld, xvld = self.model.simulate(inp, x0)
# self.assertTrue(((yvld - yemu).abs() < 1e-3).all().all()) # Might not be true for FMUs *
self.assertTrue(((xvld - xemu).abs() < 1e-3).all().all()) # Might not be true for FMUs *
# * FMU results might be shifted in time by one time step.
# The reason is unknown, but FMU- or pyFMI-specific.
# def test_2_inputs(self):
# """THE SOLVER HAS PROBLEMS WITH GETTING THE RIGHT SOLUTION. (?)"""
# # Inputs
# t = np.arange(0, 3600 * 10, 3600)
# inp = pd.DataFrame(index=pd.Index(t, name='time'), columns=['q', 'Tout']) | # inp['q'] = np.full(t.size, 0)
# inp['Tout'] = np.full(t.size, 273.15)
# # Bounds
# ubounds = [(0., 10000.), (272.15, 275.)] # <-- Solver should try to yield Tout = 275
# xbounds = [(293.15, 296.15)]
# # Initial state
# x0 = [293.65]
# # Optimization
# mpc = mshoot.MPCEmulation(emumod=self.model, cfun=cfun)
# u, xctr, xemu, yemu, uhist = mpc.optimize(
# model=self.model,
# inp=inp,
# free=['q', 'Tout'],
# ubounds=ubounds,
# xbounds=xbounds,
# x0=x0,
# unominal=[4000., 273.15],
# ynominal=[4000., 293.15],
# step=1,
# horizon=4
# )
# ax = u.plot(title='u', subplots=True)
# ax = xemu.plot(title='xemu')
# plt.show()
# # Assert the solution is correct
# self.assertLess(abs(xemu['heatCapacitor.T'].iloc[-1] - 293.15), 0.01)
# # Validate emulation with optimized control
# inp['q'] = u['q']
# yvld, xvld = self.model.simulate(inp, x0)
# # self.assertTrue((yvld - yemu < 1e-3).all().all()) # Might not be true for FMUs *
# # self.assertTrue((xvld - xemu < 1e-3).all().all()) # Might not be true for FMUs *
# # * FMU results might be shifted in time by one time step.
# # The reason is unknown, but FMU- or pyFMI-specific.
if __name__ == '__main__':
unittest.main() | |
VerifierDB.py | """Class for storing SRP password verifiers."""
from utils.cryptomath import *
from utils.compat import *
import mathtls
from BaseDB import BaseDB
class VerifierDB(BaseDB):
"""This class represent an in-memory or on-disk database of SRP
password verifiers.
A VerifierDB can be passed to a server handshake to authenticate
a client based on one of the verifiers.
This class is thread-safe.
"""
def __init__(self, filename=None):
"""Create a new VerifierDB instance.
@type filename: str
@param filename: Filename for an on-disk database, or None for
an in-memory database. If the filename already exists, follow
this with a call to open(). To create a new on-disk database,
follow this with a call to create().
"""
BaseDB.__init__(self, filename, "verifier")
def _getItem(self, username, valueStr):
(N, g, salt, verifier) = valueStr.split(" ")
N = base64ToNumber(N)
g = base64ToNumber(g)
salt = base64ToString(salt)
verifier = base64ToNumber(verifier)
return (N, g, salt, verifier)
def __setitem__(self, username, verifierEntry):
"""Add a verifier entry to the database.
@type username: str
@param username: The username to associate the verifier with.
Must be less than 256 characters in length. Must not already
be in the database.
@type verifierEntry: tuple
@param verifierEntry: The verifier entry to add. Use
L{tlslite.VerifierDB.VerifierDB.makeVerifier} to create a
verifier entry.
"""
BaseDB.__setitem__(self, username, verifierEntry)
def _setItem(self, username, value):
if len(username)>=256:
raise ValueError("username too long")
N, g, salt, verifier = value
N = numberToBase64(N)
g = numberToBase64(g)
salt = stringToBase64(salt)
verifier = numberToBase64(verifier)
valueStr = " ".join( (N, g, salt, verifier) )
return valueStr
def | (self, value, username, param):
(N, g, salt, verifier) = value
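        # Recompute the expected verifier: x is derived from (salt, username,
        # param) and v = g^x mod N; the check passes iff v equals the stored verifier.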
x = mathtls.makeX(salt, username, param)
v = powMod(g, x, N)
return (verifier == v)
def makeVerifier(username, password, bits):
"""Create a verifier entry which can be stored in a VerifierDB.
@type username: str
@param username: The username for this verifier. Must be less
than 256 characters in length.
@type password: str
@param password: The password for this verifier.
@type bits: int
        @param bits: This value specifies which SRP group parameters
to use. It must be one of (1024, 1536, 2048, 3072, 4096, 6144,
8192). Larger values are more secure but slower. 2048 is a
good compromise between safety and speed.
@rtype: tuple
@return: A tuple which may be stored in a VerifierDB.
"""
return mathtls.makeVerifier(username, password, bits)
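    # Illustrative use: db[username] = VerifierDB.makeVerifier(username, password, 2048)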
makeVerifier = staticmethod(makeVerifier) | _checkItem |
csr.py | # Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
from .flags import FlagGroups
from .wsr import WSRFile
class CSRFile:
| '''A model of the CSR file'''
def __init__(self) -> None:
self.flags = FlagGroups()
self._known_indices = set()
self._known_indices.add(0x7c0) # FG0
self._known_indices.add(0x7c1) # FG1
self._known_indices.add(0x7c8) # FLAGS
for idx in range(0x7d0, 0x7d8):
self._known_indices.add(idx) # MODi
self._known_indices.add(0x7d8) # RND_PREFETCH
self._known_indices.add(0xfc0) # RND
self._known_indices.add(0xfc1) # URND
@staticmethod
def _get_field(field_idx: int, field_size: int, val: int) -> int:
mask = (1 << field_size) - 1
return (val >> (field_size * field_idx)) & mask
@staticmethod
def _set_field(field_idx: int, field_size: int, field_val: int,
old_val: int) -> int:
assert 0 <= field_val < (1 << field_size)
mask = (1 << field_size) - 1
shift = field_size * field_idx
return (old_val & ~(mask << shift)) | (field_val << shift)
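    # Illustrative check (not from the source): with field_size=4,
    # _get_field(1, 4, 0xAB) == 0xA and _set_field(1, 4, 0xC, 0xAB) == 0xCB,
    # because field 1 occupies bits [7:4].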
def check_idx(self, idx: int) -> bool:
'''Return True if idx points to a valid CSR; False otherwise.'''
return idx in self._known_indices
def read_unsigned(self, wsrs: WSRFile, idx: int) -> int:
if 0x7c0 <= idx <= 0x7c1:
# FG0/FG1
fg = idx - 0x7c0
return self._get_field(fg, 4, self.flags.read_unsigned())
if idx == 0x7c8:
# FLAGS register
return self.flags.read_unsigned()
if 0x7d0 <= idx <= 0x7d7:
# MOD0 .. MOD7. MODi is bits [32*(i+1)-1..32*i]
mod_n = idx - 0x7d0
return self._get_field(mod_n, 32, wsrs.MOD.read_unsigned())
if idx == 0x7d8:
# RND_PREFETCH register
return 0
if idx == 0xfc0:
# RND register
return wsrs.RND.read_u32()
if idx == 0xfc1:
# URND register
return wsrs.URND.read_u32()
raise RuntimeError('Unknown CSR index: {:#x}'.format(idx))
def write_unsigned(self, wsrs: WSRFile, idx: int, value: int) -> None:
assert 0 <= value < (1 << 32)
if 0x7c0 <= idx <= 0x7c1:
# FG0/FG1
fg = idx - 0x7c0
old = self.flags.read_unsigned()
self.flags.write_unsigned(self._set_field(fg, 4, value & 0xf, old))
return
if idx == 0x7c8:
# FLAGS register
self.flags.write_unsigned(value)
return
if 0x7d0 <= idx <= 0x7d7:
# MOD0 .. MOD7. MODi is bits [32*(i+1)-1..32*i]. read,modify,write.
mod_n = idx - 0x7d0
old = wsrs.MOD.read_unsigned()
wsrs.MOD.write_unsigned(self._set_field(mod_n, 32, value, old))
return
if idx == 0x7d8:
# RND_PREFETCH
wsrs.RND.pending_request = True
return
if idx == 0xfc0:
# RND register (which ignores writes)
return
if idx == 0xfc1:
# URND register (which ignores writes)
return
raise RuntimeError('Unknown CSR index: {:#x}'.format(idx)) |
|
data_race.rs | //! Implementation of a data-race detector using Lamport Timestamps / Vector-clocks
//! based on the paper "Dynamic Race Detection for C++11":
//! https://www.doc.ic.ac.uk/~afd/homepages/papers/pdfs/2017/POPL.pdf
//! which does not report false-positives when fences are used, and gives better
//! accuracy in the presence of read-modify-write operations.
//!
//! The implementation contains modifications to correctly model the changes to the memory model in C++20
//! regarding the weakening of release sequences: http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0982r1.html.
//! Relaxed stores now unconditionally block all currently active release sequences and so per-thread tracking of release
//! sequences is not needed.
//!
//! The implementation also models races with memory allocation and deallocation by internally treating
//! allocation and deallocation as a type of write for the purpose of detecting data-races.
//!
//! This does not explore weak memory orders, and so can still miss data-races,
//! but should not report false-positives.
//!
//! Data-race definition from (https://en.cppreference.com/w/cpp/language/memory_model#Threads_and_data_races):
//! a data race occurs between two memory accesses if they are on different threads, at least one operation
//! is non-atomic, at least one operation is a write, and neither access happens-before the other. Read the link
//! for the full definition.
//!
//! This re-uses vector indexes for threads that are known to be unable to report data-races; this is valid
//! because it only re-uses vector indexes once all currently-active (not-terminated) threads have an internal
//! vector clock that happens-after the join operation of the candidate thread. Threads that have not been joined
//! on are not considered. Since the thread's vector clock will only increase, and a data-race implies that
//! there is some index x where clock[x] > thread_clock, the condition clock[candidate-idx] > thread_clock
//! can never hold and hence a data-race can never be reported in that vector index again.
//! This means that the thread-index can be safely re-used, starting on the next timestamp for the newly created
//! thread.
//!
//! The sequentially consistent ordering corresponds to the order in which the threads
//! are currently scheduled; this means that the data-race detector has no additional
//! logic for sequentially consistent accesses at the moment since they are indistinguishable
//! from acquire/release operations. If weak memory orderings are explored then this
//! may need to change or be updated accordingly.
//!
//! Per the C++ spec for the memory model a sequentially consistent operation:
//! "A load operation with this memory order performs an acquire operation,
//! a store performs a release operation, and read-modify-write performs
//! both an acquire operation and a release operation, plus a single total
//! order exists in which all threads observe all modifications in the same
//! order (see Sequentially-consistent ordering below) "
//! So in the absence of weak memory effects a seq-cst load & a seq-cst store are identical
//! to an acquire load and a release store, given the global sequentially consistent order
//! of the schedule.
//!
//! The timestamps used in the data-race detector assign each sequence of non-atomic operations
//! followed by a single atomic or concurrent operation a single timestamp.
//! Write, Read, Write, ThreadJoin will be represented by a single timestamp value on a thread.
//! This is because extra increment operations between the operations in the sequence are not
//! required for accurate reporting of data-race values.
//!
//! As per the paper, a thread's timestamp is only incremented after a release operation is performed
//! so some atomic operations that only perform acquires do not increment the timestamp. Due to shared
//! code some atomic operations may increment the timestamp when not necessary but this has no effect
//! on the data-race detection code.
//!
//! FIXME:
//! currently we have our own local copy of the currently active thread index and names; this is due
//! in part to the inability to access the current location of threads.active_thread inside the AllocExtra
//! read, write and deallocate functions and should be cleaned up in the future.
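//!
//! As a minimal illustration of the core idea (an editorial sketch using plain
//! slices rather than this module's `VClock` type): two accesses race iff
//! neither one's vector clock is pointwise less-than-or-equal to the other's:
//! ```rust,ignore
//! // Hypothetical helper: `a` happens-before `b` iff a[i] <= b[i] for all i.
//! fn happens_before(a: &[u32], b: &[u32]) -> bool {
//!     a.iter().zip(b).all(|(x, y)| x <= y)
//! }
//! let write = [2, 0]; // writer's clock at the conflicting write
//! let read = [1, 3]; // reader's clock at the conflicting read
//! assert!(!happens_before(&write, &read) && !happens_before(&read, &write)); // race
//! ```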
use std::{
cell::{Cell, Ref, RefCell, RefMut},
fmt::Debug,
mem,
rc::Rc,
};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::{mir, ty::layout::TyAndLayout};
use rustc_target::abi::Size;
use crate::{
ImmTy, Immediate, InterpResult, MPlaceTy, MemPlaceMeta, MiriEvalContext, MiriEvalContextExt,
OpTy, Pointer, RangeMap, Scalar, ScalarMaybeUninit, Tag, ThreadId, VClock, VTimestamp,
VectorIdx, MemoryKind, MiriMemoryKind
};
pub type AllocExtra = VClockAlloc;
pub type MemoryExtra = Rc<GlobalState>;
/// Valid atomic read-write operations, alias of atomic::Ordering (not non-exhaustive).
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicRwOp {
Relaxed,
Acquire,
Release,
AcqRel,
SeqCst,
}
/// Valid atomic read operations, subset of atomic::Ordering.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicReadOp {
Relaxed,
Acquire,
SeqCst,
}
/// Valid atomic write operations, subset of atomic::Ordering.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicWriteOp {
Relaxed,
Release,
SeqCst,
}
/// Valid atomic fence operations, subset of atomic::Ordering.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicFenceOp {
Acquire,
Release,
AcqRel,
SeqCst,
}
/// The current set of vector clocks describing the state
/// of a thread, contains the happens-before clock and
/// additional metadata to model atomic fence operations.
#[derive(Clone, Default, Debug)]
struct ThreadClockSet {
/// The increasing clock representing timestamps
/// that happen-before this thread.
clock: VClock,
/// The set of timestamps that will happen-before this
/// thread once it performs an acquire fence.
fence_acquire: VClock,
/// The last timestamp of happens-before relations that
/// have been released by this thread by a fence.
fence_release: VClock,
}
impl ThreadClockSet {
/// Apply the effects of a release fence to this
/// set of thread vector clocks.
#[inline]
fn apply_release_fence(&mut self) {
self.fence_release.clone_from(&self.clock);
}
/// Apply the effects of an acquire fence to this
/// set of thread vector clocks.
#[inline]
fn apply_acquire_fence(&mut self) {
self.clock.join(&self.fence_acquire);
}
/// Increment the happens-before clock at a
/// known index.
#[inline]
fn increment_clock(&mut self, index: VectorIdx) {
self.clock.increment_index(index);
}
/// Join the happens-before clock with that of
/// another thread, used to model thread join
/// operations.
fn join_with(&mut self, other: &ThreadClockSet) {
self.clock.join(&other.clock);
}
}
/// Error returned when a data race is found;
/// should be elaborated upon.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct DataRace;
/// Atomic memory cell clocks, stored externally
/// explicitly to reduce memory usage for the
/// common case where no atomic operations
/// exist on the memory cell.
#[derive(Clone, PartialEq, Eq, Default, Debug)]
struct AtomicMemoryCellClocks {
/// The clock-vector of the timestamp of the last atomic
/// read operation performed by each thread.
/// This detects potential data-races between atomic read
/// and non-atomic write operations.
read_vector: VClock,
/// The clock-vector of the timestamp of the last atomic
/// write operation performed by each thread.
/// This detects potential data-races between atomic write
/// and non-atomic read or write operations.
write_vector: VClock,
/// Synchronization vector for acquire-release semantics
/// contains the vector of timestamps that will
/// happen-before a thread if an acquire-load is
/// performed on the data.
sync_vector: VClock,
}
/// Type of write operation: allocating memory,
/// non-atomic writes, and deallocating memory
/// are all treated as writes for the purpose
/// of the data-race detector.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum WriteType {
/// Allocate memory.
Allocate,
/// Standard unsynchronized write.
Write,
/// Deallocate memory.
/// Note that when memory is deallocated first, later non-atomic accesses
/// will be reported as use-after-free, not as data races.
/// (Same for `Allocate` above.)
Deallocate,
}
impl WriteType {
fn get_descriptor(self) -> &'static str {
match self {
WriteType::Allocate => "Allocate",
WriteType::Write => "Write",
WriteType::Deallocate => "Deallocate",
}
}
}
/// Memory Cell vector clock metadata
/// for data-race detection.
#[derive(Clone, PartialEq, Eq, Debug)]
struct MemoryCellClocks {
/// The vector-clock timestamp of the last write
/// corresponding to the writing thread's timestamp.
write: VTimestamp,
/// The identifier of the vector index, corresponding to a thread
/// that performed the last write operation.
write_index: VectorIdx,
/// The type of operation that the write index represents,
/// either newly allocated memory, a non-atomic write or
/// a deallocation of memory.
write_type: WriteType,
/// The vector-clock of the timestamp of the last read operation
/// performed by a thread since the last write operation occurred.
/// It is reset to zero on each write operation.
read: VClock,
/// Atomic acquire & release sequence tracking clocks.
/// For non-atomic memory in the common case this
/// value is set to None.
atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
}
impl MemoryCellClocks {
/// Create a new set of clocks representing memory allocated
/// at a given vector timestamp and index.
fn new(alloc: VTimestamp, alloc_index: VectorIdx) -> Self |
/// Load the internal atomic memory cells if they exist.
#[inline]
fn atomic(&self) -> Option<&AtomicMemoryCellClocks> {
match &self.atomic_ops {
Some(op) => Some(&*op),
None => None,
}
}
/// Load or create the internal atomic memory metadata
/// if it does not exist.
#[inline]
fn atomic_mut(&mut self) -> &mut AtomicMemoryCellClocks {
self.atomic_ops.get_or_insert_with(Default::default)
}
/// Update memory cell data-race tracking for atomic
/// load acquire semantics; this is a no-op if this memory was
/// not used previously as atomic memory.
fn load_acquire(
&mut self,
clocks: &mut ThreadClockSet,
index: VectorIdx,
) -> Result<(), DataRace> {
self.atomic_read_detect(clocks, index)?;
if let Some(atomic) = self.atomic() {
clocks.clock.join(&atomic.sync_vector);
}
Ok(())
}
/// Update memory cell data-race tracking for atomic
/// load relaxed semantics; this is a no-op if this memory was
/// not used previously as atomic memory.
fn load_relaxed(
&mut self,
clocks: &mut ThreadClockSet,
index: VectorIdx,
) -> Result<(), DataRace> {
self.atomic_read_detect(clocks, index)?;
if let Some(atomic) = self.atomic() {
clocks.fence_acquire.join(&atomic.sync_vector);
}
Ok(())
}
/// Update the memory cell data-race tracking for atomic
/// store release semantics.
fn store_release(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
self.atomic_write_detect(clocks, index)?;
let atomic = self.atomic_mut();
atomic.sync_vector.clone_from(&clocks.clock);
Ok(())
}
/// Update the memory cell data-race tracking for atomic
/// store relaxed semantics.
fn store_relaxed(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
self.atomic_write_detect(clocks, index)?;
// The handling of release sequences was changed in C++20 and so
// the code here is different to the paper since now all relaxed
// stores block release sequences. The exception for same-thread
// relaxed stores has been removed.
let atomic = self.atomic_mut();
atomic.sync_vector.clone_from(&clocks.fence_release);
Ok(())
}
/// Update the memory cell data-race tracking for atomic
/// store release semantics for RMW operations.
fn rmw_release(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
self.atomic_write_detect(clocks, index)?;
let atomic = self.atomic_mut();
atomic.sync_vector.join(&clocks.clock);
Ok(())
}
/// Update the memory cell data-race tracking for atomic
/// store relaxed semantics for RMW operations.
fn rmw_relaxed(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
self.atomic_write_detect(clocks, index)?;
let atomic = self.atomic_mut();
atomic.sync_vector.join(&clocks.fence_release);
Ok(())
}
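// Editorial summary of the update rules above: plain stores *replace* the
// sync vector (`clone_from`), while RMW operations *join* into it, which is
// what preserves a release sequence headed by a previous store.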
/// Detect data-races with an atomic read, caused by a non-atomic write that does
/// not happen-before the atomic-read.
fn atomic_read_detect(
&mut self,
clocks: &ThreadClockSet,
index: VectorIdx,
) -> Result<(), DataRace> {
log::trace!("Atomic read with vectors: {:#?} :: {:#?}", self, clocks);
if self.write <= clocks.clock[self.write_index] {
let atomic = self.atomic_mut();
atomic.read_vector.set_at_index(&clocks.clock, index);
Ok(())
} else {
Err(DataRace)
}
}
/// Detect data-races with an atomic write, either with a non-atomic read or with
/// a non-atomic write.
fn atomic_write_detect(
&mut self,
clocks: &ThreadClockSet,
index: VectorIdx,
) -> Result<(), DataRace> {
log::trace!("Atomic write with vectors: {:#?} :: {:#?}", self, clocks);
if self.write <= clocks.clock[self.write_index] && self.read <= clocks.clock {
let atomic = self.atomic_mut();
atomic.write_vector.set_at_index(&clocks.clock, index);
Ok(())
} else {
Err(DataRace)
}
}
/// Detect races for non-atomic read operations at the current memory cell;
/// returns `Err(DataRace)` if a data-race is detected.
fn read_race_detect(
&mut self,
clocks: &ThreadClockSet,
index: VectorIdx,
) -> Result<(), DataRace> {
log::trace!("Unsynchronized read with vectors: {:#?} :: {:#?}", self, clocks);
if self.write <= clocks.clock[self.write_index] {
let race_free = if let Some(atomic) = self.atomic() {
atomic.write_vector <= clocks.clock
} else {
true
};
if race_free {
self.read.set_at_index(&clocks.clock, index);
Ok(())
} else {
Err(DataRace)
}
} else {
Err(DataRace)
}
}
/// Detect races for non-atomic write operations at the current memory cell;
/// returns `Err(DataRace)` if a data-race is detected.
fn write_race_detect(
&mut self,
clocks: &ThreadClockSet,
index: VectorIdx,
write_type: WriteType,
) -> Result<(), DataRace> {
log::trace!("Unsynchronized write with vectors: {:#?} :: {:#?}", self, clocks);
if self.write <= clocks.clock[self.write_index] && self.read <= clocks.clock {
let race_free = if let Some(atomic) = self.atomic() {
atomic.write_vector <= clocks.clock && atomic.read_vector <= clocks.clock
} else {
true
};
if race_free {
self.write = clocks.clock[index];
self.write_index = index;
self.write_type = write_type;
self.read.set_zero_vector();
Ok(())
} else {
Err(DataRace)
}
} else {
Err(DataRace)
}
}
}
/// Evaluation context extensions.
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
/// Atomic variant of read_scalar_at_offset.
fn read_scalar_at_offset_atomic(
&self,
op: OpTy<'tcx, Tag>,
offset: u64,
layout: TyAndLayout<'tcx>,
atomic: AtomicReadOp,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
let this = self.eval_context_ref();
let op_place = this.deref_operand(op)?;
let offset = Size::from_bytes(offset);
// Ensure that the following read at an offset is within bounds.
assert!(op_place.layout.size >= offset + layout.size);
let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
this.read_scalar_atomic(value_place, atomic)
}
/// Atomic variant of write_scalar_at_offset.
fn write_scalar_at_offset_atomic(
&mut self,
op: OpTy<'tcx, Tag>,
offset: u64,
value: impl Into<ScalarMaybeUninit<Tag>>,
layout: TyAndLayout<'tcx>,
atomic: AtomicWriteOp,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let op_place = this.deref_operand(op)?;
let offset = Size::from_bytes(offset);
// Ensure that the following write at an offset is within bounds.
assert!(op_place.layout.size >= offset + layout.size);
let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
this.write_scalar_atomic(value.into(), value_place, atomic)
}
/// Perform an atomic read operation at the memory location.
fn read_scalar_atomic(
&self,
place: MPlaceTy<'tcx, Tag>,
atomic: AtomicReadOp,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
let this = self.eval_context_ref();
let scalar = this.allow_data_races_ref(move |this| this.read_scalar(place.into()))?;
self.validate_atomic_load(place, atomic)?;
Ok(scalar)
}
/// Perform an atomic write operation at the memory location.
fn write_scalar_atomic(
&mut self,
val: ScalarMaybeUninit<Tag>,
dest: MPlaceTy<'tcx, Tag>,
atomic: AtomicWriteOp,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
this.allow_data_races_mut(move |this| this.write_scalar(val, dest.into()))?;
self.validate_atomic_store(dest, atomic)
}
/// Perform an atomic operation on a memory location.
fn atomic_op_immediate(
&mut self,
place: MPlaceTy<'tcx, Tag>,
rhs: ImmTy<'tcx, Tag>,
op: mir::BinOp,
neg: bool,
atomic: AtomicRwOp,
) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
let this = self.eval_context_mut();
let old = this.allow_data_races_mut(|this| this.read_immediate(place.into()))?;
// Atomics wrap around on overflow.
let val = this.binary_op(op, old, rhs)?;
let val = if neg { this.unary_op(mir::UnOp::Not, val)? } else { val };
this.allow_data_races_mut(|this| this.write_immediate(*val, place.into()))?;
this.validate_atomic_rmw(place, atomic)?;
Ok(old)
}
/// Perform an atomic exchange with a memory place and a new
/// scalar value, the old value is returned.
fn atomic_exchange_scalar(
&mut self,
place: MPlaceTy<'tcx, Tag>,
new: ScalarMaybeUninit<Tag>,
atomic: AtomicRwOp,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
let this = self.eval_context_mut();
let old = this.allow_data_races_mut(|this| this.read_scalar(place.into()))?;
this.allow_data_races_mut(|this| this.write_scalar(new, place.into()))?;
this.validate_atomic_rmw(place, atomic)?;
Ok(old)
}
/// Perform an atomic compare and exchange at a given memory location.
/// On success an atomic RMW operation is performed and on failure
/// only an atomic read occurs. If `can_fail_spuriously` is true,
/// then we treat it as a "compare_exchange_weak" operation, and
/// some portion of the time fail even when the values are actually
/// identical.
fn atomic_compare_exchange_scalar(
&mut self,
place: MPlaceTy<'tcx, Tag>,
expect_old: ImmTy<'tcx, Tag>,
new: ScalarMaybeUninit<Tag>,
success: AtomicRwOp,
fail: AtomicReadOp,
can_fail_spuriously: bool,
) -> InterpResult<'tcx, Immediate<Tag>> {
use rand::Rng as _;
let this = self.eval_context_mut();
// Failure ordering cannot be stronger than success ordering; therefore first attempt
// to read with the failure ordering and if successful then try again with the success
// read ordering and write in the success case.
// Read as immediate for the sake of `binary_op()`
let old = this.allow_data_races_mut(|this| this.read_immediate(place.into()))?;
// `binary_op` will bail if either of them is not a scalar.
let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0;
// If the operation would succeed, but is "weak", fail some portion
// of the time, based on `rate`.
let rate = this.memory.extra.cmpxchg_weak_failure_rate;
let cmpxchg_success = eq.to_bool()?
&& (!can_fail_spuriously || this.memory.extra.rng.borrow_mut().gen::<f64>() < rate);
let res = Immediate::ScalarPair(
old.to_scalar_or_uninit(),
Scalar::from_bool(cmpxchg_success).into(),
);
// Update ptr depending on comparison.
// if successful, perform a full rw-atomic validation
// otherwise treat this as an atomic load with the fail ordering.
if cmpxchg_success {
this.allow_data_races_mut(|this| this.write_scalar(new, place.into()))?;
this.validate_atomic_rmw(place, success)?;
} else {
this.validate_atomic_load(place, fail)?;
}
// Return the old value.
Ok(res)
}
/// Update the data-race detector for an atomic read occurring at the
/// associated memory-place and on the current thread.
fn validate_atomic_load(
&self,
place: MPlaceTy<'tcx, Tag>,
atomic: AtomicReadOp,
) -> InterpResult<'tcx> {
let this = self.eval_context_ref();
this.validate_atomic_op(
place,
atomic,
"Atomic Load",
move |memory, clocks, index, atomic| {
if atomic == AtomicReadOp::Relaxed {
memory.load_relaxed(&mut *clocks, index)
} else {
memory.load_acquire(&mut *clocks, index)
}
},
)
}
/// Update the data-race detector for an atomic write occurring at the
/// associated memory-place and on the current thread.
fn validate_atomic_store(
&mut self,
place: MPlaceTy<'tcx, Tag>,
atomic: AtomicWriteOp,
) -> InterpResult<'tcx> {
let this = self.eval_context_ref();
this.validate_atomic_op(
place,
atomic,
"Atomic Store",
move |memory, clocks, index, atomic| {
if atomic == AtomicWriteOp::Relaxed {
memory.store_relaxed(clocks, index)
} else {
memory.store_release(clocks, index)
}
},
)
}
/// Update the data-race detector for an atomic read-modify-write occurring
/// at the associated memory place and on the current thread.
fn validate_atomic_rmw(
&mut self,
place: MPlaceTy<'tcx, Tag>,
atomic: AtomicRwOp,
) -> InterpResult<'tcx> {
use AtomicRwOp::*;
let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
let release = matches!(atomic, Release | AcqRel | SeqCst);
let this = self.eval_context_ref();
this.validate_atomic_op(place, atomic, "Atomic RMW", move |memory, clocks, index, _| {
if acquire {
memory.load_acquire(clocks, index)?;
} else {
memory.load_relaxed(clocks, index)?;
}
if release {
memory.rmw_release(clocks, index)
} else {
memory.rmw_relaxed(clocks, index)
}
})
}
/// Update the data-race detector for an atomic fence on the current thread.
fn validate_atomic_fence(&mut self, atomic: AtomicFenceOp) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
if let Some(data_race) = &this.memory.extra.data_race {
data_race.maybe_perform_sync_operation(move |index, mut clocks| {
log::trace!("Atomic fence on {:?} with ordering {:?}", index, atomic);
// Apply data-race detection for the current fences
// this treats AcqRel and SeqCst as the same as an acquire
// and release fence applied in the same timestamp.
if atomic != AtomicFenceOp::Release {
// Either Acquire | AcqRel | SeqCst
clocks.apply_acquire_fence();
}
if atomic != AtomicFenceOp::Acquire {
// Either Release | AcqRel | SeqCst
clocks.apply_release_fence();
}
// Increment timestamp in case of release semantics.
Ok(atomic != AtomicFenceOp::Acquire)
})
} else {
Ok(())
}
}
fn reset_vector_clocks(
&mut self,
ptr: Pointer<Tag>,
size: Size
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
if let Some(data_race) = &mut this.memory.extra.data_race {
if data_race.multi_threaded.get() {
let alloc_meta = this.memory.get_raw_mut(ptr.alloc_id)?.extra.data_race.as_mut().unwrap();
alloc_meta.reset_clocks(ptr.offset, size);
}
}
Ok(())
}
}
/// Vector clock metadata for a logical memory allocation.
#[derive(Debug, Clone)]
pub struct VClockAlloc {
/// Assigning each byte a MemoryCellClocks.
alloc_ranges: RefCell<RangeMap<MemoryCellClocks>>,
/// Pointer to global state.
global: MemoryExtra,
}
impl VClockAlloc {
/// Create a new data-race detector for newly allocated memory.
pub fn new_allocation(global: &MemoryExtra, len: Size, kind: MemoryKind<MiriMemoryKind>) -> VClockAlloc {
let (alloc_timestamp, alloc_index) = match kind {
// User allocated and stack memory should track allocation.
MemoryKind::Machine(
MiriMemoryKind::Rust | MiriMemoryKind::C | MiriMemoryKind::WinHeap
) | MemoryKind::Stack => {
let (alloc_index, clocks) = global.current_thread_state();
let alloc_timestamp = clocks.clock[alloc_index];
(alloc_timestamp, alloc_index)
}
// Other global memory should trace races but be allocated at the 0 timestamp.
MemoryKind::Machine(
MiriMemoryKind::Global | MiriMemoryKind::Machine | MiriMemoryKind::Env |
MiriMemoryKind::ExternStatic | MiriMemoryKind::Tls
) | MemoryKind::CallerLocation | MemoryKind::Vtable => {
(0, VectorIdx::MAX_INDEX)
}
};
VClockAlloc {
global: Rc::clone(global),
alloc_ranges: RefCell::new(RangeMap::new(
len, MemoryCellClocks::new(alloc_timestamp, alloc_index)
)),
}
}
fn reset_clocks(&mut self, offset: Size, len: Size) {
let mut alloc_ranges = self.alloc_ranges.borrow_mut();
for (_, range) in alloc_ranges.iter_mut(offset, len) {
// Reset the portion of the range
*range = MemoryCellClocks::new(0, VectorIdx::MAX_INDEX);
}
}
// Find an index, if one exists, where the value
// in `l` is greater than the value in `r`.
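// For example (illustrative): with clock slices l = [1, 4, 2] and r = [1, 3, 2]
// this returns Some(1), since l[1] > r[1]; if `l` is longer than `r`, the first
// non-zero entry of the excess suffix of `l` is chosen.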
fn find_gt_index(l: &VClock, r: &VClock) -> Option<VectorIdx> {
log::trace!("Find index where not {:?} <= {:?}", l, r);
let l_slice = l.as_slice();
let r_slice = r.as_slice();
l_slice
.iter()
.zip(r_slice.iter())
.enumerate()
.find_map(|(idx, (&l, &r))| if l > r { Some(idx) } else { None })
.or_else(|| {
if l_slice.len() > r_slice.len() {
// By invariant, if l_slice is longer,
// then one element must be larger.
// This just validates that this is true
// and reports earlier elements first.
let l_remainder_slice = &l_slice[r_slice.len()..];
let idx = l_remainder_slice
.iter()
.enumerate()
.find_map(|(idx, &r)| if r == 0 { None } else { Some(idx) })
.expect("Invalid VClock Invariant");
Some(idx + r_slice.len())
} else {
None
}
})
.map(|idx| VectorIdx::new(idx))
}
/// Report a data-race found in the program.
/// This finds the two racing threads and the type
/// of data-race that occurred. This will also
/// return info about the memory location the data-race
/// occurred in.
#[cold]
#[inline(never)]
fn report_data_race<'tcx>(
global: &MemoryExtra,
range: &MemoryCellClocks,
action: &str,
is_atomic: bool,
pointer: Pointer<Tag>,
len: Size,
) -> InterpResult<'tcx> {
let (current_index, current_clocks) = global.current_thread_state();
let write_clock;
let (other_action, other_thread, other_clock) = if range.write
> current_clocks.clock[range.write_index]
{
// Convert the write action into the vector clock it
// represents for diagnostic purposes.
write_clock = VClock::new_with_index(range.write_index, range.write);
(range.write_type.get_descriptor(), range.write_index, &write_clock)
} else if let Some(idx) = Self::find_gt_index(&range.read, ¤t_clocks.clock) {
("Read", idx, &range.read)
} else if !is_atomic {
if let Some(atomic) = range.atomic() {
if let Some(idx) = Self::find_gt_index(&atomic.write_vector, ¤t_clocks.clock)
{
("Atomic Store", idx, &atomic.write_vector)
} else if let Some(idx) =
Self::find_gt_index(&atomic.read_vector, ¤t_clocks.clock)
{
("Atomic Load", idx, &atomic.read_vector)
} else {
unreachable!(
"Failed to report data-race for non-atomic operation: no race found"
)
}
} else {
unreachable!(
"Failed to report data-race for non-atomic operation: no atomic component"
)
}
} else {
unreachable!("Failed to report data-race for atomic operation")
};
// Load elaborated thread information about the racing thread actions.
let current_thread_info = global.print_thread_metadata(current_index);
let other_thread_info = global.print_thread_metadata(other_thread);
// Throw the data-race detection.
throw_ub_format!(
"Data race detected between {} on {} and {} on {}, memory({:?},offset={},size={})\
\n(current vector clock = {:?}, conflicting timestamp = {:?})",
action,
current_thread_info,
other_action,
other_thread_info,
pointer.alloc_id,
pointer.offset.bytes(),
len.bytes(),
current_clocks.clock,
other_clock
)
}
/// Detect data-races for an unsynchronized read operation; will not perform
/// data-race detection if `multi-threaded` is false, either due to no threads
/// being created or if it is temporarily disabled during a racy read or write
/// operation for which data-race detection is handled separately, for example
/// atomic read operations.
pub fn read<'tcx>(&self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
if self.global.multi_threaded.get() {
let (index, clocks) = self.global.current_thread_state();
let mut alloc_ranges = self.alloc_ranges.borrow_mut();
for (_, range) in alloc_ranges.iter_mut(pointer.offset, len) {
if let Err(DataRace) = range.read_race_detect(&*clocks, index) {
// Report data-race.
return Self::report_data_race(
&self.global,
range,
"Read",
false,
pointer,
len,
);
}
}
Ok(())
} else {
Ok(())
}
}
// Shared code for detecting data-races on unique access to a section of memory
fn unique_access<'tcx>(
&mut self,
pointer: Pointer<Tag>,
len: Size,
write_type: WriteType,
) -> InterpResult<'tcx> {
if self.global.multi_threaded.get() {
let (index, clocks) = self.global.current_thread_state();
for (_, range) in self.alloc_ranges.get_mut().iter_mut(pointer.offset, len) {
if let Err(DataRace) = range.write_race_detect(&*clocks, index, write_type) {
// Report data-race
return Self::report_data_race(
&self.global,
range,
write_type.get_descriptor(),
false,
pointer,
len,
);
}
}
Ok(())
} else {
Ok(())
}
}
/// Detect data-races for an unsynchronized write operation; will not perform
/// data-race detection if `multi-threaded` is false, either due to no threads
/// being created or if it is temporarily disabled during a racy read or write
/// operation.
pub fn write<'tcx>(&mut self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
self.unique_access(pointer, len, WriteType::Write)
}
/// Detect data-races for an unsynchronized deallocate operation; will not perform
/// data-race detection if `multi-threaded` is false, either due to no threads
/// being created or if it is temporarily disabled during a racy read or write
/// operation.
pub fn deallocate<'tcx>(&mut self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
self.unique_access(pointer, len, WriteType::Deallocate)
}
}
impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> {}
trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
// Temporarily allow data-races to occur; this should only be
// used if either one of the appropriate `validate_atomic` functions
// will be called to treat a memory access as atomic or if the memory
// being accessed should be treated as internal state that cannot be
// accessed by the interpreted program.
#[inline]
fn allow_data_races_ref<R>(&self, op: impl FnOnce(&MiriEvalContext<'mir, 'tcx>) -> R) -> R {
let this = self.eval_context_ref();
let old = if let Some(data_race) = &this.memory.extra.data_race {
data_race.multi_threaded.replace(false)
} else {
false
};
let result = op(this);
if let Some(data_race) = &this.memory.extra.data_race {
data_race.multi_threaded.set(old);
}
result
}
/// Same as `allow_data_races_ref`, this temporarily disables any data-race detection and
/// so should only be used for atomic operations or internal state that the program cannot
/// access.
#[inline]
fn allow_data_races_mut<R>(
&mut self,
op: impl FnOnce(&mut MiriEvalContext<'mir, 'tcx>) -> R,
) -> R {
let this = self.eval_context_mut();
let old = if let Some(data_race) = &this.memory.extra.data_race {
data_race.multi_threaded.replace(false)
} else {
false
};
let result = op(this);
if let Some(data_race) = &this.memory.extra.data_race {
data_race.multi_threaded.set(old);
}
result
}
/// Generic atomic operation implementation;
/// this accesses memory via get_raw instead of
/// get_raw_mut, due to issues calling get_raw_mut
/// for atomic loads from read-only memory.
/// FIXME: is this valid, or should get_raw_mut be used for
/// atomic-stores/atomic-rmw?
fn validate_atomic_op<A: Debug + Copy>(
&self,
place: MPlaceTy<'tcx, Tag>,
atomic: A,
description: &str,
mut op: impl FnMut(
&mut MemoryCellClocks,
&mut ThreadClockSet,
VectorIdx,
A,
) -> Result<(), DataRace>,
) -> InterpResult<'tcx> {
let this = self.eval_context_ref();
if let Some(data_race) = &this.memory.extra.data_race {
if data_race.multi_threaded.get() {
// Load and log the atomic operation.
let place_ptr = place.ptr.assert_ptr();
let size = place.layout.size;
let alloc_meta =
&this.memory.get_raw(place_ptr.alloc_id)?.extra.data_race.as_ref().unwrap();
log::trace!(
"Atomic op({}) with ordering {:?} on memory({:?}, offset={}, size={})",
description,
&atomic,
place_ptr.alloc_id,
place_ptr.offset.bytes(),
size.bytes()
);
// Perform the atomic operation.
let data_race = &alloc_meta.global;
data_race.maybe_perform_sync_operation(|index, mut clocks| {
for (_, range) in
alloc_meta.alloc_ranges.borrow_mut().iter_mut(place_ptr.offset, size)
{
if let Err(DataRace) = op(range, &mut *clocks, index, atomic) {
mem::drop(clocks);
return VClockAlloc::report_data_race(
&alloc_meta.global,
range,
description,
true,
place_ptr,
size,
).map(|_| true);
}
}
// This conservatively assumes all operations have release semantics
Ok(true)
})?;
// Log changes to atomic memory.
if log::log_enabled!(log::Level::Trace) {
for (_, range) in alloc_meta.alloc_ranges.borrow().iter(place_ptr.offset, size)
{
log::trace!(
"Updated atomic memory({:?}, offset={}, size={}) to {:#?}",
place.ptr.assert_ptr().alloc_id,
place_ptr.offset.bytes(),
size.bytes(),
range.atomic_ops
);
}
}
}
}
Ok(())
}
}
/// Extra metadata associated with a thread.
#[derive(Debug, Clone, Default)]
struct ThreadExtraState {
/// The vector index currently in use by the
/// thread; this is set to None
/// after the vector index has been re-used
/// and hence the value will never need to be
/// read during data-race reporting.
vector_index: Option<VectorIdx>,
/// The name of the thread, updated for better
/// diagnostics when reporting detected data
/// races.
thread_name: Option<Box<str>>,
/// Thread termination vector clock; this
/// is set on thread termination and is used
/// for joining on threads since the vector_index
/// may be re-used when the join operation occurs.
termination_vector_clock: Option<VClock>,
}
/// Global data-race detection state, contains the currently
/// executing thread as well as the vector-clocks associated
/// with each of the threads.
#[derive(Debug, Clone)]
pub struct GlobalState {
/// Set to true once the first additional
/// thread has launched, due to the dependency
/// between before and after a thread launch.
/// Any data-races can only occur after this
/// point, so earlier (single-threaded) execution
/// can skip recording data-races entirely.
multi_threaded: Cell<bool>,
/// Mapping of a vector index to a known set of thread
/// clocks; this is not directly a mapping from a thread id
/// since it may refer to multiple threads.
vector_clocks: RefCell<IndexVec<VectorIdx, ThreadClockSet>>,
/// Mapping of a given vector index to the current thread
/// that the execution is representing; this may change
/// if a vector index is re-assigned to a new thread.
vector_info: RefCell<IndexVec<VectorIdx, ThreadId>>,
/// The mapping of a given thread to associated thread metadata.
thread_info: RefCell<IndexVec<ThreadId, ThreadExtraState>>,
/// The current vector index being executed.
current_index: Cell<VectorIdx>,
/// Potential vector indices that could be re-used on thread creation;
/// values are inserted here after the thread has terminated and
/// been joined with, and hence may potentially become free
/// for use as the index for a new thread.
/// Elements in this set may still require the vector index to
/// report data-races, and can only be re-used after all
/// active vector-clocks catch up with the threads timestamp.
reuse_candidates: RefCell<FxHashSet<VectorIdx>>,
/// Counts the number of threads that are currently active;
/// if the number of active threads reduces to 1 and then
/// a join operation occurs with the remaining main thread
/// then multi-threaded execution may be disabled.
active_thread_count: Cell<usize>,
/// This contains threads that have terminated, but not yet joined
/// and so cannot become re-use candidates until a join operation
/// occurs.
/// The associated vector index will be moved into re-use candidates
/// after the join operation occurs.
terminated_threads: RefCell<FxHashMap<ThreadId, VectorIdx>>,
}
impl GlobalState {
/// Create a new global state, setup with just thread-id=0
/// advanced to timestamp = 1.
pub fn new() -> Self {
let global_state = GlobalState {
multi_threaded: Cell::new(false),
vector_clocks: RefCell::new(IndexVec::new()),
vector_info: RefCell::new(IndexVec::new()),
thread_info: RefCell::new(IndexVec::new()),
current_index: Cell::new(VectorIdx::new(0)),
active_thread_count: Cell::new(1),
reuse_candidates: RefCell::new(FxHashSet::default()),
terminated_threads: RefCell::new(FxHashMap::default()),
};
// Setup the main-thread since it is not explicitly created:
// uses vector index and thread-id 0; also the rust runtime gives
// the main-thread a name of "main".
let index = global_state.vector_clocks.borrow_mut().push(ThreadClockSet::default());
global_state.vector_info.borrow_mut().push(ThreadId::new(0));
global_state.thread_info.borrow_mut().push(ThreadExtraState {
vector_index: Some(index),
thread_name: Some("main".to_string().into_boxed_str()),
termination_vector_clock: None,
});
global_state
}
// Try to find vector index values that can potentially be re-used
// by a new thread instead of a new vector index being created.
fn find_vector_index_reuse_candidate(&self) -> Option<VectorIdx> {
let mut reuse = self.reuse_candidates.borrow_mut();
let vector_clocks = self.vector_clocks.borrow();
let vector_info = self.vector_info.borrow();
let terminated_threads = self.terminated_threads.borrow();
for &candidate in reuse.iter() {
let target_timestamp = vector_clocks[candidate].clock[candidate];
if vector_clocks.iter_enumerated().all(|(clock_idx, clock)| {
// The thread happens before the clock, and hence cannot report
// a data-race with the candidate index.
let no_data_race = clock.clock[candidate] >= target_timestamp;
// The vector represents a thread that has terminated and hence cannot
// report a data-race with the candidate index.
let thread_id = vector_info[clock_idx];
let vector_terminated =
reuse.contains(&clock_idx) || terminated_threads.contains_key(&thread_id);
// The vector index cannot report a race with the candidate index
// and hence allows the candidate index to be re-used.
no_data_race || vector_terminated
}) {
// All vector clocks for each vector index have caught up
// with the target timestamp, and the thread is known to have
// terminated, therefore this vector clock index cannot
// report any more data-races.
assert!(reuse.remove(&candidate));
return Some(candidate);
}
}
None
}
// Hook for thread creation; enables multi-threaded execution and marks
// the current thread's timestamp as happening-before the newly created thread.
#[inline]
pub fn thread_created(&self, thread: ThreadId) {
let current_index = self.current_index();
// Increment the number of active threads.
let active_threads = self.active_thread_count.get();
self.active_thread_count.set(active_threads + 1);
// Enable multi-threaded execution, there are now two threads
// so data-races are now possible.
self.multi_threaded.set(true);
// Load and setup the associated thread metadata
let mut thread_info = self.thread_info.borrow_mut();
thread_info.ensure_contains_elem(thread, Default::default);
// Assign a vector index for the thread, attempting to re-use an old
// vector index that can no longer report any data-races if possible.
let created_index = if let Some(reuse_index) = self.find_vector_index_reuse_candidate() {
// Now re-configure the re-use candidate, increment the clock
// for the new sync use of the vector.
let mut vector_clocks = self.vector_clocks.borrow_mut();
vector_clocks[reuse_index].increment_clock(reuse_index);
// Locate the old thread the vector was associated with and update
// it to represent the new thread instead.
let mut vector_info = self.vector_info.borrow_mut();
let old_thread = vector_info[reuse_index];
vector_info[reuse_index] = thread;
// Mark the thread the vector index was associated with as no longer
// representing a thread index.
thread_info[old_thread].vector_index = None;
reuse_index
} else {
// No vector re-use candidates available, instead create
// a new vector index.
let mut vector_info = self.vector_info.borrow_mut();
vector_info.push(thread)
};
log::trace!("Creating thread = {:?} with vector index = {:?}", thread, created_index);
// Mark the chosen vector index as in use by the thread.
thread_info[thread].vector_index = Some(created_index);
// Create a thread clock set if applicable.
let mut vector_clocks = self.vector_clocks.borrow_mut();
if created_index == vector_clocks.next_index() {
vector_clocks.push(ThreadClockSet::default());
}
// Now load the two clocks and configure the initial state.
let (current, created) = vector_clocks.pick2_mut(current_index, created_index);
// Join the created with current, since the current thread's
// previous actions happen-before the created thread.
created.join_with(current);
// Advance both threads after the synchronized operation.
// Both operations are considered to have release semantics.
current.increment_clock(current_index);
created.increment_clock(created_index);
}
/// Hook on a thread join to update the implicit happens-before relation
/// between the joined thread and the current thread.
#[inline]
pub fn thread_joined(&self, current_thread: ThreadId, join_thread: ThreadId) {
let mut clocks_vec = self.vector_clocks.borrow_mut();
let thread_info = self.thread_info.borrow();
// Load the vector clock of the current thread.
let current_index = thread_info[current_thread]
.vector_index
.expect("Performed thread join on thread with no assigned vector");
let current = &mut clocks_vec[current_index];
// Load the associated vector clock for the terminated thread.
let join_clock = thread_info[join_thread]
.termination_vector_clock
.as_ref()
.expect("Joined with thread but thread has not terminated");
// The join thread happens-before the current thread
// so update the current vector clock.
// This is not a release operation, so the clock is not incremented.
current.clock.join(join_clock);
// Check the number of active threads, if the value is 1
// then test for potentially disabling multi-threaded execution.
let active_threads = self.active_thread_count.get();
if active_threads == 1 {
// May potentially be able to disable multi-threaded execution.
let current_clock = &clocks_vec[current_index];
if clocks_vec
.iter_enumerated()
.all(|(idx, clocks)| clocks.clock[idx] <= current_clock.clock[idx])
{
// All thread terminations happen-before the current clock
// therefore no data-races can be reported until a new thread
// is created, so disable multi-threaded execution.
self.multi_threaded.set(false);
}
}
// If the thread is marked as terminated but not joined
// then move the thread to the re-use set.
let mut termination = self.terminated_threads.borrow_mut();
if let Some(index) = termination.remove(&join_thread) {
let mut reuse = self.reuse_candidates.borrow_mut();
reuse.insert(index);
}
}
/// On thread termination, the vector-clock may be re-used
/// in the future once all remaining thread-clocks catch
/// up with the time index of the terminated thread.
/// This assigns the thread termination a unique timestamp
/// which will be used when joining the thread.
/// This should be called strictly before any calls to
/// `thread_joined`.
#[inline]
pub fn thread_terminated(&self) {
let current_index = self.current_index();
// Increment the clock to a unique termination timestamp.
let mut vector_clocks = self.vector_clocks.borrow_mut();
let current_clocks = &mut vector_clocks[current_index];
current_clocks.increment_clock(current_index);
// Load the current thread id for the executing vector.
let vector_info = self.vector_info.borrow();
let current_thread = vector_info[current_index];
// Load the current thread metadata, and move to a terminated
// vector state, setting up the vector clock that all join operations
// will use.
let mut thread_info = self.thread_info.borrow_mut();
let current = &mut thread_info[current_thread];
current.termination_vector_clock = Some(current_clocks.clock.clone());
// Add this thread as a candidate for re-use after a thread join
// occurs.
let mut termination = self.terminated_threads.borrow_mut();
termination.insert(current_thread, current_index);
// Reduce the number of active threads, now that a thread has
// terminated.
let mut active_threads = self.active_thread_count.get();
active_threads -= 1;
self.active_thread_count.set(active_threads);
}
/// Hook for updating the local tracker of the currently
/// enabled thread; should always be updated whenever
/// `active_thread` in thread.rs is updated.
#[inline]
pub fn thread_set_active(&self, thread: ThreadId) {
let thread_info = self.thread_info.borrow();
let vector_idx = thread_info[thread]
.vector_index
.expect("Setting thread active with no assigned vector");
self.current_index.set(vector_idx);
}
/// Hook for updating the local tracker of the thread's name;
/// this should always mirror the local value in thread.rs.
/// The thread name is used for improved diagnostics
/// during a data-race.
#[inline]
pub fn thread_set_name(&self, thread: ThreadId, name: String) {
let name = name.into_boxed_str();
let mut thread_info = self.thread_info.borrow_mut();
thread_info[thread].thread_name = Some(name);
}
/// Attempt to perform a synchronized operation; this
/// will perform no operation if multi-threading is
/// not currently enabled.
/// Otherwise it will increment the clock for the current
/// vector after the operation (when the closure reports
/// release semantics), separating the operation from any
/// happens-before edges it may create.
fn maybe_perform_sync_operation<'tcx>(
&self,
op: impl FnOnce(VectorIdx, RefMut<'_, ThreadClockSet>) -> InterpResult<'tcx, bool>,
) -> InterpResult<'tcx> {
if self.multi_threaded.get() {
let (index, clocks) = self.current_thread_state_mut();
if op(index, clocks)? {
let (_, mut clocks) = self.current_thread_state_mut();
clocks.increment_clock(index);
}
}
Ok(())
}
/// Internal utility to identify a thread stored internally;
/// returns the id and the name for better diagnostics.
fn print_thread_metadata(&self, vector: VectorIdx) -> String {
let thread = self.vector_info.borrow()[vector];
let thread_name = &self.thread_info.borrow()[thread].thread_name;
if let Some(name) = thread_name {
let name: &str = name;
format!("Thread(id = {:?}, name = {:?})", thread.to_u32(), &*name)
} else {
format!("Thread(id = {:?})", thread.to_u32())
}
}
/// Acquire a lock, express that the previous call of
/// `validate_lock_release` must happen before this.
/// As this is an acquire operation, the thread timestamp is not
/// incremented.
pub fn validate_lock_acquire(&self, lock: &VClock, thread: ThreadId) {
let (_, mut clocks) = self.load_thread_state_mut(thread);
clocks.clock.join(&lock);
}
/// Release a lock handle, express that this happens-before
/// any subsequent calls to `validate_lock_acquire`.
/// For normal locks this should be equivalent to `validate_lock_release_shared`
/// since an acquire operation should have occurred before, however
/// for futex & condvar operations this is not the case and this
/// operation must be used.
pub fn validate_lock_release(&self, lock: &mut VClock, thread: ThreadId) {
let (index, mut clocks) = self.load_thread_state_mut(thread);
lock.clone_from(&clocks.clock);
clocks.increment_clock(index);
}
/// Release a lock handle, express that this happens-before
/// any subsequent calls to `validate_lock_acquire` as well
/// as any previous calls to this function after any
/// `validate_lock_release` calls.
/// For normal locks this should be equivalent to `validate_lock_release`.
/// This function only exists for joining over the set of concurrent readers
/// in a read-write lock and should not be used for anything else.
pub fn validate_lock_release_shared(&self, lock: &mut VClock, thread: ThreadId) {
let (index, mut clocks) = self.load_thread_state_mut(thread);
lock.join(&clocks.clock);
clocks.increment_clock(index);
}
/// Load the vector index used by the given thread as well as the set of vector clocks
/// used by the thread.
#[inline]
fn load_thread_state_mut(&self, thread: ThreadId) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
let index = self.thread_info.borrow()[thread]
.vector_index
.expect("Loading thread state for thread with no assigned vector");
let ref_vector = self.vector_clocks.borrow_mut();
let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
(index, clocks)
}
/// Load the current vector clock in use and the current set of thread clocks
/// in use for the vector.
#[inline]
fn current_thread_state(&self) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
let index = self.current_index();
let ref_vector = self.vector_clocks.borrow();
let clocks = Ref::map(ref_vector, |vec| &vec[index]);
(index, clocks)
}
/// Load the current vector clock in use and the current set of thread clocks
/// in use for the vector mutably for modification.
#[inline]
fn current_thread_state_mut(&self) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
let index = self.current_index();
let ref_vector = self.vector_clocks.borrow_mut();
let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
(index, clocks)
}
/// Return the current thread's vector index; it should be
/// the same as the data-race active thread.
#[inline]
fn current_index(&self) -> VectorIdx {
self.current_index.get()
}
}
| {
MemoryCellClocks {
read: VClock::default(),
write: alloc,
write_index: alloc_index,
write_type: WriteType::Allocate,
atomic_ops: None,
}
} |
mod.rs | use ethers_core::{
abi::{Detokenize, Function, Token},
types::{Address, BlockNumber, NameOrAddress, TxHash, U256},
};
use ethers_providers::Middleware;
use std::{collections::HashMap, str::FromStr, sync::Arc};
use crate::{
call::{ContractCall, ContractError},
Lazy,
};
mod multicall_contract;
use multicall_contract::MulticallContract;
/// A lazily computed hash map with the Ethereum network IDs as keys and the corresponding
/// Multicall smart contract addresses as values
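/// (illustratively, `ADDRESS_BOOK.get(&U256::from(1u8))` yields the mainnet address below)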
pub static ADDRESS_BOOK: Lazy<HashMap<U256, Address>> = Lazy::new(|| {
let mut m = HashMap::new();
// mainnet
let addr =
Address::from_str("eefba1e63905ef1d7acba5a8513c70307c1ce441").expect("Decoding failed");
m.insert(U256::from(1u8), addr);
// rinkeby
let addr =
Address::from_str("42ad527de7d4e9d9d011ac45b31d8551f8fe9821").expect("Decoding failed");
m.insert(U256::from(4u8), addr);
// goerli
let addr =
Address::from_str("77dca2c955b15e9de4dbbcf1246b4b85b651e50e").expect("Decoding failed");
m.insert(U256::from(5u8), addr);
// kovan
let addr =
Address::from_str("2cc8688c5f75e365aaeeb4ea8d6a480405a48d2a").expect("Decoding failed");
m.insert(U256::from(42u8), addr);
m
});
/// A Multicall is an abstraction for sending batched calls/transactions to the Ethereum blockchain.
/// It stores an instance of the [`Multicall` smart contract](https://etherscan.io/address/0xeefba1e63905ef1d7acba5a8513c70307c1ce441#code)
/// and the user provided list of transactions to be made.
///
/// `Multicall` can instantiate the Multicall contract instance from the chain ID of the client
/// supplied to [`new`]. It supports the Ethereum mainnet, as well as testnets
/// [Rinkeby](https://rinkeby.etherscan.io/address/0x42ad527de7d4e9d9d011ac45b31d8551f8fe9821#code),
/// [Goerli](https://goerli.etherscan.io/address/0x77dca2c955b15e9de4dbbcf1246b4b85b651e50e) and
/// [Kovan](https://kovan.etherscan.io/address/0x2cc8688c5f75e365aaeeb4ea8d6a480405a48d2a#code).
///
/// Additionally, the `block` number can be provided for the call by using the [`block`] method.
/// Build on the `Multicall` instance by adding calls using the [`add_call`] method.
///
/// # Example
///
/// ```no_run
/// use ethers::{
/// abi::Abi,
/// contract::{Contract, Multicall},
/// providers::{Middleware, Http, Provider, PendingTransaction},
/// types::{Address, H256, U256},
/// };
/// use std::{convert::TryFrom, sync::Arc};
///
/// # async fn bar() -> Result<(), Box<dyn std::error::Error>> {
/// // this is a dummy address used for illustration purpose
/// let address = "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee".parse::<Address>()?;
///
/// // (ugly way to write the ABI inline, you can otherwise read it from a file)
/// let abi: Abi = serde_json::from_str(r#"[{"inputs":[{"internalType":"string","name":"value","type":"string"}],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"author","type":"address"},{"indexed":true,"internalType":"address","name":"oldAuthor","type":"address"},{"indexed":false,"internalType":"string","name":"oldValue","type":"string"},{"indexed":false,"internalType":"string","name":"newValue","type":"string"}],"name":"ValueChanged","type":"event"},{"inputs":[],"name":"getValue","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"lastSender","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"string","name":"value","type":"string"}],"name":"setValue","outputs":[],"stateMutability":"nonpayable","type":"function"}]"#)?;
///
/// // connect to the network
/// let client = Provider::<Http>::try_from("https://kovan.infura.io/v3/c60b0bb42f8a4c6481ecd229eddaca27")?;
///
/// // create the contract object. This will be used to construct the calls for multicall
/// let client = Arc::new(client);
/// let contract = Contract::<Provider<Http>>::new(address, abi, Arc::clone(&client));
///
/// // note that these [`ContractCall`]s are futures, and need to be `.await`ed to resolve.
/// // But we will let `Multicall` take care of that for us
/// let first_call = contract.method::<_, String>("getValue", ())?;
/// let second_call = contract.method::<_, Address>("lastSender", ())?;
///
/// // since this example connects to the Kovan testnet, we need not provide an address for
/// // the Multicall contract and we set that to `None`. If you wish to provide the address
/// // for the Multicall contract, you can pass the `Some(multicall_addr)` argument.
/// // Construction of the `Multicall` instance follows the builder pattern
/// let mut multicall = Multicall::new(Arc::clone(&client), None).await?;
/// multicall
/// .add_call(first_call)
/// .add_call(second_call);
///
/// // `await`ing on the `call` method lets us fetch the return values of both the above calls
/// // in one single RPC call
/// let _return_data: (String, Address) = multicall.call().await?;
///
/// // the same `Multicall` instance can be re-used to do a different batch of transactions.
/// // Say we wish to broadcast (send) a couple of transactions via the Multicall contract.
/// let first_broadcast = contract.method::<_, H256>("setValue", "some value".to_owned())?;
/// let second_broadcast = contract.method::<_, H256>("setValue", "new value".to_owned())?;
/// let multicall = multicall
/// .clear_calls()
/// .add_call(first_broadcast)
/// .add_call(second_broadcast);
///
/// // `await`ing the `send` method waits for the transaction to be broadcast, which also
/// // returns the transaction hash
/// let tx_hash = multicall.send().await?;
/// let _tx_receipt = PendingTransaction::new(tx_hash, &client).await?;
///
/// // you can also query ETH balances of multiple addresses
/// let address_1 = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa".parse::<Address>()?;
/// let address_2 = "ffffffffffffffffffffffffffffffffffffffff".parse::<Address>()?;
/// let multicall = multicall
/// .clear_calls()
/// .eth_balance_of(address_1)
/// .eth_balance_of(address_2);
/// let _balances: (U256, U256) = multicall.call().await?;
/// # Ok(())
/// # }
/// ```
///
/// [`new`]: method@crate::Multicall::new
/// [`block`]: method@crate::Multicall::block
/// [`add_call`]: method@crate::Multicall::add_call
#[derive(Clone)]
pub struct Multicall<M> {
calls: Vec<Call>,
block: Option<BlockNumber>,
contract: MulticallContract<M>,
}
#[derive(Clone)]
/// Helper struct for managing calls to be made to the `function` in smart contract `target`
/// with `data`
pub struct Call {
target: Address,
data: Vec<u8>,
function: Function,
}
impl<M: Middleware> Multicall<M> {
/// Creates a new Multicall instance from the provided client. If provided with an `address`,
/// it instantiates the Multicall contract with that address. Otherwise it fetches the address
/// from the address book.
///
/// # Panics
/// If a `None` address is provided and the provided client does not belong to one of
/// the supported network IDs (mainnet, kovan, rinkeby and goerli).
pub async fn new<C: Into<Arc<M>>>(
client: C,
address: Option<Address>,
) -> Result<Self, ContractError<M>> {
let client = client.into();
// Fetch chain id and the corresponding address of Multicall contract
// preference is given to the Multicall contract's address if provided;
// otherwise check the address book for the client's chain ID
let address: Address = match address {
Some(addr) => addr,
None => {
let chain_id = client
.get_chainid()
.await
.map_err(ContractError::MiddlewareError)?;
match ADDRESS_BOOK.get(&chain_id) {
Some(addr) => *addr,
                None => panic!(
                    "Must either be on a supported network or provide a Multicall contract address"
                ),
}
}
};
// Instantiate the multicall contract
let contract = MulticallContract::new(address, client);
Ok(Self {
calls: vec![],
block: None,
contract,
})
}
/// Sets the `block` field for the multicall aggregate call
pub fn block<T: Into<BlockNumber>>(mut self, block: T) -> Self {
self.block = Some(block.into());
self
}
/// Appends a `call` to the list of calls for the Multicall instance
///
/// # Panics
///
    /// If more than the maximum number of supported calls are added. The maximum
    /// limit is constrained by tokenization/detokenization support for tuples
pub fn add_call<D: Detokenize>(&mut self, call: ContractCall<M, D>) -> &mut Self {
if self.calls.len() >= 16 {
panic!("Cannot support more than {} calls", 16);
}
match (call.tx.to, call.tx.data) {
(Some(NameOrAddress::Address(target)), Some(data)) => {
let call = Call {
target,
data: data.0,
function: call.function,
};
self.calls.push(call);
self
}
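            // Calls whose transaction lacks a concrete target address or calldata
            // cannot be aggregated, so they are skipped rather than pushed.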
_ => self,
}
}
/// Appends a `call` to the list of calls for the Multicall instance for querying
/// the ETH balance of an address
///
/// # Panics
///
    /// If more than the maximum number of supported calls are added. The maximum
    /// limit is constrained by tokenization/detokenization support for tuples
pub fn eth_balance_of(&mut self, addr: Address) -> &mut Self {
let call = self.contract.get_eth_balance(addr);
self.add_call(call)
}
    /// Clears the batch of calls from the Multicall instance, so that the already
    /// instantiated Multicall can be re-used to send a different batch of
    /// transactions or to perform another aggregate query
///
/// ```no_run
/// # async fn foo() -> Result<(), Box<dyn std::error::Error>> {
/// # use ethers::{abi::Abi, prelude::*};
/// # use std::{sync::Arc, convert::TryFrom};
/// #
/// # let client = Provider::<Http>::try_from("http://localhost:8545")?;
/// # let client = Arc::new(client);
/// #
/// # let abi: Abi = serde_json::from_str("")?;
/// # let address = "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee".parse::<Address>()?;
/// # let contract = Contract::<Provider<Http>>::new(address, abi, client.clone());
/// #
/// # let broadcast_1 = contract.method::<_, H256>("setValue", "some value".to_owned())?;
/// # let broadcast_2 = contract.method::<_, H256>("setValue", "new value".to_owned())?;
/// #
/// let mut multicall = Multicall::new(client, None).await?;
/// multicall
/// .add_call(broadcast_1)
/// .add_call(broadcast_2);
///
/// let _tx_hash = multicall.send().await?;
///
/// # let call_1 = contract.method::<_, String>("getValue", ())?;
/// # let call_2 = contract.method::<_, Address>("lastSender", ())?;
/// multicall
/// .clear_calls()
/// .add_call(call_1)
/// .add_call(call_2);
/// let return_data: (String, Address) = multicall.call().await?;
/// # Ok(())
/// # }
/// ```
pub fn clear_calls(&mut self) -> &mut Self {
self.calls.clear();
self
}
    /// Queries the Ethereum blockchain using an `eth_call`, routed through the Multicall contract.
///
/// It returns a [`ContractError<M>`] if there is any error in the RPC call or while
    /// detokenizing the tokens back to the expected return type. The return type must be
    /// annotated when calling this method.
///
/// ```no_run
/// # async fn foo() -> Result<(), Box<dyn std::error::Error>> {
/// # use ethers::prelude::*;
/// # use std::convert::TryFrom;
/// #
/// # let client = Provider::<Http>::try_from("http://localhost:8545")?;
/// #
/// # let multicall = Multicall::new(client, None).await?;
    /// // If the Solidity function calls have the following return types:
/// // 1. `returns (uint256)`
/// // 2. `returns (string, address)`
/// // 3. `returns (bool)`
/// let result: (U256, (String, Address), bool) = multicall.call().await?;
/// # Ok(())
/// # }
/// ```
///
/// Note: this method _does not_ send a transaction from your account
///
/// [`ContractError<M>`]: crate::ContractError<M>
pub async fn call<D: Detokenize>(&self) -> Result<D, ContractError<M>> |
    /// Signs and broadcasts a batch of transactions by using the Multicall contract as a proxy.
///
/// ```no_run
/// # async fn foo() -> Result<(), Box<dyn std::error::Error>> {
/// # use ethers::prelude::*;
/// # use std::convert::TryFrom;
/// # let client = Provider::<Http>::try_from("http://localhost:8545")?;
/// # let multicall = Multicall::new(client, None).await?;
/// let tx_hash = multicall.send().await?;
/// # Ok(())
/// # }
/// ```
///
/// Note: this method sends a transaction from your account, and will return an error
/// if you do not have sufficient funds to pay for gas
pub async fn send(&self) -> Result<TxHash, ContractError<M>> {
let contract_call = self.as_contract_call();
// Broadcast transaction and return the transaction hash
// TODO: Can we make this return a PendingTransaction directly instead?
// Seems hard due to `returns a value referencing data owned by the current function`
let tx_hash = *contract_call.send().await?;
Ok(tx_hash)
}
fn as_contract_call(&self) -> ContractCall<M, (U256, Vec<Vec<u8>>)> {
// Map the Multicall struct into appropriate types for `aggregate` function
let calls: Vec<(Address, Vec<u8>)> = self
.calls
.iter()
.map(|call| (call.target, call.data.clone()))
.collect();
// Construct the ContractCall for `aggregate` function to broadcast the transaction
let contract_call = self.contract.aggregate(calls);
if let Some(block) = self.block {
contract_call.block(block)
} else {
contract_call
}
}
}
| {
let contract_call = self.as_contract_call();
// Fetch response from the Multicall contract
let (_block_number, return_data) = contract_call.call().await?;
// Decode return data into ABI tokens
let tokens = self
.calls
.iter()
.zip(&return_data)
.map(|(call, bytes)| {
let tokens: Vec<Token> = call.function.decode_output(&bytes)?;
Ok(match tokens.len() {
0 => Token::Tuple(vec![]),
1 => tokens[0].clone(),
_ => Token::Tuple(tokens),
})
})
.collect::<Result<Vec<Token>, ContractError<M>>>()?;
        // Wrap the individual results in one tuple token so that the tuple
        // type `D` can be detokenized in a single step
let tokens = vec![Token::Tuple(tokens)];
// Detokenize from the tokens into the provided tuple D
let data = D::from_tokens(tokens)?;
Ok(data)
} |
mod.rs | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod builtin_arithmetic;
mod builtin_cast;
mod builtin_compare;
mod builtin_control;
mod builtin_encryption;
mod builtin_json;
mod builtin_like;
mod builtin_math;
mod builtin_miscellaneous;
mod builtin_op;
mod builtin_other;
mod builtin_string;
mod builtin_time;
mod column;
mod constant;
mod ctx;
mod scalar_function;
pub use self::ctx::*;
pub use coprocessor::codec::{Error, Result};
use coprocessor::codec::mysql::{charset, types};
use coprocessor::codec::mysql::{Decimal, Duration, Json, Time, MAX_FSP};
use coprocessor::codec::{self, Datum};
use std::borrow::Cow;
use std::str;
use tipb::expression::{Expr, ExprType, FieldType, ScalarFuncSig};
use util::codec::number;
#[derive(Debug, Clone, PartialEq)]
pub enum Expression {
Constant(Constant),
ColumnRef(Column),
ScalarFn(ScalarFunc),
}
#[derive(Debug, Clone, PartialEq)]
pub struct Column {
offset: usize,
tp: FieldType,
}
#[derive(Debug, Clone, PartialEq)]
pub struct Constant {
val: Datum,
tp: FieldType,
}
/// A single scalar function call
#[derive(Debug, Clone, PartialEq)]
pub struct ScalarFunc {
sig: ScalarFuncSig,
children: Vec<Expression>,
tp: FieldType,
}
impl Expression {
fn new_const(v: Datum, field_type: FieldType) -> Expression {
Expression::Constant(Constant {
val: v,
tp: field_type,
})
}
#[inline]
fn get_tp(&self) -> &FieldType {
match *self {
Expression::Constant(ref c) => &c.tp,
Expression::ColumnRef(ref c) => &c.tp,
Expression::ScalarFn(ref c) => &c.tp,
}
}
#[cfg(test)]
#[inline]
fn mut_tp(&mut self) -> &mut FieldType {
match *self {
Expression::Constant(ref mut c) => &mut c.tp,
Expression::ColumnRef(ref mut c) => &mut c.tp,
Expression::ScalarFn(ref mut c) => &mut c.tp,
}
}
#[cfg_attr(feature = "cargo-clippy", allow(match_same_arms))]
fn eval_int(&self, ctx: &mut EvalContext, row: &[Datum]) -> Result<Option<i64>> {
match *self {
Expression::Constant(ref constant) => constant.eval_int(),
Expression::ColumnRef(ref column) => column.eval_int(row),
Expression::ScalarFn(ref f) => f.eval_int(ctx, row),
}
}
fn eval_real(&self, ctx: &mut EvalContext, row: &[Datum]) -> Result<Option<f64>> {
match *self {
Expression::Constant(ref constant) => constant.eval_real(),
Expression::ColumnRef(ref column) => column.eval_real(row),
Expression::ScalarFn(ref f) => f.eval_real(ctx, row),
}
}
#[cfg_attr(feature = "cargo-clippy", allow(match_same_arms))]
fn eval_decimal<'a, 'b: 'a>(
&'b self,
ctx: &mut EvalContext,
row: &'a [Datum],
) -> Result<Option<Cow<'a, Decimal>>> {
match *self {
Expression::Constant(ref constant) => constant.eval_decimal(),
Expression::ColumnRef(ref column) => column.eval_decimal(row),
Expression::ScalarFn(ref f) => f.eval_decimal(ctx, row),
}
}
fn eval_string<'a, 'b: 'a>(
&'b self,
ctx: &mut EvalContext,
row: &'a [Datum],
) -> Result<Option<Cow<'a, [u8]>>> {
match *self {
Expression::Constant(ref constant) => constant.eval_string(),
Expression::ColumnRef(ref column) => column.eval_string(ctx, row),
Expression::ScalarFn(ref f) => f.eval_bytes(ctx, row),
}
}
fn eval_string_and_decode<'a, 'b: 'a>(
&'b self,
ctx: &mut EvalContext,
row: &'a [Datum],
) -> Result<Option<Cow<'a, str>>> {
let bytes = try_opt!(self.eval_string(ctx, row));
let chrst = self.get_tp().get_charset();
if charset::UTF8_CHARSETS.contains(&chrst) {
let s = match bytes {
Cow::Borrowed(bs) => str::from_utf8(bs).map_err(Error::from).map(Cow::Borrowed),
Cow::Owned(bs) => String::from_utf8(bs).map_err(Error::from).map(Cow::Owned),
};
return s.map(Some);
}
Err(box_err!("unsupported charset: {}", chrst))
}
fn eval_time<'a, 'b: 'a>(
&'b self,
ctx: &mut EvalContext,
row: &'a [Datum],
) -> Result<Option<Cow<'a, Time>>> {
match *self {
Expression::Constant(ref constant) => constant.eval_time(),
Expression::ColumnRef(ref column) => column.eval_time(row),
Expression::ScalarFn(ref f) => f.eval_time(ctx, row),
}
}
fn eval_duration<'a, 'b: 'a>(
&'b self,
ctx: &mut EvalContext,
row: &'a [Datum],
) -> Result<Option<Cow<'a, Duration>>> {
match *self {
Expression::Constant(ref constant) => constant.eval_duration(),
Expression::ColumnRef(ref column) => column.eval_duration(row),
Expression::ScalarFn(ref f) => f.eval_duration(ctx, row),
}
}
fn eval_json<'a, 'b: 'a>(
&'b self,
ctx: &mut EvalContext,
row: &'a [Datum],
) -> Result<Option<Cow<'a, Json>>> {
match *self {
Expression::Constant(ref constant) => constant.eval_json(),
Expression::ColumnRef(ref column) => column.eval_json(row),
Expression::ScalarFn(ref f) => f.eval_json(ctx, row),
}
}
    /// `is_hybrid_type` checks whether a ClassString expression is a hybrid type value, which
    /// returns different types of value in different contexts.
    /// For ENUM/SET, which consist of a string attribute `Name` and an int attribute `Value`,
    /// converting ENUM/SET to int as a string value causes an error.
    /// For Bit/Hex, converting to int as a string value yields a wrong result.
    /// For example, when converting `0b101` to int the result should be 5, but we would get
    /// 101 if we treated it as a string.
pub fn is_hybrid_type(&self) -> bool {
types::is_hybrid_type(self.get_tp().get_tp() as u8)
}
}
impl Expression {
pub fn eval(&self, ctx: &mut EvalContext, row: &[Datum]) -> Result<Datum> {
match *self {
Expression::Constant(ref constant) => Ok(constant.eval()),
Expression::ColumnRef(ref column) => Ok(column.eval(row)),
Expression::ScalarFn(ref f) => f.eval(ctx, row),
}
}
pub fn | (ctx: &mut EvalContext, exprs: Vec<Expr>) -> Result<Vec<Self>> {
let mut data = Vec::with_capacity(exprs.len());
for expr in exprs {
let ex = Expression::build(ctx, expr)?;
data.push(ex);
}
Ok(data)
}
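    /// Builds a single `Expression` from a protobuf `Expr`: constant payloads are
    /// decoded with the codec matching their `ExprType`, scalar functions are built
    /// recursively from their children, and column references decode their offset.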
pub fn build(ctx: &mut EvalContext, mut expr: Expr) -> Result<Self> {
debug!("build expr:{:?}", expr);
let tp = expr.take_field_type();
match expr.get_tp() {
ExprType::Null => Ok(Expression::new_const(Datum::Null, tp)),
ExprType::Int64 => number::decode_i64(&mut expr.get_val())
.map(Datum::I64)
.map(|e| Expression::new_const(e, tp))
.map_err(Error::from),
ExprType::Uint64 => number::decode_u64(&mut expr.get_val())
.map(Datum::U64)
.map(|e| Expression::new_const(e, tp))
.map_err(Error::from),
ExprType::String | ExprType::Bytes => {
Ok(Expression::new_const(Datum::Bytes(expr.take_val()), tp))
}
ExprType::Float32 | ExprType::Float64 => number::decode_f64(&mut expr.get_val())
.map(Datum::F64)
.map(|e| Expression::new_const(e, tp))
.map_err(Error::from),
ExprType::MysqlTime => number::decode_u64(&mut expr.get_val())
.map_err(Error::from)
.and_then(|i| {
let fsp = tp.get_decimal() as i8;
let t = tp.get_tp() as u8;
Time::from_packed_u64(i, t, fsp, ctx.cfg.tz)
})
.map(|t| Expression::new_const(Datum::Time(t), tp)),
ExprType::MysqlDuration => number::decode_i64(&mut expr.get_val())
.map_err(Error::from)
.and_then(|n| Duration::from_nanos(n, MAX_FSP))
.map(Datum::Dur)
.map(|e| Expression::new_const(e, tp)),
ExprType::MysqlDecimal => Decimal::decode(&mut expr.get_val())
.map(Datum::Dec)
.map(|e| Expression::new_const(e, tp))
.map_err(Error::from),
ExprType::MysqlJson => Json::decode(&mut expr.get_val())
.map(Datum::Json)
.map(|e| Expression::new_const(e, tp))
.map_err(Error::from),
ExprType::ScalarFunc => {
ScalarFunc::check_args(expr.get_sig(), expr.get_children().len())?;
expr.take_children()
.into_iter()
.map(|child| Expression::build(ctx, child))
.collect::<Result<Vec<_>>>()
.map(|children| {
Expression::ScalarFn(ScalarFunc {
sig: expr.get_sig(),
children,
tp,
})
})
}
ExprType::ColumnRef => {
let offset = number::decode_i64(&mut expr.get_val()).map_err(Error::from)? as usize;
let column = Column { offset, tp };
Ok(Expression::ColumnRef(column))
}
unhandled => Err(box_err!("can't handle {:?} expr in DAG mode", unhandled)),
}
}
}
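/// Coerces `left` and `right` into arithmetic datums, propagates NULL (if
/// either side is `Datum::Null` the result is `Datum::Null`, as in MySQL),
/// and then applies `f` to the coerced pair. A minimal usage sketch, assuming
/// a hypothetical `checked_int_add` helper written for illustration only:
///
/// ```ignore
/// let sum = eval_arith(&mut ctx, Datum::I64(1), Datum::I64(2), |l, _ctx, r| {
///     checked_int_add(l, r) // would return codec::Result<Datum>
/// })?;
/// assert_eq!(sum, Datum::I64(3));
/// ```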
#[inline]
pub fn eval_arith<F>(ctx: &mut EvalContext, left: Datum, right: Datum, f: F) -> Result<Datum>
where
F: FnOnce(Datum, &mut EvalContext, Datum) -> codec::Result<Datum>,
{
let left = left.into_arith(ctx)?;
let right = right.into_arith(ctx)?;
let (left, right) = Datum::coerce(left, right)?;
if left == Datum::Null || right == Datum::Null {
return Ok(Datum::Null);
}
f(left, ctx, right).map_err(From::from)
}
#[cfg(test)]
mod test {
use super::{Error, EvalConfig, EvalContext, Expression};
use coprocessor::codec::error::{ERR_DATA_OUT_OF_RANGE, ERR_DIVISION_BY_ZERO};
use coprocessor::codec::mysql::json::JsonEncoder;
use coprocessor::codec::mysql::{
charset, types, Decimal, DecimalEncoder, Duration, Json, Time,
};
use coprocessor::codec::{convert, mysql, Datum};
use std::sync::Arc;
use std::{i64, u64};
use tipb::expression::{Expr, ExprType, FieldType, ScalarFuncSig};
use util::codec::number::{self, NumberEncoder};
#[inline]
pub fn str2dec(s: &str) -> Datum {
Datum::Dec(s.parse().unwrap())
}
#[inline]
pub fn make_null_datums(size: usize) -> Vec<Datum> {
(0..size).map(|_| Datum::Null).collect()
}
#[inline]
pub fn check_overflow(e: Error) -> Result<(), ()> {
if e.code() == ERR_DATA_OUT_OF_RANGE {
Ok(())
} else {
Err(())
}
}
#[inline]
pub fn check_divide_by_zero(e: Error) -> Result<(), ()> {
if e.code() == ERR_DIVISION_BY_ZERO {
Ok(())
} else {
Err(())
}
}
pub fn scalar_func_expr(sig: ScalarFuncSig, children: &[Expr]) -> Expr {
let mut expr = Expr::new();
expr.set_tp(ExprType::ScalarFunc);
expr.set_sig(sig);
expr.set_field_type(FieldType::new());
for child in children {
expr.mut_children().push(child.clone());
}
expr
}
pub fn col_expr(col_id: i64) -> Expr {
let mut expr = Expr::new();
expr.set_tp(ExprType::ColumnRef);
let mut buf = Vec::with_capacity(8);
buf.encode_i64(col_id).unwrap();
expr.set_val(buf);
expr
}
pub fn string_datum_expr_with_tp(
datum: Datum,
tp: u8,
flag: u64,
flen: i32,
charset: String,
collate: i32,
) -> Expr {
let mut expr = Expr::new();
match datum {
Datum::Bytes(bs) => {
expr.set_tp(ExprType::Bytes);
expr.set_val(bs);
expr.mut_field_type().set_tp(i32::from(tp));
expr.mut_field_type().set_flag(flag as u32);
expr.mut_field_type().set_flen(flen);
expr.mut_field_type().set_charset(charset);
expr.mut_field_type().set_collate(collate);
}
Datum::Null => expr.set_tp(ExprType::Null),
d => panic!("unsupport datum: {:?}", d),
}
expr
}
pub fn datum_expr(datum: Datum) -> Expr {
let mut expr = Expr::new();
match datum {
Datum::I64(i) => {
expr.set_tp(ExprType::Int64);
let mut buf = Vec::with_capacity(number::I64_SIZE);
buf.encode_i64(i).unwrap();
expr.set_val(buf);
}
Datum::U64(u) => {
expr.set_tp(ExprType::Uint64);
let mut buf = Vec::with_capacity(number::U64_SIZE);
buf.encode_u64(u).unwrap();
expr.set_val(buf);
expr.mut_field_type().set_flag(types::UNSIGNED_FLAG as u32);
}
Datum::Bytes(bs) => {
expr.set_tp(ExprType::Bytes);
expr.set_val(bs);
expr.mut_field_type()
.set_charset(charset::CHARSET_UTF8.to_owned());
}
Datum::F64(f) => {
expr.set_tp(ExprType::Float64);
let mut buf = Vec::with_capacity(number::F64_SIZE);
buf.encode_f64(f).unwrap();
expr.set_val(buf);
}
Datum::Dur(d) => {
expr.set_tp(ExprType::MysqlDuration);
let mut buf = Vec::with_capacity(number::I64_SIZE);
buf.encode_i64(d.to_nanos()).unwrap();
expr.set_val(buf);
}
Datum::Dec(d) => {
expr.set_tp(ExprType::MysqlDecimal);
let (prec, frac) = d.prec_and_frac();
let mut buf = Vec::with_capacity(mysql::dec_encoded_len(&[prec, frac]).unwrap());
buf.encode_decimal(&d, prec, frac).unwrap();
expr.set_val(buf);
}
Datum::Time(t) => {
expr.set_tp(ExprType::MysqlTime);
let mut ft = FieldType::new();
ft.set_tp(i32::from(t.get_tp()));
ft.set_decimal(i32::from(t.get_fsp()));
expr.set_field_type(ft);
let u = t.to_packed_u64();
let mut buf = Vec::with_capacity(number::U64_SIZE);
buf.encode_u64(u).unwrap();
expr.set_val(buf);
}
Datum::Json(j) => {
expr.set_tp(ExprType::MysqlJson);
let mut buf = Vec::new();
buf.encode_json(&j).unwrap();
expr.set_val(buf);
}
Datum::Null => expr.set_tp(ExprType::Null),
d => panic!("unsupport datum: {:?}", d),
};
expr
}
#[test]
fn test_expression_eval() {
let mut ctx = EvalContext::new(Arc::new(EvalConfig::default_for_test()));
let cases = vec![
(
ScalarFuncSig::CastStringAsReal,
vec![Datum::Bytes(b"123".to_vec())],
Datum::F64(123f64),
),
(
ScalarFuncSig::CastStringAsDecimal,
vec![Datum::Bytes(b"123".to_vec())],
Datum::Dec(Decimal::from(123)),
),
(
ScalarFuncSig::CastStringAsDuration,
vec![Datum::Bytes(b"12:02:03".to_vec())],
Datum::Dur(Duration::parse(b"12:02:03", 0).unwrap()),
),
(
ScalarFuncSig::CastStringAsTime,
vec![Datum::Bytes(b"2012-12-12 14:00:05".to_vec())],
Datum::Time(Time::parse_utc_datetime("2012-12-12 14:00:05", 0).unwrap()),
),
(
ScalarFuncSig::CastStringAsString,
vec![Datum::Bytes(b"134".to_vec())],
Datum::Bytes(b"134".to_vec()),
),
(
ScalarFuncSig::CastIntAsJson,
vec![Datum::I64(12)],
Datum::Json(Json::I64(12)),
),
];
for (sig, cols, exp) in cases {
let mut col_expr = col_expr(0);
col_expr
.mut_field_type()
.set_charset(charset::CHARSET_UTF8.to_owned());
let mut ex = scalar_func_expr(sig, &[col_expr]);
ex.mut_field_type()
.set_decimal(convert::UNSPECIFIED_LENGTH as i32);
ex.mut_field_type()
.set_flen(convert::UNSPECIFIED_LENGTH as i32);
let e = Expression::build(&mut ctx, ex).unwrap();
let res = e.eval(&mut ctx, &cols).unwrap();
if let Datum::F64(_) = exp {
assert_eq!(format!("{}", res), format!("{}", exp));
} else {
assert_eq!(res, exp);
}
}
// cases for integer
let cases = vec![
(
Some(types::UNSIGNED_FLAG),
vec![Datum::U64(u64::MAX)],
Datum::U64(u64::MAX),
),
(None, vec![Datum::I64(i64::MIN)], Datum::I64(i64::MIN)),
(None, vec![Datum::Null], Datum::Null),
];
for (flag, cols, exp) in cases {
let col_expr = col_expr(0);
let mut ex = scalar_func_expr(ScalarFuncSig::CastIntAsInt, &[col_expr]);
if flag.is_some() {
ex.mut_field_type().set_flag(flag.unwrap() as u32);
}
let e = Expression::build(&mut ctx, ex).unwrap();
let res = e.eval(&mut ctx, &cols).unwrap();
assert_eq!(res, exp);
}
}
}
| batch_build |
signal_unix.go | // Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd haiku linux netbsd openbsd solaris |
func os_sigpipe() {
onM(sigpipe)
} |
package runtime
func sigpipe() |
test_volume_replication.py | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for volume replication API code.
"""
import json
import mock
from oslo_config import cfg
import webob
from cinder import context
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import utils as tests_utils
CONF = cfg.CONF
def app():
# no auth, just let environ['cinder.context'] pass through
api = fakes.router.APIRouter()
mapper = fakes.urlmap.URLMap()
mapper['/v2'] = api
return mapper
class VolumeReplicationAPITestCase(test.TestCase):
"""Test Cases for replication API."""
def setUp(self):
super(VolumeReplicationAPITestCase, self).setUp()
self.ctxt = context.RequestContext('admin', 'fake', True)
self.volume_params = {
'host': CONF.host,
'size': 1}
def _get_resp(self, operation, volume_id, xml=False):
"""Helper for a replication action req for the specified volume_id."""
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume_id)
req.method = 'POST'
if xml:
body = '<os-%s-replica/>' % operation
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
req.body = body
else:
body = {'os-%s-replica' % operation: ''}
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
req.environ['cinder.context'] = context.RequestContext('admin',
'fake',
True)
res = req.get_response(app())
return req, res
def test_promote_bad_id(self):
(req, res) = self._get_resp('promote', 'fake')
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(404, res.status_int, msg)
def test_promote_bad_id_xml(self):
(req, res) = self._get_resp('promote', 'fake', xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(404, res.status_int, msg)
def | (self):
volume = tests_utils.create_volume(
self.ctxt,
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
def test_promote_volume_not_replicated_xml(self):
volume = tests_utils.create_volume(
self.ctxt,
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
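    # The promote/reenable tests below sweep combinations of volume status and
    # replication_status: invalid combinations must be rejected with HTTP 400,
    # while valid ones are accepted with HTTP 202 and cast to the volume RPC API.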
@mock.patch('cinder.volume.rpcapi.VolumeAPI.promote_replica')
def test_promote_replication_volume_status(self,
_rpcapi_promote):
for status in ['error', 'in-use']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status=status,
                                               replication_status='active',
                                               **self.volume_params)
(req, res) = self._get_resp('promote', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
for status in ['available']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status=status,
                                               replication_status='active',
                                               **self.volume_params)
(req, res) = self._get_resp('promote', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(202, res.status_int, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.promote_replica')
def test_promote_replication_volume_status_xml(self,
_rpcapi_promote):
for status in ['error', 'in-use']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status=status,
                                               replication_status='active',
                                               **self.volume_params)
(req, res) = self._get_resp('promote', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
for status in ['available']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status=status,
                                               replication_status='active',
                                               **self.volume_params)
(req, res) = self._get_resp('promote', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(202, res.status_int, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.promote_replica')
def test_promote_replication_replication_status(self,
_rpcapi_promote):
for status in ['error', 'copying', 'inactive']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status='available',
                                               replication_status=status,
                                               **self.volume_params)
(req, res) = self._get_resp('promote', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
for status in ['active', 'active-stopped']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status='available',
                                               replication_status=status,
                                               **self.volume_params)
(req, res) = self._get_resp('promote', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(202, res.status_int, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.promote_replica')
def test_promote_replication_replication_status_xml(self,
_rpcapi_promote):
for status in ['error', 'copying', 'inactive']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status='available',
                                               replication_status=status,
                                               **self.volume_params)
(req, res) = self._get_resp('promote', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
for status in ['active', 'active-stopped']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status='available',
                                               replication_status=status,
                                               **self.volume_params)
(req, res) = self._get_resp('promote', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(202, res.status_int, msg)
def test_reenable_bad_id(self):
(req, res) = self._get_resp('reenable', 'fake')
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(404, res.status_int, msg)
def test_reenable_bad_id_xml(self):
(req, res) = self._get_resp('reenable', 'fake', xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(404, res.status_int, msg)
def test_reenable_volume_not_replicated(self):
volume = tests_utils.create_volume(
self.ctxt,
**self.volume_params)
(req, res) = self._get_resp('reenable', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
def test_reenable_volume_not_replicated_xml(self):
volume = tests_utils.create_volume(
self.ctxt,
**self.volume_params)
(req, res) = self._get_resp('reenable', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.reenable_replication')
def test_reenable_replication_replication_status(self,
_rpcapi_promote):
for status in ['active', 'copying']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status='available',
                                               replication_status=status,
                                               **self.volume_params)
(req, res) = self._get_resp('reenable', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
for status in ['inactive', 'active-stopped', 'error']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status='available',
                                               replication_status=status,
                                               **self.volume_params)
(req, res) = self._get_resp('reenable', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(202, res.status_int, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.reenable_replication')
def test_reenable_replication_replication_status_xml(self,
_rpcapi_promote):
for status in ['active', 'copying']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status='available',
                                               replication_status=status,
                                               **self.volume_params)
(req, res) = self._get_resp('reenable', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
for status in ['inactive', 'active-stopped', 'error']:
            volume = tests_utils.create_volume(self.ctxt,
                                               status='available',
                                               replication_status=status,
                                               **self.volume_params)
(req, res) = self._get_resp('reenable', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(202, res.status_int, msg)
| test_promote_volume_not_replicated |
peregrineApp.js | /*-
* #%L
* peregrine vuejs page renderer - UI Apps
* %%
* Copyright (C) 2017 headwire inc.
* %%
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
* #L%
*/
import { LoggerFactory } from './logger.js'
let log = LoggerFactory.logger('peregrineApp').setDebugLevel()
import state from './state.js'
import merge from './merge.js'
import { pagePathToDataPath, componentNameToVarName } from './util.js'
let view
let loadedComponents = []
let perVueApp = null
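// Splits a page URL into path, suffix, suffix parameters and hash. An
// illustrative example (input made up for documentation purposes):
//   makePathInfo('/content/sites/example.html/filter//news#top')
//   -> { path: '/content/sites/example.html', suffix: 'filter//news',
//        suffixParams: { filter: 'news' }, hash: '#top' }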
function makePathInfo(path) {
let hash = ''
if(path.indexOf('#') >= 0) {
hash = path.substring(path.indexOf('#'))
path = path.substring(0, path.indexOf('#'))
}
log.fine('makePathInfo for path', path)
var htmlPos = path.indexOf('.html')
var pathPart = path
var suffixPath = ''
if(htmlPos >= 0) {
suffixPath = path.slice(htmlPos)
pathPart = path.slice(0, htmlPos+5)
}
var suffixParams = {}
if(suffixPath.length > 0) {
suffixPath = suffixPath.slice(6)
var suffixParamList = suffixPath.split('//')
for(var i = 0; i < suffixParamList.length; i+= 2) {
suffixParams[suffixParamList[i]] = suffixParamList[i+1]
}
}
var ret = { path: pathPart, suffix: suffixPath , suffixParams: suffixParams, hash: hash }
log.fine('makePathInfo res:',ret)
return ret
}
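// get() and set() walk a '/a/b/c' style path through a nested state object,
// creating intermediate objects on the way down. While a Vue app is active
// they go through Vue.set so that newly created properties remain reactive.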
function get(node, path, value) {
var vue = perVueApp
path = path.slice(1).split('/').reverse()
while(path.length > 1) {
var segment = path.pop()
if(!node[segment]) {
if(vue) {
Vue.set(node, segment, {})
} else {
node[segment] = {}
}
}
node = node[segment]
}
if(value && !node[path[0]]) {
if(vue) {
Vue.set(node, path[0], value)
} else {
node[path[0]] = value
}
}
return node[path[0]]
}
function set(node, path, value) {
var vue = perVueApp
path = path.slice(1).split('/').reverse()
while(path.length > 1) {
var segment = path.pop()
if(!node[segment]) {
if(vue) {
Vue.set(node, segment, {})
} else {
node[segment] = {}
}
}
node = node[segment]
}
if(vue) {
Vue.set(node, path[0], value)
}
else {
node[path[0]] = value
}
}
function | () {
ReactDOM.render(React.createElement(CmpPagerenderReactStructurePage, {model: getPerView().page} ), document.getElementById('peregrine-app'));
}
function registerViewImpl(v) {
view = v
}
function getView() {
if(window && window.parent && window.parent.$perAdminView && window.parent.$perAdminView.pageView) {
log.fine("getVIEW() - window.parent.perAdminView.pageView");
return window.parent.$perAdminView.pageView
}
return view
}
function getPerView() {
return getView()
}
function loadComponentImpl(name) {
if(!loadedComponents[name]) {
log.fine('loading component', name)
var varName = componentNameToVarName(name)
// if(window[varName]) {
// Vue.component(name, window[varName])
// }
// if we are in edit mode push the component to the perAdminApp as well
if(window.parent.$perAdminApp && !window.parent[varName]) {
window.parent[varName] = window[varName]
}
loadedComponents[name] = true
} else {
log.fine('component %s already loaded', name)
}
}
function walkTreeAndLoad(node) {
if(node.component) loadComponentImpl(node.component)
if(node.children) {
node.children.forEach(function (child) {
walkTreeAndLoad(child)
})
}
}
function getNodeFromImpl(node, path) {
return get(node, path)
}
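// Loaders are declared on a page as 'apiFunction:/path/into/view' strings:
// each entry resolves its argument node from the view and calls the matching
// function on the global `api` object, collecting the returned promises.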
function processLoaders(loaders) {
return new Promise( (resolve, reject) => {
var promises = []
if(loaders) {
for(var i = 0; i < loaders.length; i++) {
var loader = loaders[i].split(':')
if(loader.length < 2) {
log.fine('unknown loader', loaders[i])
} else {
log.fine('loading data with', loader[0], loader[1])
var pathFrom = loader[1]
var dataToLoad = getNodeFromImpl(view, pathFrom)
log.fine(dataToLoad)
if(api[loader[0]]) {
promises.push(api[loader[0]](dataToLoad))
} else {
log.error('missing', loader[0])
reject('missing ' + loader[0]+' '+dataToLoad)
}
}
}
}
Promise.all(promises).then( () => resolve() )
})
}
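// Applies freshly loaded page data: registers the components used on the page,
// updates the meta tags, maps suffix segments into the view, runs the page's
// loaders, then (re)initializes the app and pushes a history entry on navigation.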
function processLoadedContent(data, path, firstTime, fromPopState) {
walkTreeAndLoad(data)
if(data.description) document.getElementsByTagName('meta').description.content=data.description
if(data.tags) document.getElementsByTagName('meta').keywords.content=data.tags
if(data.suffixToParameter) {
const pathInfo = makePathInfo(path)
for(let i = 0; i < data.suffixToParameter.length; i+=2) {
const name = data.suffixToParameter[i]
const location = data.suffixToParameter[i+1]
set(getPerView(), location, pathInfo.suffixParams[name])
}
}
processLoaders(data.loaders).then( () => {
log.fine('first time', firstTime)
getPerView().page = data;
getPerView().path = path.slice(0, path.indexOf('.html'));
getPerView().status = 'loaded';
// if(firstTime) {
initPeregrineApp();
// }
if(document.location !== path && !fromPopState && !firstTime) {
log.fine("PUSHSTATE : " + path);
document.title = getPerView().page.title
var url = document.location.href
var domains = (getPerView().page.domains)
var newLocation = path
if (domains) {
for (var i = 0; i < domains.length; i++) {
var domain = domains[i]
if (url.startsWith(domain)) {
newLocation = '/' + path.split('/').slice(4).join('/')
}
}
}
history.pushState({peregrinevue: true, path: path}, path, newLocation)
scroll(0, 0)
// Create the event.
var event = document.createEvent('Event')
// Define that the event name is 'build'.
event.initEvent('pageRendered', true, true)
// target can be any Element or other EventTarget.
window.dispatchEvent(event)
}
})
}
function loadContentImpl(path, firstTime, fromPopState) {
log.fine('loading content for', path, firstTime, fromPopState)
var dataUrl = pagePathToDataPath(path);
log.fine(dataUrl)
getPerView().status = undefined;
axios.get(dataUrl).then(function (response) {
log.fine('got data for', path)
// if(response.data.template) {
//
// var pageData = response.data
//
// axios.get(response.data.template+'.data.json').then(function(response) {
//
// var templateData = response.data
// var mergedData = merge(templateData, pageData)
// //merging nav, footer and content together with pageData
// processLoadedContent(mergedData, path, firstTime, fromPopState)
// }).catch(function(error) {
// log.error("error getting %s %j", dataUrl, error);
// })
// } else {
processLoadedContent(response.data, path, firstTime, fromPopState)
// }
}).catch(function(error) {
log.error("error getting %s %j", dataUrl, error);
});
}
function isAuthorModeImpl() {
if(window && window.parent && window.parent.$perAdminView && window.parent.$perAdminView.pageView) {
return true
}
return false
}
var peregrineApp = {
registerView: function(view) {
registerViewImpl(view)
},
loadContent: function(path, firstTime = false, fromPopState = false) {
loadContentImpl(path, firstTime, fromPopState)
},
logger: function(name) {
return logger.logger(name)
},
loadComponent: function(name) {
loadComponentImpl(name)
},
getPerVueApp: function() {
return perVueApp
},
isAuthorMode: function() {
return isAuthorModeImpl()
},
getView: function() {
return getPerView()
},
componentNameToVar(name) {
return componentNameToVarName(name)
}
}
/**
* you should use this object as follows:
*
* var $perView = {};
* $peregrineApp.registerView($perView)
* $peregrineApp.loadContent('/content/sites/example.html')
*
*/
export default peregrineApp
| initPeregrineApp |
NPCFriendPage.py | from . import ShtikerPage
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from toontown.toon import NPCFriendPanel
from toontown.toonbase import TTLocalizer
class NPCFriendPage(ShtikerPage.ShtikerPage):
def __init__(self):
ShtikerPage.ShtikerPage.__init__(self)
def load(self):
self.title = DirectLabel(parent=self, relief=None, text=TTLocalizer.NPCFriendPageTitle, text_scale=0.12, textMayChange=0, pos=(0, 0, 0.6))
self.friendPanel = NPCFriendPanel.NPCFriendPanel(parent=self)
self.friendPanel.setScale(0.1225)
self.friendPanel.setZ(-0.03) | del self.title
del self.friendPanel
def updatePage(self):
self.friendPanel.update(base.localAvatar.NPCFriendsDict, fCallable=0)
def enter(self):
self.updatePage()
ShtikerPage.ShtikerPage.enter(self)
def exit(self):
ShtikerPage.ShtikerPage.exit(self) |
def unload(self):
ShtikerPage.ShtikerPage.unload(self) |
kendo.messages.hu-HU.js | /*!
* Copyright 2017 Telerik AD
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
(function(f){
if (typeof define === 'function' && define.amd) {
define(["kendo.core"], f);
} else {
f();
}
}(function(){
(function ($, undefined) {
/* FlatColorPicker messages */
if (kendo.ui.FlatColorPicker) {
kendo.ui.FlatColorPicker.prototype.options.messages =
$.extend(true, kendo.ui.FlatColorPicker.prototype.options.messages,{
"apply": "Alkalmaz",
"cancel": "Mégse",
"noColor": "nincs szín",
"clearColor": "Tiszta szín"
});
}
/* ColorPicker messages */
if (kendo.ui.ColorPicker) {
kendo.ui.ColorPicker.prototype.options.messages =
$.extend(true, kendo.ui.ColorPicker.prototype.options.messages,{
"apply": "Alkalmaz",
"cancel": "Mégse",
"noColor": "nincs szín",
"clearColor": "Tiszta szín"
});
}
/* ColumnMenu messages */
if (kendo.ui.ColumnMenu) {
kendo.ui.ColumnMenu.prototype.options.messages =
$.extend(true, kendo.ui.ColumnMenu.prototype.options.messages,{
"sortAscending": "Rendezés növekvő",
"sortDescending": "Rendezés csökkenő",
"filter": "Szűrés",
"columns": "Oszlopok",
"done": "Kész",
"settings": "Oszlopbeállítások",
"lock": "Rögzít",
"unlock": "Felold"
});
}
/* Editor messages */
if (kendo.ui.Editor) {
kendo.ui.Editor.prototype.options.messages =
$.extend(true, kendo.ui.Editor.prototype.options.messages,{
"bold": "Félkövér",
"italic": "Dőlt",
"underline": "Aláhúzott",
"strikethrough": "Áthúzott",
"superscript": "Felső index",
"subscript": "Alsó index",
"justifyCenter": "Középre igazítás",
"justifyLeft": "Balra igazítás",
"justifyRight": "Jobbra igazítás",
"justifyFull": "Sorkizárás",
"insertUnorderedList": "Számozatlan lista beszúrása",
"insertOrderedList": "Számozott lista beszúrása",
"indent": "Behúzás növelése",
"outdent": "Behúzás csökkentése",
"createLink": "Hivatkozás beszúrása",
"unlink": "Hivatkozás eltávolítása",
"insertImage": "Kép beszúrása",
"insertFile": "Fájl beszúrása",
"insertHtml": "HTML beszúrása",
"viewHtml": "HTML megtekintése",
"fontName": "Betűtípus választás",
"fontNameInherit": "(örökölt betűtípus)",
"fontSize": "Betűméret választás",
"fontSizeInherit": "(örökölt méret)",
"formatBlock": "Formátum",
"formatting": "Formázás",
"foreColor": "Tintaszín",
"backColor": "Háttérszín",
"style": "Stílusok",
"emptyFolder": "Üres mappa",
"uploadFile": "Feltöltés",
"orderBy": "Rendezés:",
"orderBySize": "Méret",
"orderByName": "Név",
"invalidFileType": "A fájl \"{0}\" nem megfelelő típusú. Támogatott fájltípusok: {1}.",
"deleteFile": "Biztos, hogy törli a fájlt: \"{0}\"?",
"overwriteFile": "A mappában már létezik \"{0}\" nevű fájl. Kívánja felülírni?",
"directoryNotFound": "Nincs ilyen nevű könyvtár.",
"imageWebAddress": "Webcím",
"imageAltText": "Alternatív szöveg",
"imageWidth": "Szélesség (px)",
"imageHeight": "Magasság (px)",
"fileWebAddress": "Webcím",
"fileTitle": "Cím",
"linkWebAddress": "Webcím",
"linkText": "Szöveg",
"linkToolTip": "Helyi súgó",
"linkOpenInNewWindow": "Megnyitás új ablakban",
"dialogUpdate": "Frissítés",
"dialogInsert": "Beszúrás",
"dialogButtonSeparator": "vagy",
"dialogCancel": "Mégse",
"cleanFormatting": "Formázás törlése",
"createTable": "Táblázat létrehozása",
"addColumnLeft": "Oszlop beszúrása balra",
"addColumnRight": "Oszlop beszúrása jobbra",
"addRowAbove": "Sor beszúrása fölé",
"addRowBelow": "Sor beszúrása alá",
"deleteRow": "Sor törlése",
"deleteColumn": "Oszlop törlése",
"dialogOk": "OK",
"tableWizard": "Tábla varázsló",
"tableTab": "Tábla",
"cellTab": "Cella",
"accessibilityTab": "Hozzáférhetőség",
"caption": "Szöveg",
"summary": "Összegzés",
"width": "Szélesség",
"height": "Magasság",
"cellSpacing": "Cella térköz",
"cellPadding": "Cella párnázás",
"cellMargin": "Cella margó",
"alignment": "Igazítás",
"background": "Háttér",
"cssClass": "CSS osztály",
"id": "ID",
"border": "Szegély",
"borderStyle": "Szegély stílusa",
"collapseBorders": "Keretek összeomlasztása",
"wrapText": "Szövegtörés",
"associateCellsWithHeaders": "Cellák társítása a fejlécekkel",
"alignLeft": "Balra igazítás",
"alignCenter": "Középre igazítás",
"alignRight": "Jobbra igazítás",
"alignLeftTop": "Igazítás balra és felülre",
"alignCenterTop": "Igazítás vízszintesen középre és felülre",
"alignRightTop": "Igazítás jobbra és felülre",
"alignLeftMiddle": "Igazítás balra és függőlegesen középre",
"alignCenterMiddle": "Igazítás vízszintesen és függőlegesen középre",
"alignRightMiddle": "Igazítás jobbra és függőlegesen középre",
"alignLeftBottom": "Igazítás balra és alulra",
"alignCenterBottom": "Igazítás vízszintesen középre és alulra",
"alignRightBottom": "Igazítás jobbra és alulra",
"alignRemove": "Igazítás törlése",
"columns": "Oszlopok",
"rows": "Sorok",
"selectAllCells": "Összes cella kiválasztása"
});
}
/* FileBrowser messages */
if (kendo.ui.FileBrowser) {
kendo.ui.FileBrowser.prototype.options.messages =
$.extend(true, kendo.ui.FileBrowser.prototype.options.messages,{
"uploadFile": "Feltöltés",
"orderBy": "Rendezés",
"orderByName": "Név",
"orderBySize": "Méret",
"directoryNotFound": "Nincs ilyen nevű könyvtár.",
"emptyFolder": "Üres könyvtár",
"deleteFile": "Biztos, hogy törli a fájlt: \"{0}\"?",
"invalidFileType": "A fájl \"{0}\" nem megfelelő típusú. Támogatott fájltípusok: {1}.",
"overwriteFile": "A mappában már létezik \"{0}\" nevű fájl. Kívánja felülírni?",
"dropFilesHere": "húzza ide a fájlt a feltöltéshez",
"search": "Keresés"
});
}
/* FilterCell messages */
if (kendo.ui.FilterCell) {
kendo.ui.FilterCell.prototype.options.messages =
$.extend(true, kendo.ui.FilterCell.prototype.options.messages,{
"isTrue": "igaz",
"isFalse": "hamis",
"filter": "Szűrés",
"clear": "Törlés",
"operator": "Művelet"
});
}
/* FilterCell operators */
if (kendo.ui.FilterCell) {
kendo.ui.FilterCell.prototype.options.operators =
$.extend(true, kendo.ui.FilterCell.prototype.options.operators,{
"string": {
"eq": "egyenlő",
"neq": "nem egyenlő",
"startswith": "kezdődik",
"contains": "tartalmazza",
"doesnotcontain": "nem tartalmazza",
"endswith": "végződik",
"isnull": "null",
"isnotnull": "nem null",
"isempty": "üres",
"isnotempty": "nem üres"
},
"number": {
"eq": "egyenlő",
"neq": "nem egyenlő",
"gte": "nagyobb vagy egyenlő",
"gt": "nagyobb",
"lte": "kisebb vagy egyenlő",
"lt": "kisebb",
"isnull": "null",
"isnotnull": "nem null"
},
"date": {
"eq": "egyenlő",
"neq": "nem egyenlő",
"gte": "utána vagy ekkor",
"gt": "utána",
"lte": "előtte vagy ekkor",
"lt": "előtte",
"isnull": "null",
"isnotnull": "nem null"
},
"enums": {
"eq": "egyenlő",
"neq": "nem egyenlő",
"isnull": "null",
"isnotnull": "nem null"
}
});
}
/* FilterMenu messages */
if (kendo.ui.FilterMenu) {
kendo.ui.FilterMenu.prototype.options.messages =
$.extend(true, kendo.ui.FilterMenu.prototype.options.messages,{
"info": "Megjelenítendő elemek értéke:",
"isTrue": "igaz",
"isFalse": "hamis",
"filter": "Szűrés",
"clear": "Törlés",
"and": "és",
"or": "vagy",
"selectValue": "-Válasszon-",
"operator": "Művelet",
"value": "Érték",
"cancel": "Mégse"
});
}
/* FilterMenu operator messages */
if (kendo.ui.FilterMenu) {
kendo.ui.FilterMenu.prototype.options.operators =
$.extend(true, kendo.ui.FilterMenu.prototype.options.operators,{
"string": {
"eq": "egyenlő",
"neq": "nem egyenlő",
"startswith": "kezdődik",
"contains": "tartalmazza",
"doesnotcontain": "nem tartalmazza",
"endswith": "végződik",
"isnull": "null",
"isnotnull": "nem null",
"isempty": "üres",
"isnotempty": "nem üres"
},
"number": {
"eq": "egyenlő",
"neq": "nem egyenlő",
"gte": "nagyobb vagy egyenlő",
"gt": "nagyobb",
"lte": "kisebb vagy egyenlő",
"lt": "kisebb",
"isnull": "null",
"isnotnull": "nem null"
},
"date": {
"eq": "egyenlő",
"neq": "nem egyenlő",
"gte": "utána vagy ekkor",
"gt": "utána",
"lte": "előtte vagy ekkor",
"lt": "előtte",
"isnull": "null",
"isnotnull": "nem null"
},
"enums": {
"eq": "egyenlő",
"neq": "nem egyenlő",
"isnull": "null",
"isnotnull": "nem null"
}
});
}
/* FilterMultiCheck messages */
if (kendo.ui.FilterMultiCheck) {
kendo.ui.FilterMultiCheck.prototype.options.messages =
$.extend(true, kendo.ui.FilterMultiCheck.prototype.options.messages,{
"checkAll": "Összes kijelölése",
"clear": "Törlés",
"filter": "Szűrés",
"search": "Keresés"
});
}
/* Gantt messages */
if (kendo.ui.Gantt) {
kendo.ui.Gantt.prototype.options.messages =
$.extend(true, kendo.ui.Gantt.prototype.options.messages,{
"actions": {
"addChild": "Gyermekelem hozzáadása",
"append": "Feladat hozzáadása",
"insertAfter": "Hozzáadás alá",
"insertBefore": "Hozzáadás fölé",
"pdf": "Exportálás PDF-be"
},
"cancel": "Mégse",
"deleteDependencyWindowTitle": "Függőség törlése",
"deleteTaskWindowTitle": "Feladat törlése",
"destroy": "Törlés",
"editor": {
"assingButton": "Hozzárendelés",
"editorTitle": "Feladat",
"end": "Vége",
"percentComplete": "Befejezés",
"resources": "Erőforrások",
"resourcesEditorTitle": "Erőforrások",
"resourcesHeader": "Erőforrások",
"start": "Indítás",
"title": "Cím",
"unitsHeader": "Egységek"
},
"save": "Mentés",
"views": {
"day": "nap",
"end": "vége",
"month": "hónap",
"start": "eleje",
"week": "hét",
"year": "év"
}
});
}
/* Grid messages */
if (kendo.ui.Grid) {
kendo.ui.Grid.prototype.options.messages =
$.extend(true, kendo.ui.Grid.prototype.options.messages,{
"commands": {
"cancel": "Módosítások elvetése",
"canceledit": "Mégse",
"create": "Új elem",
"destroy": "Törlés",
"edit": "Szerkesztés",
"excel": "Exportálás Excel-be",
"pdf": "Exportálás PDF-be",
"save": "Módosítások mentése",
"select": "Választás",
"update": "Frissítés"
},
"editable": {
"cancelDelete": "Mégse",
"confirmation": "Biztos, hogy törli az elemet?",
"confirmDelete": "Törlés"
},
"noRecords": "Nincsenek elérhető elemek."
});
}
/* TreeList messages */
if (kendo.ui.TreeList) {
kendo.ui.TreeList.prototype.options.messages =
$.extend(true, kendo.ui.TreeList.prototype.options.messages,{
"noRows": "Nincsenek megjeleníthető elemek",
"loading": "Betöltés...",
"requestFailed": "A kérés sikertelen.",
"retry": "Újra",
"commands": {
"edit": "Szerkesztés",
"update": "Frissítés",
"canceledit": "Mégse",
"create": "Új elem",
"createchild": "Gyermekelem hozzáadása",
"destroy": "Törlés",
"excel": "Exportálás Excel-be",
"pdf": "Exportálás PDF-be"
}
});
}
/* Groupable messages */
if (kendo.ui.Groupable) {
kendo.ui.Groupable.prototype.options.messages =
$.extend(true, kendo.ui.Groupable.prototype.options.messages,{
"empty": "Húzza ide az oszlopfejlécet a csoportosításhoz"
});
}
/* NumericTextBox messages */
if (kendo.ui.NumericTextBox) {
kendo.ui.NumericTextBox.prototype.options =
$.extend(true, kendo.ui.NumericTextBox.prototype.options,{
"upArrowText": "Érték növelése",
"downArrowText": "Érték csökkentése"
});
}
/* MediaPlayer messages */
if (kendo.ui.MediaPlayer) {
kendo.ui.MediaPlayer.prototype.options.messages =
$.extend(true, kendo.ui.MediaPlayer.prototype.options.messages,{
"pause": "Szünet",
"play": "Lejátszás",
"mute": "Némítás",
"unmute": "Némítás feloldása",
"quality": "Minőség",
"fullscreen": "Teljes képernyő"
});
}
/* Pager messages */
if (kendo.ui.Pager) {
kendo.ui.Pager.prototype.options.messages =
$.extend(true, kendo.ui.Pager.prototype.options.messages,{
"allPages": "Összes",
"display": "{0}-{1} a(z) {2} elemből",
"empty": "Nincsenek megjeleníthető elemek",
"page": "Oldal",
"of": "a(z) {0}",
"itemsPerPage": "elem / oldal",
"first": "Ugrás az első oldalra",
"previous": "Ugrás az előző oldalra",
"next": "Ugrás a következő oldalra",
"last": "Ugrás az utolsó oldalra",
"refresh": "Frissítés",
"morePages": "További oldalak"
});
}
/* PivotGrid messages */
if (kendo.ui.PivotGrid) {
kendo.ui.PivotGrid.prototype.options.messages =
$.extend(true, kendo.ui.PivotGrid.prototype.options.messages,{
"measureFields": "Húzza az adatelemeket ide",
"columnFields": "Húzza az oszlopelemeket ide",
"rowFields": "Húzza a sorelemeket ide"
});
}
/* PivotFieldMenu messages */
if (kendo.ui.PivotFieldMenu) {
kendo.ui.PivotFieldMenu.prototype.options.messages =
$.extend(true, kendo.ui.PivotFieldMenu.prototype.options.messages,{
"info": "Megjelenítendő elemek értéke:",
"filterFields": "Mezőszűrő",
"filter": "Szűrés",
"include": "Mezők befoglalása...",
"title": "Befoglalandó mezők",
"clear": "Törlés",
"ok": "OK",
"cancel": "Mégse",
"operators": {
"contains": "tartalmazza",
"doesnotcontain": "nem tartalmazza",
"startswith": "kezdődik",
"endswith": "végződik",
"eq": "egyenlő",
"neq": "nem egyenlő"
}
});
}
/* RecurrenceEditor messages */
if (kendo.ui.RecurrenceEditor) {
kendo.ui.RecurrenceEditor.prototype.options.messages =
$.extend(true, kendo.ui.RecurrenceEditor.prototype.options.messages,{
"frequencies": {
"never": "Soha",
"hourly": "Óránként",
"daily": "Naponta",
"weekly": "Hetente",
"monthly": "Havonta",
"yearly": "Évente"
},
"hourly": {
"repeatEvery": "Ismételje minden: ",
"interval": " óra(k)"
},
"daily": {
"repeatEvery": "Ismételje minden: ",
"interval": " nap(ok)"
},
"weekly": {
"interval": " hét(ek)",
"repeatEvery": "Ismételje minden: ",
"repeatOn": "Ismételje: "
},
"monthly": {
"repeatEvery": "Ismételje minden: ",
"repeatOn": "Ismételje: ",
"interval": " hónap(ok)",
"day": "nap "
},
"yearly": {
"repeatEvery": "Ismételje minden: ",
"repeatOn": "Ismételje: ",
"interval": " év(ek)",
"of": " a(z) "
},
"end": {
"label": "Vége:",
"mobileLabel": "Végződik",
"never": "Soha",
"after": "Után ",
"occurrence": " előfordulás(ok)",
"on": "Ekkor: "
},
"offsetPositions": {
"first": "első",
"second": "második",
"third": "harmadik",
"fourth": "negyedik",
"last": "utolsó"
},
"weekdays": {
"day": "nap",
"weekday": "munkanap",
"weekend": "pihenőnap"
}
});
}
/* Scheduler messages */
if (kendo.ui.Scheduler) {
kendo.ui.Scheduler.prototype.options.messages =
$.extend(true, kendo.ui.Scheduler.prototype.options.messages,{
"allDay": "egész nap",
"date": "Dátum",
"event": "Esemény",
"time": "Idő",
"showFullDay": "Teljes nap mutatása",
"showWorkDay": "Munkaórák mutatása",
"today": "Ma",
"save": "Mentés",
"cancel": "Mégse",
"destroy": "Törlés",
"deleteWindowTitle": "Esemény törlése",
"ariaSlotLabel": "Kiválasztva {0:t}-tól {1:t}-ig",
"ariaEventLabel": "{0} {1:D} {2:t}",
"editable": {
"confirmation": "Biztos, hogy törölni akarja az eseményt?"
},
"views": {
"day": "nap",
"week": "Hét",
"workWeek": "Munkahét",
"agenda": "Naptár",
"month": "Hónap"
},
"recurrenceMessages": {
"deleteWindowTitle": "Ismétlődő elem törlése",
"deleteWindowOccurrence": "Alkalom törlése",
"deleteWindowSeries": "Összes ismétlődés törlése",
"editWindowTitle": "Ismétlődő elem szerkesztése",
"editWindowOccurrence": "Alkalom szerkesztése",
"editWindowSeries": "Összes ismétlődés szerkesztése",
"deleteRecurring": "Csak ezt az alkalmat szeretné törölni, vagy az összes ismétlődést?",
"editRecurring": "Csak ezt az alkalmat szeretné szerkeszteni, vagy az összes ismétlődést?"
},
"editor": {
"title": "Cím",
"start": "Kezdés",
"end": "Befejezés",
"allDayEvent": "Egész napos esemény",
"description": "Leírás",
"repeat": "Ismétlődés",
"timezone": " ",
"startTimezone": "Kezdés időzónája",
"endTimezone": "Befejezés időzónája",
"separateTimezones": "A kezdés és a befejezés időzónája eltérő",
"timezoneEditorTitle": "Időzónák",
"timezoneEditorButton": "Időzóna",
"timezoneTitle": "Időzónák",
"noTimezone": "Nincs időzóna",
"editorTitle": "Esemény"
}
});
}
/* Spreadsheet messages */
if (kendo.spreadsheet && kendo.spreadsheet.messages.borderPalette) {
kendo.spreadsheet.messages.borderPalette =
$.extend(true, kendo.spreadsheet.messages.borderPalette,{
"allBorders": "Minden szegély",
"insideBorders": "Belső szegélyek",
"insideHorizontalBorders": "Belső vízszintes szegélyek",
"insideVerticalBorders": "Belső függőleges szegélyek",
"outsideBorders": "Külső szegélyek",
"leftBorder": "Bal szegély",
"topBorder": "Felső szegély",
"rightBorder": "Jobb szegély",
"bottomBorder": "Alsó szegély",
"noBorders": "Nincs szegély",
"reset": "Alapértelmezett szín",
"customColor": "Egyéni szín...",
"apply": "Alkalmaz",
"cancel": "Mégse"
});
}
if (kendo.spreadsheet && kendo.spreadsheet.messages.dialogs) {
kendo.spreadsheet.messages.dialogs =
$.extend(true, kendo.spreadsheet.messages.dialogs,{
"apply": "Alkalmaz",
"save": "Mentés",
"cancel": "Mégse",
"remove": "Eltávolítás",
"retry": "Újra",
"revert": "Visszaállítás",
"okText": "OK",
"formatCellsDialog": {
"title": "Formázás",
"categories": {
"number": "Szám",
"currency": "Pénznem",
"date": "Dátum"
}
},
"fontFamilyDialog": {
"title": "Betűtípus"
},
"fontSizeDialog": {
"title": "Betűméret"
},
"bordersDialog": {
"title": "Szegélyek"
},
"alignmentDialog": {
"title": "Igazítás",
"buttons": {
"justtifyLeft": "Balra igazítás",
"justifyCenter": "Középre igazítás",
"justifyRight": "Jobbra igazítás",
"justifyFull": "Sorkizárás",
"alignTop": "Függőleges igazítás felülre",
"alignMiddle": "Függőleges igazítás középre",
"alignBottom": "Függőleges igazítás alulra"
}
},
"mergeDialog": {
"title": "Cellaegyesítés",
"buttons": {
"mergeCells": "Összes egyesítése",
"mergeHorizontally": "Egyesítés vízszintesen",
"mergeVertically": "Egyesítés függőlegesen",
"unmerge": "Szétválasztás"
}
},
"freezeDialog": {
"title": "Ablaktábla rögzítése",
"buttons": {
"freezePanes": "Ablaktábla rögzítése",
"freezeRows": "Sorok rögzítése",
"freezeColumns": "Oszlopok rögzítése",
"unfreeze": "Rögzítés feloldása"
}
},
"confirmationDialog": {
"text": "Biztos, hogy törli ezt a munkalapot?",
"title": "Munkalap törlése"
},
"validationDialog": {
"title": "Adatellenőrzés",
"hintMessage": "Kérem, írjon be egy érvényes {0} értéket {1}.",
"hintTitle": "Ellenőrzés {0}",
"criteria": {
"any": "Bármely érték",
"number": "Szám",
"text": "Szöveg",
"date": "Dátum",
"custom": "Egyéni szabály",
"list": "Lista"
},
"comparers": {
"greaterThan": "nagyobb, mint",
"lessThan": "kisebb, mint",
"between": "a következők közé esik",
"notBetween": "nem esik a következők közé",
"equalTo": "egyenlő",
"notEqualTo": "nem egyenlő",
"greaterThanOrEqualTo": "nagyobb vagy egyenlő",
"lessThanOrEqualTo": "kisebb vagy egyenlő"
},
"comparerMessages": {
"greaterThan": "nagyobb, mint {0}",
"lessThan": "kisebb, mint {0}",
"between": "{0} és {1} közé esik",
"notBetween": "nem esik {0} és {1} közé",
"equalTo": "egyenlő {0}",
"notEqualTo": "nem egyenlő {0}",
"greaterThanOrEqualTo": "nagyobb vagy egyenlő {0}",
"lessThanOrEqualTo": "kisebb vagy egyenlő {0}",
"custom": "megfelel a képletnek: {0}"
},
"labels": {
"criteria": "Feltétel",
"comparer": "Összehasonlító",
"min": "Minimum",
"max": "Maximum",
"value": "Érték",
"start": "Kezdés",
"end": "Befejezés",
"onInvalidData": "Érvénytelen adat beírásakor",
"rejectInput": "Bevitel visszautasítása",
"showWarning": "Figyelmeztetés megjelenítése",
"showHint": "Javaslat megjelenítése",
"hintTitle": "Javaslat címe",
"hintMessage": "Javaslat szövege",
"ignoreBlank": "Üres cellák mellőzése"
},
"placeholders": {
"typeTitle": "Típus cím",
"typeMessage": "Típus üzenet"
}
},
"saveAsDialog": {
"title": "Mentés másként...",
"labels": {
"fileName": "Fájlnév",
"saveAsType": "Fájl típusa"
}
},
"exportAsDialog": {
"title": "Exportálás...",
"labels": {
"fileName": "Fájlnév",
"saveAsType": "Fájl típusa",
"exportArea": "Exportálás",
"paperSize": "Papírméret",
"margins": "Margók",
"orientation": "Tájolás",
"print": "Nyomtatás",
"guidelines": "Segédvonalak",
"center": "Középre",
"horizontally": "Vízszintesen",
"vertically": "Függőlegesen"
}
},
"modifyMergedDialog": {
"errorMessage": "Nem lehet módosítani az egyesített cella egy részét."
},
"useKeyboardDialog": {
"title": "Másolás és beillesztés",
"errorMessage": "Ezek a műveletek nem érhetők el a menüből. Kérem, használja a következő billentyűkombinációkat:",
"labels": {
"forCopy": "másoláshoz",
"forCut": "kivágáshoz",
"forPaste": "beillesztéshez"
}
},
"unsupportedSelectionDialog": {
"errorMessage": "A művelet nem hajtható végre többes kijelölésen."
}
});
}
if (kendo.spreadsheet && kendo.spreadsheet.messages.filterMenu) {
kendo.spreadsheet.messages.filterMenu =
$.extend(true, kendo.spreadsheet.messages.filterMenu,{
"sortAscending": "Tartomány rendezése A-Z",
"sortDescending": "Tartomány rendezése Z-A",
"filterByValue": "Szűrés érték szerint",
"filterByCondition": "Szűrés feltétel alapján",
"apply": "Alkalmaz",
"search": "Keresés",
"addToCurrent": "Hozzáadás az aktuális kijelöléshez",
"clear": "Törlés",
"blanks": "(Üresek)",
"operatorNone": "Egyik sem",
"and": "és",
"or": "vagy",
"operators": {
"string": {
"contains": "A szöveg tartalmazza",
"doesnotcontain": "A szöveg nem tartalmazza",
"startswith": "A szöveg eleje",
"endswith": "A szöveg vége"
},
"date": {
"eq": "A dátum",
"neq": "A dátum nem",
"lt": "Ezen dátum előtt",
"gt": "Ezen dátum után"
},
"number": {
"eq": "egyenlő",
"neq": "nem egyenlő",
"gte": "nagyobb vagy egyenlő",
"gt": "nagyobb",
"lte": "kisebb vagy egyenlő",
"lt": "kisebb"
}
}
});
}
if (kendo.spreadsheet && kendo.spreadsheet.messages.toolbar) {
kendo.spreadsheet.messages.toolbar =
$.extend(true, kendo.spreadsheet.messages.toolbar,{
"addColumnLeft": "Oszlop hozzáadása balra",
"addColumnRight": "Oszlop hozzáadása jobbra",
"addRowAbove": "Sor hozzáadása fölé",
"addRowBelow": "Sor hozzáadása alá",
"alignment": "Igazítás",
"alignmentButtons": {
"justtifyLeft": "Balra igazítás",
"justifyCenter": "Középre igazítás",
"justifyRight": "Jobbra igazítás",
"justifyFull": "Sorkizárás",
"alignTop": "Függőleges igazítás felülre",
"alignMiddle": "Függőleges igazítás középre",
"alignBottom": "Függőleges igazítás alulra"
},
"backgroundColor": "Kitöltőszín",
"bold": "Félkövér",
"borders": "Szegélyek",
"colorPicker": {
"reset": "Alapértelmezett szín",
"customColor": "Egyéni szín..."
},
"copy": "Másolás",
"cut": "Kivágás",
"deleteColumn": "Oszlop törlése",
"deleteRow": "Sor törlése",
"excelImport": "Importálás Excel-ből...",
"filter": "Szűrés",
"fontFamily": "Betűtípus",
"fontSize": "Betűméret",
"format": "Egyéni formátum...",
"formatTypes": {
"automatic": "Automatikus",
"number": "Szám",
"percent": "Százalék",
"financial": "Könyvelési",
"currency": "Pénznem",
"date": "Dátum",
"time": "Idő",
"dateTime": "Dátum-idő",
"duration": "Időtartam",
"moreFormats": "Egyéb formátum..."
},
"formatDecreaseDecimal": "Tizedeshelyek csökkentése",
"formatIncreaseDecimal": "Tizedeshelyek növelése",
"freeze": "Ablaktábla rögzítése",
"freezeButtons": {
"freezePanes": "Ablaktábla rögzítése",
"freezeRows": "Sorok rögzítése",
"freezeColumns": "Oszlopok rögzítése",
"unfreeze": "Rögzítés feloldása"
},
"italic": "Dőlt",
"merge": "Cellaegyesítés",
"mergeButtons": {
"mergeCells": "Összes egyesítése",
"mergeHorizontally": "Egyesítés vízszintesen",
"mergeVertically": "Egyesítés függőlegesen",
"unmerge": "Szétválasztás"
},
"open": "Megnyitás...",
"paste": "Beillesztés",
"quickAccess": {
"redo": "Mégis",
"undo": "Visszavonás"
},
"saveAs": "Mentés másként...",
"sortAsc": "Rendezés növekvő",
"sortDesc": "Rendezés csökkenő",
"sortButtons": {
"sortSheetAsc": "Munkalap rendezése A-Z",
"sortSheetDesc": "Munkalap rendezése Z-A",
"sortRangeAsc": "Tartomány rendezése A-Z",
"sortRangeDesc": "Tartomány rendezése Z-A"
},
"textColor": "Tintaszín",
"textWrap": "Szöveg törése több sorba",
"underline": "Aláhúzott",
"validation": "Adatellenőrzés..."
});
}
if (kendo.spreadsheet && kendo.spreadsheet.messages.view) {
kendo.spreadsheet.messages.view =
$.extend(true, kendo.spreadsheet.messages.view,{
"errors": {
"shiftingNonblankCells": "Adatvesztés nélkül nem lehet a cellákat beszúrni. Válasszon másik beszúrási pozíciót, vagy törölje az adatokat a munkalap végéről.",
"filterRangeContainingMerges": "Nem lehet szűrőt készíteni összevonásokat tartalmazó tartományon belül",
"validationError": "A beírt érték megsérti a cellára beállított adatellenőrzési szabályokat."
},
"tabs": {
"home": "Elejére",
"insert": "Beszúrás",
"data": "Adat"
}
});
}
/* Slider messages */
if (kendo.ui.Slider) {
kendo.ui.Slider.prototype.options =
$.extend(true, kendo.ui.Slider.prototype.options,{
"increaseButtonTitle": "Növelés",
"decreaseButtonTitle": "Csökkentés"
});
}
/* TreeList messages */
if (kendo.ui.TreeList) {
kendo.ui.TreeList.prototype.options.messages =
$.extend(true, kendo.ui.TreeList.prototype.options.messages,{
"noRows": "Nincsenek megjeleníthető elemek",
"loading": "Betöltés...",
"requestFailed": "A kérés sikertelen",
"retry": "Újra",
"commands": {
"edit": "Szerkesztés",
"update": "Frissítés",
"canceledit": "Mégse",
"create": "Új elem",
"createchild": "Gyermekelem hozzáadása",
"destroy": "Törlés",
"excel": "Exportálás Excel-be",
"pdf": "Exportálás PDF-be"
}
});
}
if (kendo.ui.TreeList) {
kendo.ui.TreeList.prototype.options.columnMenu =
$.extend(true, kendo.ui.TreeList.prototype.options.columnMenu, {
"messages": {
"columns": "Oszlopválasztás",
"filter": "Szűrés",
"sortAscending": "Rendezés (növekvő)",
"sortDescending": "Rendezés (csökkenő)"
}
});
}
/* TreeView messages */
if (kendo.ui.TreeView) {
kendo.ui.TreeView.prototype.options.messages =
$.extend(true, kendo.ui.TreeView.prototype.options.messages,{
"loading": "Betöltés...",
"requestFailed": "A kérés sikertelen",
"retry": "Újra"
});
}
/* Upload messages */
if (kendo.ui.Upload) {
kendo.ui.Upload.prototype.options.localization=
$.extend(true, kendo.ui.Upload.prototype.options.localization,{
"select": "Választás...",
"cancel": "Mégse",
"retry": "Újra",
"remove": "Eltávolítás",
"clearSelectedFiles": "Törlés",
"uploadSelectedFiles": "Fájlok feltöltése",
"dropFilesHere": "húzza ide a feltöltendő fájlokat",
"statusUploading": "feltöltés",
"statusUploaded": "feltöltve",
"statusWarning": "figyelem",
"statusFailed": "sikertelen",
"headerStatusUploading": "Feltöltés...",
"headerStatusUploaded": "Kész",
"invalidMaxFileSize": "A fájl túl nagy.",
"invalidMinFileSize": "A fájl túl kicsi.",
"invalidFileExtension": "A fájltípus nem engedélyezett."
});
}
/* Validator messages */
if (kendo.ui.Validator) {
kendo.ui.Validator.prototype.options.messages =
$.extend(true, kendo.ui.Validator.prototype.options.messages,{
"required": "{0} szükséges",
"pattern": "{0} érvénytelen",
"min": "{0} nagyobb vagy egyenlő kell hogy legyen, mint {1}",
"max": "{0} kisebb vagy egyenlő kell hogy legyen, mint {1}",
"step": "{0} érvénytelen",
"email": "{0} érvénytelen email",
"url": "{0} érvénytelen URL",
"date": "{0} érvénytelen dátum",
"dateCompare": "A végdátum nagyobb vagy egyenlő kell hogy legyen, mint a kezdődátum"
});
}
/* kendo.ui.progress method */
if (kendo.ui.progress) {
kendo.ui.progress.messages =
$.extend(true, kendo.ui.progress.messages, {
loading: "Betöltés..."
});
}
/* Dialog */
if (kendo.ui.Dialog) {
kendo.ui.Dialog.prototype.options.messages =
$.extend(true, kendo.ui.Dialog.prototype.options.localization, {
"close": "Bezárás"
});
}
/* Alert */
if (kendo.ui.Alert) {
kendo.ui.Alert.prototype.options.messages =
$.extend(true, kendo.ui.Alert.prototype.options.localization, {
"okText": "OK"
});
}
/* Confirm */
if (kendo.ui.Confirm) {
kendo.ui.Confirm.prototype.options.messages =
$.extend(true, kendo.ui.Confirm.prototype.options.localization, {
"okText": "OK",
"cancel": "Mégse"
});
}
/* Prompt */
if (kendo.ui.Prompt) {
kendo.ui.Prompt.prototype.options.messages =
$.extend(true, kendo.ui.Prompt.prototype.options.localization, {
"okText": "OK",
"cancel": "Mégse"
});
}
})(window.kendo.jQuery);
})); | * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, |
csvfeed.py | # PyAlgoTrade
#
# Copyright 2011-2018 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <[email protected]>
"""
import datetime
import pytz
import six
from pyalgotrade.utils import dt
from pyalgotrade.utils import csvutils
from pyalgotrade.barfeed import membf
from pyalgotrade import bar
# Interface for csv row parsers.
class RowParser(object):
def parseBar(self, csvRowDict):
raise NotImplementedError()
def getFieldNames(self):
raise NotImplementedError()
def getDelimiter(self):
raise NotImplementedError()
# Interface for bar filters.
class BarFilter(object):
def includeBar(self, bar_):
raise NotImplementedError()
class DateRangeFilter(BarFilter):
def __init__(self, fromDate=None, toDate=None):
self.__fromDate = fromDate
self.__toDate = toDate
def includeBar(self, bar_):
if self.__toDate and bar_.getDateTime() > self.__toDate:
return False
if self.__fromDate and bar_.getDateTime() < self.__fromDate:
return False
return True
# US Equities Regular Trading Hours filter
# Monday ~ Friday
# 9:30 ~ 16 (GMT-5)
class USEquitiesRTH(DateRangeFilter):
timezone = pytz.timezone("US/Eastern")
def __init__(self, fromDate=None, toDate=None):
super(USEquitiesRTH, self).__init__(fromDate, toDate)
self.__fromTime = datetime.time(9, 30, 0)
self.__toTime = datetime.time(16, 0, 0)
def includeBar(self, bar_):
ret = super(USEquitiesRTH, self).includeBar(bar_)
if ret:
# Check day of week
barDay = bar_.getDateTime().weekday()
if barDay > 4:
return False
# Check time
barTime = dt.localize(bar_.getDateTime(), USEquitiesRTH.timezone).time()
if barTime < self.__fromTime:
return False
if barTime > self.__toTime:
return False
return ret
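# Illustrative sketch (added commentary, not part of the original module): a
# filter like the one above is attached to a feed before loading bars, so that
# addBarsFromCSV drops anything the filter rejects. The instrument and file
# name below are hypothetical.
#
#   feed = GenericBarFeed(bar.Frequency.MINUTE)
#   feed.setBarFilter(USEquitiesRTH())
#   feed.addBarsFromCSV("orcl", "orcl-2000.csv")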
class BarFeed(membf.BarFeed):
"""Base class for CSV file based :class:`pyalgotrade.barfeed.BarFeed`.
.. note::
This is a base class and should not be used directly.
"""
def __init__(self, frequency, maxLen=None):
super(BarFeed, self).__init__(frequency, maxLen)
self.__barFilter = None
self.__dailyTime = datetime.time(0, 0, 0)
def getDailyBarTime(self):
return self.__dailyTime
def setDailyBarTime(self, time):
self.__dailyTime = time
def getBarFilter(self):
return self.__barFilter
def setBarFilter(self, barFilter):
self.__barFilter = barFilter
def addBarsFromCSV(self, instrument, path, rowParser, skipMalformedBars=False):
def parse_bar_skip_malformed(row):
ret = None
try:
ret = rowParser.parseBar(row)
except Exception:
pass
return ret
if skipMalformedBars:
parse_bar = parse_bar_skip_malformed
else:
parse_bar = rowParser.parseBar
# Load the csv file
loadedBars = []
reader = csvutils.FastDictReader(open(path, "r"), fieldnames=rowParser.getFieldNames(), delimiter=rowParser.getDelimiter())
for row in reader:
bar_ = parse_bar(row)
if bar_ is not None and (self.__barFilter is None or self.__barFilter.includeBar(bar_)):
loadedBars.append(bar_)
self.addBarsFromSequence(instrument, loadedBars)
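# Added note on the loader above (not in the original source): with
# skipMalformedBars=True, a row that makes rowParser.parseBar raise is
# silently dropped; with the default, the exception propagates and aborts
# the whole load.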
class GenericRowParser(RowParser):
def __init__(self, columnNames, dateTimeFormat, dailyBarTime, frequency, timezone, barClass=bar.BasicBar):
self.__dateTimeFormat = dateTimeFormat
self.__dailyBarTime = dailyBarTime
self.__frequency = frequency
self.__timezone = timezone
self.__haveAdjClose = False
self.__barClass = barClass
# Column names.
self.__dateTimeColName = columnNames["datetime"]
self.__openColName = columnNames["open"]
self.__highColName = columnNames["high"]
self.__lowColName = columnNames["low"]
self.__closeColName = columnNames["close"]
self.__volumeColName = columnNames["volume"]
self.__adjCloseColName = columnNames["adj_close"]
self.__columnNames = columnNames
def _parseDate(self, dateString):
ret = datetime.datetime.strptime(dateString, self.__dateTimeFormat)
if self.__dailyBarTime is not None:
ret = datetime.datetime.combine(ret, self.__dailyBarTime)
# Localize the datetime if a timezone was given.
if self.__timezone:
ret = dt.localize(ret, self.__timezone)
return ret
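# Sketch of the parsing pipeline above (added commentary; the values are
# hypothetical): with dateTimeFormat "%Y-%m-%d", dailyBarTime
# datetime.time(23, 59) and a pytz timezone, the string "2013-01-01" parses
# to 2013-01-01 23:59 localized to that timezone.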
def barsHaveAdjClose(self):
return self.__haveAdjClose
def getFieldNames(self):
# The first row is expected to contain the field names.
return None
def getDelimiter(self):
return ","
def parseBar(self, csvRowDict):
dateTime = self._parseDate(csvRowDict[self.__dateTimeColName])
open_ = float(csvRowDict[self.__openColName])
high = float(csvRowDict[self.__highColName])
low = float(csvRowDict[self.__lowColName])
close = float(csvRowDict[self.__closeColName])
volume = float(csvRowDict[self.__volumeColName])
adjClose = None
if self.__adjCloseColName is not None:
adjCloseValue = csvRowDict.get(self.__adjCloseColName, "")
if len(adjCloseValue) > 0:
adjClose = float(adjCloseValue)
self.__haveAdjClose = True
# Process extra columns.
extra = {}
for k, v in six.iteritems(csvRowDict):
if k not in self.__columnNames.values():
extra[k] = csvutils.float_or_string(v)
return self.__barClass(
dateTime, open_, high, low, close, volume, adjClose, self.__frequency, extra=extra
)
class GenericBarFeed(BarFeed):
"""A BarFeed that loads bars from CSV files that have the following format:
::
Date Time,Open,High,Low,Close,Volume,Adj Close
2013-01-01 13:59:00,13.51001,13.56,13.51,13.56,273.88014126,13.51001
:param frequency: The frequency of the bars. Check :class:`pyalgotrade.bar.Frequency`.
:param timezone: The default timezone to use to localize bars. Check :mod:`pyalgotrade.marketsession`.
:type timezone: A pytz timezone.
:param maxLen: The maximum number of values that the :class:`pyalgotrade.dataseries.bards.BarDataSeries` will hold.
Once a bounded length is full, when new items are added, a corresponding number of items are discarded from the
opposite end. If None then dataseries.DEFAULT_MAX_LEN is used.
:type maxLen: int.
.. note::
* The CSV file **must** have the column names in the first row.
* It is ok if the **Adj Close** column is empty.
* When working with multiple instruments:
* If all the instruments loaded are in the same timezone, then the timezone parameter may not be specified.
* If any of the instruments loaded are in different timezones, then the timezone parameter should be set.
"""
def __init__(self, frequency, timezone=None, maxLen=None):
super(GenericBarFeed, self).__init__(frequency, maxLen)
self.__timezone = timezone
# Assume bars don't have adjusted close. This will be set to True after
# loading the first file if the adj_close column is there.
self.__haveAdjClose = False
self.__barClass = bar.BasicBar
self.__dateTimeFormat = "%Y-%m-%d %H:%M:%S"
self.__columnNames = {
"datetime": "Date Time",
"open": "Open",
"high": "High",
"low": "Low",
"close": "Close",
"volume": "Volume",
"adj_close": "Adj Close",
}
# self.__dateTimeFormat expects time to be set so there is no need to
# fix time.
self.setDailyBarTime(None)
def barsHaveAdjClose(self):
return self.__haveAdjClose
def setNoAdjClose(self):
self.__columnNames["adj_close"] = None
self.__haveAdjClose = False
def setColumnName(self, col, name):
self.__columnNames[col] = name
def setDateTimeFormat(self, dateTimeFormat):
"""
Set the format string to use with strptime to parse datetime column.
"""
self.__dateTimeFormat = dateTimeFormat
def setBarClass(self, barClass):
self.__barClass = barClass
def addBarsFromCSV(self, instrument, path, timezone=None, skipMalformedBars=False):
"""Loads bars for a given instrument from a CSV formatted file.
The instrument gets registered in the bar feed.
:param instrument: Instrument identifier.
:type instrument: string.
:param path: The path to the CSV file.
:type path: string.
:param timezone: The timezone to use to localize bars. Check :mod:`pyalgotrade.marketsession`. | """
if timezone is None:
timezone = self.__timezone
assert len(self.getAllFrequencies()) == 1
rowParser = GenericRowParser(
self.__columnNames, self.__dateTimeFormat, self.getDailyBarTime(), self.getAllFrequencies()[0],
timezone, self.__barClass
)
super(GenericBarFeed, self).addBarsFromCSV(instrument, path, rowParser, skipMalformedBars=skipMalformedBars)
if rowParser.barsHaveAdjClose():
self.__haveAdjClose = True
elif self.__haveAdjClose:
raise Exception("Previous bars had adjusted close and these ones don't have.") | :type timezone: A pytz timezone.
:param skipMalformedBars: True to skip errors while parsing bars.
:type skipMalformedBars: boolean. |
webpack.config.js | const path = require('path');
module.exports = {
mode: 'production',
entry: './src/module.js',
output: {
path: path.resolve('lib'),
filename: 'module.js',
libraryTarget: 'commonjs2',
},
module: { | {
test: /\.js$/,
exclude: /(node_modules)/,
use: 'babel-loader',
},
],
},
resolve: {
extensions: ['.js'],
},
}; | rules: [ |
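// Usage sketch (added commentary, not part of the original config; the build
// command is an assumption): running "npx webpack" with this file compiles
// src/module.js through babel-loader and emits lib/module.js as a CommonJS
// module, consumable as:
//
//   const mod = require('./lib/module.js');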
safehtml_test.js | // Copyright 2013 The Closure Library Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @fileoverview Unit tests for goog.html.SafeHtml and its builders.
*/
goog.provide('goog.html.safeHtmlTest');
goog.require('goog.html.SafeHtml');
goog.require('goog.html.SafeScript');
goog.require('goog.html.SafeStyle');
goog.require('goog.html.SafeStyleSheet');
goog.require('goog.html.SafeUrl');
goog.require('goog.html.TrustedResourceUrl');
goog.require('goog.html.testing');
goog.require('goog.html.trustedtypes');
goog.require('goog.i18n.bidi.Dir');
goog.require('goog.labs.userAgent.browser');
goog.require('goog.object');
goog.require('goog.string.Const');
goog.require('goog.testing.PropertyReplacer');
goog.require('goog.testing.jsunit');
goog.setTestOnly('goog.html.safeHtmlTest');
var stubs = new goog.testing.PropertyReplacer();
var policy = goog.createTrustedTypesPolicy('closure_test');
function | () {
stubs.reset();
}
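// Added commentary (not in the original test): PropertyReplacer lets a test
// temporarily swap a property and have tearDown restore it, e.g.:
//
//   stubs.set(goog.html.trustedtypes, 'PRIVATE_DO_NOT_ACCESS_OR_ELSE_POLICY', policy);
//   // ...assert behavior under the stubbed policy...
//   stubs.reset();  // undo all replacements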
function testSafeHtml() {
// TODO(xtof): Consider using SafeHtmlBuilder instead of newSafeHtmlForTest,
// when available.
var safeHtml = goog.html.testing.newSafeHtmlForTest('Hello <em>World</em>');
assertSameHtml('Hello <em>World</em>', safeHtml);
assertEquals('Hello <em>World</em>', goog.html.SafeHtml.unwrap(safeHtml));
assertEquals('SafeHtml{Hello <em>World</em>}', String(safeHtml));
assertNull(safeHtml.getDirection());
safeHtml = goog.html.testing.newSafeHtmlForTest(
'World <em>Hello</em>', goog.i18n.bidi.Dir.RTL);
assertSameHtml('World <em>Hello</em>', safeHtml);
assertEquals('World <em>Hello</em>', goog.html.SafeHtml.unwrap(safeHtml));
assertEquals('SafeHtml{World <em>Hello</em>}', String(safeHtml));
assertEquals(goog.i18n.bidi.Dir.RTL, safeHtml.getDirection());
// Interface markers are present.
assertTrue(safeHtml.implementsGoogStringTypedString);
assertTrue(safeHtml.implementsGoogI18nBidiDirectionalString);
// Pre-defined constant.
assertSameHtml('', goog.html.SafeHtml.EMPTY);
assertSameHtml('<br>', goog.html.SafeHtml.BR);
}
/** @suppress {checkTypes} */
function testUnwrap() {
var privateFieldName = 'privateDoNotAccessOrElseSafeHtmlWrappedValue_';
var markerFieldName = 'SAFE_HTML_TYPE_MARKER_GOOG_HTML_SECURITY_PRIVATE_';
var propNames = goog.object.getKeys(goog.html.SafeHtml.htmlEscape(''));
assertContains(privateFieldName, propNames);
assertContains(markerFieldName, propNames);
var evil = {};
evil[privateFieldName] = '<script>evil()</script';
evil[markerFieldName] = {};
var exception = assertThrows(function() { goog.html.SafeHtml.unwrap(evil); });
assertContains('expected object of type SafeHtml', exception.message);
}
function testUnwrapTrustedHTML() {
var safeValue = goog.html.SafeHtml.htmlEscape('HTML');
var trustedValue = goog.html.SafeHtml.unwrapTrustedHTML(safeValue);
assertEquals(safeValue.getTypedStringValue(), trustedValue);
stubs.set(
goog.html.trustedtypes, 'PRIVATE_DO_NOT_ACCESS_OR_ELSE_POLICY', policy);
safeValue = goog.html.SafeHtml.htmlEscape('HTML');
trustedValue = goog.html.SafeHtml.unwrapTrustedHTML(safeValue);
assertEquals(safeValue.getTypedStringValue(), trustedValue.toString());
assertTrue(
goog.global.TrustedHTML ? trustedValue instanceof TrustedHTML :
goog.isString(trustedValue));
}
function testHtmlEscape() {
// goog.html.SafeHtml passes through unchanged.
var safeHtmlIn = goog.html.SafeHtml.htmlEscape('<b>in</b>');
assertTrue(safeHtmlIn === goog.html.SafeHtml.htmlEscape(safeHtmlIn));
// Plain strings are escaped.
var safeHtml = goog.html.SafeHtml.htmlEscape('Hello <em>"\'&World</em>');
assertSameHtml('Hello &lt;em&gt;&quot;&#39;&amp;World&lt;/em&gt;', safeHtml);
assertEquals(
'SafeHtml{Hello &lt;em&gt;&quot;&#39;&amp;World&lt;/em&gt;}',
String(safeHtml));
// Primitives with properties that wrongly indicate that the text is of a type
// that implements `goog.i18n.bidi.DirectionalString` and
// `goog.string.TypedString` are escaped. This simulates a property renaming
// collision with a String, Number or Boolean property set externally.
var stringWithProperties = 'Hello <em>"\'&World</em>';
stringWithProperties.implementsGoogI18nBidiDirectionalString = true;
stringWithProperties.implementsGoogStringTypedString = true;
safeHtml = goog.html.SafeHtml.htmlEscape(stringWithProperties);
assertSameHtml('Hello &lt;em&gt;&quot;&#39;&amp;World&lt;/em&gt;', safeHtml);
var numberWithProperties = 123;
numberWithProperties.implementsGoogI18nBidiDirectionalString = true;
numberWithProperties.implementsGoogStringTypedString = true;
safeHtml = goog.html.SafeHtml.htmlEscape(numberWithProperties);
assertSameHtml('123', safeHtml);
var booleanWithProperties = true;
booleanWithProperties.implementsGoogI18nBidiDirectionalString = true;
booleanWithProperties.implementsGoogStringTypedString = true;
safeHtml = goog.html.SafeHtml.htmlEscape(booleanWithProperties);
assertSameHtml('true', safeHtml);
// Creating from a SafeUrl escapes and retains the known direction (which is
// fixed to LTR for URLs).
var safeUrl = goog.html.SafeUrl.fromConstant(
goog.string.Const.from('http://example.com/?foo&bar'));
var escapedUrl = goog.html.SafeHtml.htmlEscape(safeUrl);
assertSameHtml('http://example.com/?foo&amp;bar', escapedUrl);
assertEquals(goog.i18n.bidi.Dir.LTR, escapedUrl.getDirection());
// Creating SafeHtml from a goog.string.Const escapes as well (i.e., the
// value is treated like any other string). To create HTML markup from
// program literals, SafeHtmlBuilder should be used.
assertSameHtml(
'this & that',
goog.html.SafeHtml.htmlEscape(goog.string.Const.from('this & that')));
}
function testSafeHtmlCreate() {
var br = goog.html.SafeHtml.create('br');
assertSameHtml('<br>', br);
assertSameHtml(
'<span title="""></span>',
goog.html.SafeHtml.create('span', {'title': '"'}));
assertSameHtml(
'<span>&lt;</span>', goog.html.SafeHtml.create('span', {}, '<'));
assertSameHtml(
'<span><br></span>', goog.html.SafeHtml.create('span', {}, br));
assertSameHtml('<span></span>', goog.html.SafeHtml.create('span', {}, []));
assertSameHtml(
'<span></span>',
goog.html.SafeHtml.create('span', {'title': null, 'class': undefined}));
assertSameHtml(
'<span>x<br>y</span>',
goog.html.SafeHtml.create('span', {}, ['x', br, 'y']));
assertSameHtml(
'<table border="0"></table>',
goog.html.SafeHtml.create('table', {'border': 0}));
var onclick = goog.string.Const.from('alert(/"/)');
assertSameHtml(
'<span onclick="alert(/"/)"></span>',
goog.html.SafeHtml.create('span', {'onclick': onclick}));
var href = goog.html.testing.newSafeUrlForTest('?a&b');
assertSameHtml(
'<a href="?a&b"></a>',
goog.html.SafeHtml.create('a', {'href': href}));
var style = goog.html.testing.newSafeStyleForTest('border: /* " */ 0;');
assertSameHtml(
'<hr style="border: /* " */ 0;">',
goog.html.SafeHtml.create('hr', {'style': style}));
assertEquals(
goog.i18n.bidi.Dir.NEUTRAL,
goog.html.SafeHtml.create('span').getDirection());
assertNull(goog.html.SafeHtml.create('span', {'dir': 'x'}).getDirection());
assertEquals(
goog.i18n.bidi.Dir.NEUTRAL,
goog.html.SafeHtml.create('span', {'dir': 'ltr'}, 'a').getDirection());
assertThrows(function() { goog.html.SafeHtml.create('script'); });
assertThrows(function() { goog.html.SafeHtml.create('br', {}, 'x'); });
assertThrows(function() {
goog.html.SafeHtml.create('img', {'onerror': ''});
});
assertThrows(function() {
goog.html.SafeHtml.create('img', {'OnError': ''});
});
assertThrows(function() { goog.html.SafeHtml.create('a href=""'); });
assertThrows(function() {
goog.html.SafeHtml.create('a', {'title="" href': ''});
});
assertThrows(function() { goog.html.SafeHtml.create('applet'); });
assertThrows(function() {
goog.html.SafeHtml.create('applet', {'code': 'kittens.class'});
});
assertThrows(function() { goog.html.SafeHtml.create('base'); });
assertThrows(function() {
goog.html.SafeHtml.create('base', {'href': 'http://example.org'});
});
assertThrows(function() { goog.html.SafeHtml.create('math'); });
assertThrows(function() { goog.html.SafeHtml.create('meta'); });
assertThrows(function() { goog.html.SafeHtml.create('svg'); });
}
function testSafeHtmlCreate_styleAttribute() {
var style = 'color:red;';
var expected = '<hr style="' + style + '">';
assertThrows(function() {
goog.html.SafeHtml.create('hr', {'style': style});
});
assertSameHtml(expected, goog.html.SafeHtml.create('hr', {
'style': goog.html.SafeStyle.fromConstant(goog.string.Const.from(style))
}));
assertSameHtml(
expected, goog.html.SafeHtml.create('hr', {'style': {'color': 'red'}}));
}
function testSafeHtmlCreate_urlAttributes() {
// TrustedResourceUrl is allowed.
var trustedResourceUrl = goog.html.TrustedResourceUrl.fromConstant(
goog.string.Const.from('https://google.com/trusted'));
assertSameHtml(
'<img src="https://google.com/trusted">',
goog.html.SafeHtml.create('img', {'src': trustedResourceUrl}));
// SafeUrl is allowed.
var safeUrl = goog.html.SafeUrl.sanitize('https://google.com/safe');
assertSameHtml(
'<imG src="https://google.com/safe">',
goog.html.SafeHtml.create('imG', {'src': safeUrl}));
// Const is allowed.
var constUrl = goog.string.Const.from('https://google.com/const');
assertSameHtml(
'<a href="https://google.com/const"></a>',
goog.html.SafeHtml.create('a', {'href': constUrl}));
// string is allowed but escaped.
assertSameHtml(
'<a href="http://google.com/safe""></a>',
goog.html.SafeHtml.create('a', {'href': 'http://google.com/safe"'}));
// string is allowed but sanitized.
var badUrl = 'javascript:evil();';
var sanitizedUrl =
goog.html.SafeUrl.unwrap(goog.html.SafeUrl.sanitize(badUrl));
assertTrue(typeof sanitizedUrl == 'string');
assertNotEquals(badUrl, sanitizedUrl);
assertSameHtml(
'<a href="' + sanitizedUrl + '"></a>',
goog.html.SafeHtml.create('a', {'href': badUrl}));
// attribute case is ignored for url attributes purposes
assertSameHtml(
'<a hReF="' + sanitizedUrl + '"></a>',
goog.html.SafeHtml.create('a', {'hReF': badUrl}));
}
/** @suppress {checkTypes} */
function testSafeHtmlCreateIframe() {
// Setting src and srcdoc.
var url = goog.html.TrustedResourceUrl.fromConstant(
goog.string.Const.from('https://google.com/trusted<'));
assertSameHtml(
'<iframe src="https://google.com/trusted<"></iframe>',
goog.html.SafeHtml.createIframe(url, null, {'sandbox': null}));
var srcdoc = goog.html.SafeHtml.BR;
assertSameHtml(
'<iframe srcdoc="<br>"></iframe>',
goog.html.SafeHtml.createIframe(null, srcdoc, {'sandbox': null}));
// sandbox default and overriding it.
assertSameHtml(
'<iframe sandbox=""></iframe>', goog.html.SafeHtml.createIframe());
assertSameHtml(
'<iframe Sandbox="allow-same-origin allow-top-navigation"></iframe>',
goog.html.SafeHtml.createIframe(
null, null, {'Sandbox': 'allow-same-origin allow-top-navigation'}));
// Cannot override src and srcdoc.
assertThrows(function() {
goog.html.SafeHtml.createIframe(null, null, {'Src': url});
});
assertThrows(function() {
goog.html.SafeHtml.createIframe(null, null, {'Srcdoc': url});
});
// Unsafe src and srcdoc.
assertThrows(function() {
goog.html.SafeHtml.createIframe('http://example.com');
});
assertThrows(function() {
goog.html.SafeHtml.createIframe(null, '<script>alert(1)</script>');
});
// Can set content.
assertSameHtml(
'<iframe>&lt;</iframe>',
goog.html.SafeHtml.createIframe(null, null, {'sandbox': null}, '<'));
}
/** @suppress {checkTypes} */
function testSafeHtmlcreateSandboxIframe() {
function assertSameHtmlIfSupportsSandbox(referenceHtml, testedHtmlFunction) {
if (!goog.html.SafeHtml.canUseSandboxIframe()) {
assertThrows(testedHtmlFunction);
} else {
assertSameHtml(referenceHtml, testedHtmlFunction());
}
}
// Setting src and srcdoc.
var url = goog.html.SafeUrl.fromConstant(
goog.string.Const.from('https://google.com/trusted<'));
assertSameHtmlIfSupportsSandbox(
'<iframe src="https://google.com/trusted<" sandbox=""></iframe>',
function() { return goog.html.SafeHtml.createSandboxIframe(url, null); });
// If set with a string, src is sanitized.
assertSameHtmlIfSupportsSandbox(
'<iframe src="' + goog.html.SafeUrl.INNOCUOUS_STRING +
'" sandbox=""></iframe>',
function() {
return goog.html.SafeHtml.createSandboxIframe(
"javascript:evil();", null);
});
var srcdoc = '<br>';
assertSameHtmlIfSupportsSandbox(
'<iframe srcdoc="<br>" sandbox=""></iframe>', function() {
return goog.html.SafeHtml.createSandboxIframe(null, srcdoc);
});
// Cannot override src, srcdoc.
assertThrows(function() {
goog.html.SafeHtml.createSandboxIframe(null, null, {'Src': url});
});
assertThrows(function() {
goog.html.SafeHtml.createSandboxIframe(null, null, {'Srcdoc': url});
});
// Sandboxed by default, and can't be overridden.
assertSameHtmlIfSupportsSandbox('<iframe sandbox=""></iframe>', function() {
return goog.html.SafeHtml.createSandboxIframe();
});
assertThrows(function() {
goog.html.SafeHtml.createSandboxIframe(null, null, {'sandbox': ''});
});
assertThrows(function() {
goog.html.SafeHtml.createSandboxIframe(
null, null, {'SaNdBoX': 'allow-scripts'});
});
assertThrows(function() {
goog.html.SafeHtml.createSandboxIframe(
null, null, {'sandbox': 'allow-same-origin allow-top-navigation'});
});
// Can set content.
assertSameHtmlIfSupportsSandbox(
'<iframe sandbox=""><</iframe>', function() {
return goog.html.SafeHtml.createSandboxIframe(null, null, null, '<');
});
}
function testSafeHtmlCanUseIframeSandbox() {
// We know that IE < 10 does not support the sandbox attribute, so use it
// as a reference.
if (goog.labs.userAgent.browser.isIE() &&
goog.labs.userAgent.browser.getVersion() < 10) {
assertEquals(false, goog.html.SafeHtml.canUseSandboxIframe());
} else {
assertEquals(true, goog.html.SafeHtml.canUseSandboxIframe());
}
}
function testSafeHtmlCreateScript() {
var script =
goog.html.SafeScript.fromConstant(goog.string.Const.from('function1();'));
var scriptHtml = goog.html.SafeHtml.createScript(script);
assertSameHtml('<script>function1();</script>', scriptHtml);
// Two pieces of script.
var otherScript =
goog.html.SafeScript.fromConstant(goog.string.Const.from('function2();'));
scriptHtml = goog.html.SafeHtml.createScript([script, otherScript]);
assertSameHtml('<script>function1();function2();</script>', scriptHtml);
// Set attribute.
scriptHtml = goog.html.SafeHtml.createScript(script, {'id': 'test'});
assertContains('id="test"', goog.html.SafeHtml.unwrap(scriptHtml));
// Set attribute to null.
scriptHtml =
goog.html.SafeHtml.createScript(goog.html.SafeScript.EMPTY, {'id': null});
assertSameHtml('<script></script>', scriptHtml);
// Set attribute to invalid value.
var exception = assertThrows(function() {
goog.html.SafeHtml.createScript(
goog.html.SafeScript.EMPTY, {'invalid.': 'cantdothis'});
});
assertContains('Invalid attribute name', exception.message);
// Cannot override type attribute.
exception = assertThrows(function() {
goog.html.SafeHtml.createScript(
goog.html.SafeScript.EMPTY, {'Type': 'cantdothis'});
});
assertContains('Cannot set "type"', exception.message);
// Cannot set src attribute.
exception = assertThrows(function() {
goog.html.SafeHtml.createScript(
goog.html.SafeScript.EMPTY, {'src': 'cantdothis'});
});
assertContains('Cannot set "src"', exception.message);
// Directionality.
assertEquals(goog.i18n.bidi.Dir.NEUTRAL, scriptHtml.getDirection());
}
/** @suppress {checkTypes} */
function testSafeHtmlCreateScriptSrc() {
var url = goog.html.TrustedResourceUrl.fromConstant(
goog.string.Const.from('https://google.com/trusted<'));
assertSameHtml(
'<script src="https://google.com/trusted<"></script>',
goog.html.SafeHtml.createScriptSrc(url));
assertSameHtml(
'<script src="https://google.com/trusted<" defer="defer"></script>',
goog.html.SafeHtml.createScriptSrc(url, {'defer': 'defer'}));
// Unsafe src.
assertThrows(function() {
goog.html.SafeHtml.createScriptSrc('http://example.com');
});
// Unsafe attribute.
assertThrows(function() {
goog.html.SafeHtml.createScriptSrc(url, {'onerror': 'alert(1)'});
});
// Cannot override src.
assertThrows(function() {
goog.html.SafeHtml.createScriptSrc(url, {'Src': url});
});
}
function testSafeHtmlCreateMeta() {
var url = goog.html.SafeUrl.fromConstant(
goog.string.Const.from('https://google.com/trusted<'));
// SafeUrl with no timeout gets properly escaped.
assertSameHtml(
'<meta http-equiv="refresh" ' +
'content="0; url=https://google.com/trusted<">',
goog.html.SafeHtml.createMetaRefresh(url));
// SafeUrl with 0 timeout also gets properly escaped.
assertSameHtml(
'<meta http-equiv="refresh" ' +
'content="0; url=https://google.com/trusted<">',
goog.html.SafeHtml.createMetaRefresh(url, 0));
// Positive timeouts are supported.
assertSameHtml(
'<meta http-equiv="refresh" ' +
'content="1337; url=https://google.com/trusted<">',
goog.html.SafeHtml.createMetaRefresh(url, 1337));
// Negative timeouts are also kept, though they're not correct HTML.
assertSameHtml(
'<meta http-equiv="refresh" ' +
'content="-1337; url=https://google.com/trusted<">',
goog.html.SafeHtml.createMetaRefresh(url, -1337));
// String-based URLs work out of the box.
assertSameHtml(
'<meta http-equiv="refresh" ' +
'content="0; url=https://google.com/trusted<">',
goog.html.SafeHtml.createMetaRefresh('https://google.com/trusted<'));
// Sanitization happens.
assertSameHtml(
'<meta http-equiv="refresh" ' +
'content="0; url=about:invalid#zClosurez">',
goog.html.SafeHtml.createMetaRefresh('javascript:alert(1)'));
}
function testSafeHtmlCreateStyle() {
var styleSheet = goog.html.SafeStyleSheet.fromConstant(
goog.string.Const.from('P.special { color:"red" ; }'));
var styleHtml = goog.html.SafeHtml.createStyle(styleSheet);
assertSameHtml(
'<style type="text/css">P.special { color:"red" ; }</style>', styleHtml);
// Two stylesheets.
var otherStyleSheet = goog.html.SafeStyleSheet.fromConstant(
goog.string.Const.from('P.regular { color:blue ; }'));
styleHtml = goog.html.SafeHtml.createStyle([styleSheet, otherStyleSheet]);
assertSameHtml(
'<style type="text/css">P.special { color:"red" ; }' +
'P.regular { color:blue ; }</style>',
styleHtml);
// Set attribute.
styleHtml = goog.html.SafeHtml.createStyle(styleSheet, {'id': 'test'});
var styleHtmlString = goog.html.SafeHtml.unwrap(styleHtml);
assertContains('id="test"', styleHtmlString);
assertContains('type="text/css"', styleHtmlString);
// Set attribute to null.
styleHtml = goog.html.SafeHtml.createStyle(
goog.html.SafeStyleSheet.EMPTY, {'id': null});
assertSameHtml('<style type="text/css"></style>', styleHtml);
// Set attribute to invalid value.
var exception = assertThrows(function() {
goog.html.SafeHtml.createStyle(
goog.html.SafeStyleSheet.EMPTY, {'invalid.': 'cantdothis'});
});
assertContains('Invalid attribute name', exception.message);
// Cannot override type attribute.
exception = assertThrows(function() {
goog.html.SafeHtml.createStyle(
goog.html.SafeStyleSheet.EMPTY, {'Type': 'cantdothis'});
});
assertContains('Cannot override "type"', exception.message);
// Directionality.
assertEquals(goog.i18n.bidi.Dir.NEUTRAL, styleHtml.getDirection());
}
function testSafeHtmlCreateWithDir() {
var ltr = goog.i18n.bidi.Dir.LTR;
assertEquals(ltr, goog.html.SafeHtml.createWithDir(ltr, 'br').getDirection());
}
function testSafeHtmlJoin() {
var br = goog.html.SafeHtml.BR;
assertSameHtml(
'Hello<br>World', goog.html.SafeHtml.join(br, ['Hello', 'World']));
assertSameHtml(
'Hello<br>World', goog.html.SafeHtml.join(br, ['Hello', ['World']]));
assertSameHtml('Hello<br>', goog.html.SafeHtml.join('Hello', ['', br]));
var ltr = goog.html.testing.newSafeHtmlForTest('', goog.i18n.bidi.Dir.LTR);
assertEquals(
goog.i18n.bidi.Dir.LTR,
goog.html.SafeHtml.join(br, [ltr, ltr]).getDirection());
}
function testSafeHtmlConcat() {
var br = goog.html.testing.newSafeHtmlForTest('<br>');
var html = goog.html.SafeHtml.htmlEscape('Hello');
assertSameHtml('Hello<br>', goog.html.SafeHtml.concat(html, br));
assertSameHtml('', goog.html.SafeHtml.concat());
assertSameHtml('', goog.html.SafeHtml.concat([]));
assertSameHtml('a<br>c', goog.html.SafeHtml.concat('a', br, 'c'));
assertSameHtml('a<br>c', goog.html.SafeHtml.concat(['a', br, 'c']));
assertSameHtml('a<br>c', goog.html.SafeHtml.concat('a', [br, 'c']));
assertSameHtml('a<br>c', goog.html.SafeHtml.concat(['a'], br, ['c']));
var ltr = goog.html.testing.newSafeHtmlForTest('', goog.i18n.bidi.Dir.LTR);
var rtl = goog.html.testing.newSafeHtmlForTest('', goog.i18n.bidi.Dir.RTL);
var neutral =
goog.html.testing.newSafeHtmlForTest('', goog.i18n.bidi.Dir.NEUTRAL);
var unknown = goog.html.testing.newSafeHtmlForTest('');
assertEquals(
goog.i18n.bidi.Dir.NEUTRAL, goog.html.SafeHtml.concat().getDirection());
assertEquals(
goog.i18n.bidi.Dir.LTR,
goog.html.SafeHtml.concat(ltr, ltr).getDirection());
assertEquals(
goog.i18n.bidi.Dir.LTR,
goog.html.SafeHtml.concat(ltr, neutral, ltr).getDirection());
assertNull(goog.html.SafeHtml.concat(ltr, unknown).getDirection());
assertNull(goog.html.SafeHtml.concat(ltr, rtl).getDirection());
assertNull(goog.html.SafeHtml.concat(ltr, [rtl]).getDirection());
}
function testHtmlEscapePreservingNewlines() {
// goog.html.SafeHtml passes through unchanged.
var safeHtmlIn = goog.html.SafeHtml.htmlEscapePreservingNewlines('<b>in</b>');
assertTrue(
safeHtmlIn ===
goog.html.SafeHtml.htmlEscapePreservingNewlines(safeHtmlIn));
assertSameHtml(
'a<br>c', goog.html.SafeHtml.htmlEscapePreservingNewlines('a\nc'));
assertSameHtml(
'&lt;<br>', goog.html.SafeHtml.htmlEscapePreservingNewlines('<\n'));
assertSameHtml(
'<br>', goog.html.SafeHtml.htmlEscapePreservingNewlines('\r\n'));
assertSameHtml('<br>', goog.html.SafeHtml.htmlEscapePreservingNewlines('\r'));
assertSameHtml('', goog.html.SafeHtml.htmlEscapePreservingNewlines(''));
}
function testHtmlEscapePreservingNewlinesAndSpaces() {
// goog.html.SafeHtml passes through unchanged.
var safeHtmlIn =
goog.html.SafeHtml.htmlEscapePreservingNewlinesAndSpaces('<b>in</b>');
assertTrue(
safeHtmlIn ===
goog.html.SafeHtml.htmlEscapePreservingNewlinesAndSpaces(safeHtmlIn));
assertSameHtml(
'a<br>c',
goog.html.SafeHtml.htmlEscapePreservingNewlinesAndSpaces('a\nc'));
assertSameHtml(
'&lt;<br>',
goog.html.SafeHtml.htmlEscapePreservingNewlinesAndSpaces('<\n'));
assertSameHtml(
'<br>', goog.html.SafeHtml.htmlEscapePreservingNewlinesAndSpaces('\r\n'));
assertSameHtml(
'<br>', goog.html.SafeHtml.htmlEscapePreservingNewlinesAndSpaces('\r'));
assertSameHtml(
'', goog.html.SafeHtml.htmlEscapePreservingNewlinesAndSpaces(''));
assertSameHtml(
'a &#160;b',
goog.html.SafeHtml.htmlEscapePreservingNewlinesAndSpaces('a  b'));
}
function testSafeHtmlConcatWithDir() {
var ltr = goog.i18n.bidi.Dir.LTR;
var rtl = goog.i18n.bidi.Dir.RTL;
var br = goog.html.testing.newSafeHtmlForTest('<br>');
assertEquals(ltr, goog.html.SafeHtml.concatWithDir(ltr).getDirection());
assertEquals(
ltr,
goog.html.SafeHtml
.concatWithDir(ltr, goog.html.testing.newSafeHtmlForTest('', rtl))
.getDirection());
assertSameHtml('a<br>c', goog.html.SafeHtml.concatWithDir(ltr, 'a', br, 'c'));
}
function assertSameHtml(expected, html) {
assertEquals(expected, goog.html.SafeHtml.unwrap(html));
}
| tearDown |
datastructures.py | import copy
from types import GeneratorType
class MergeDict(object):
"""
A simple class for creating new "virtual" dictionaries that actually look
up values in more than one dictionary, passed in the constructor.
If a key appears in more than one of the given dictionaries, only the
first occurrence will be used.
"""
def __init__(self, *dicts):
|
def __getitem__(self, key):
for dict_ in self.dicts:
try:
return dict_[key]
except KeyError:
pass
raise KeyError
def __copy__(self):
return self.__class__(*self.dicts)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def getlist(self, key):
for dict_ in self.dicts:
if key in dict_.keys():
return dict_.getlist(key)
return []
def iteritems(self):
seen = set()
for dict_ in self.dicts:
for item in dict_.iteritems():
k, v = item
if k in seen:
continue
seen.add(k)
yield item
def iterkeys(self):
for k, v in self.iteritems():
yield k
def itervalues(self):
for k, v in self.iteritems():
yield v
def items(self):
return list(self.iteritems())
def keys(self):
return list(self.iterkeys())
def values(self):
return list(self.itervalues())
def has_key(self, key):
for dict_ in self.dicts:
if key in dict_:
return True
return False
__contains__ = has_key
__iter__ = iterkeys
def copy(self):
"""Returns a copy of this object."""
return self.__copy__()
def __str__(self):
'''
Returns something like
"{'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}"
instead of the generic "<object meta-data>" inherited from object.
'''
return str(dict(self.items()))
def __repr__(self):
'''
Returns something like
MergeDict({'key1': 'val1', 'key2': 'val2'}, {'key3': 'val3'})
instead of generic "<object meta-data>" inherited from object.
'''
dictreprs = ', '.join(repr(d) for d in self.dicts)
return '%s(%s)' % (self.__class__.__name__, dictreprs)
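# Illustrative sketch (added; not part of the original module): lookups fall
# through the wrapped dicts in order, and the first dict containing the key
# wins.
#
#   md = MergeDict({'a': 1}, {'a': 2, 'b': 3})
#   md['a']                 # -> 1, from the first dict
#   md['b']                 # -> 3
#   md.get('c', 'missing')  # -> 'missing'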
class SortedDict(dict):
"""
A dictionary that keeps its keys in the order in which they're inserted.
"""
def __new__(cls, *args, **kwargs):
instance = super(SortedDict, cls).__new__(cls, *args, **kwargs)
instance.keyOrder = []
return instance
def __init__(self, data=None):
if data is None:
data = {}
elif isinstance(data, GeneratorType):
# Unfortunately we need to be able to read a generator twice. Once
# to get the data into self with our super().__init__ call and a
# second time to set up keyOrder correctly.
data = list(data)
super(SortedDict, self).__init__(data)
if isinstance(data, dict):
self.keyOrder = data.keys()
else:
self.keyOrder = []
seen = set()
for key, value in data:
if key not in seen:
self.keyOrder.append(key)
seen.add(key)
def __deepcopy__(self, memo):
return self.__class__([(key, copy.deepcopy(value, memo))
for key, value in self.iteritems()])
def __copy__(self):
# Python's default copy implementation would alter the state
# of self. The reason for this seems complex but is likely related to
# subclassing dict.
return self.copy()
def __setitem__(self, key, value):
if key not in self:
self.keyOrder.append(key)
super(SortedDict, self).__setitem__(key, value)
def __delitem__(self, key):
super(SortedDict, self).__delitem__(key)
self.keyOrder.remove(key)
def __iter__(self):
return iter(self.keyOrder)
def pop(self, k, *args):
result = super(SortedDict, self).pop(k, *args)
try:
self.keyOrder.remove(k)
except ValueError:
# Key wasn't in the dictionary in the first place. No problem.
pass
return result
def popitem(self):
result = super(SortedDict, self).popitem()
self.keyOrder.remove(result[0])
return result
def items(self):
return zip(self.keyOrder, self.values())
def iteritems(self):
for key in self.keyOrder:
yield key, self[key]
def keys(self):
return self.keyOrder[:]
def iterkeys(self):
return iter(self.keyOrder)
def values(self):
return map(self.__getitem__, self.keyOrder)
def itervalues(self):
for key in self.keyOrder:
yield self[key]
def update(self, dict_):
for k, v in dict_.iteritems():
self[k] = v
def setdefault(self, key, default):
if key not in self:
self.keyOrder.append(key)
return super(SortedDict, self).setdefault(key, default)
def value_for_index(self, index):
"""Returns the value of the item at the given zero-based index."""
return self[self.keyOrder[index]]
def insert(self, index, key, value):
"""Inserts the key, value pair before the item with the given index."""
if key in self.keyOrder:
n = self.keyOrder.index(key)
del self.keyOrder[n]
if n < index:
index -= 1
self.keyOrder.insert(index, key)
super(SortedDict, self).__setitem__(key, value)
def copy(self):
"""Returns a copy of this object."""
# This way of initializing the copy means it works for subclasses, too.
return self.__class__(self)
def __repr__(self):
"""
Replaces the normal dict.__repr__ with a version that returns the keys
in their sorted order.
"""
return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])
def clear(self):
super(SortedDict, self).clear()
self.keyOrder = []
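# Illustrative sketch (added; not part of the original module): keyOrder
# tracks insertion order, so iteration differs from a plain Python 2 dict.
#
#   sd = SortedDict()
#   sd['b'] = 1
#   sd['a'] = 2
#   sd.keys()            # -> ['b', 'a']
#   sd.insert(0, 'c', 3)
#   sd.keys()            # -> ['c', 'b', 'a']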
class MultiValueDictKeyError(KeyError):
pass
class MultiValueDict(dict):
"""
A subclass of dictionary customized to handle multiple values for the
same key.
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
>>> d['name']
'Simon'
>>> d.getlist('name')
['Adrian', 'Simon']
>>> d.getlist('doesnotexist')
[]
>>> d.getlist('doesnotexist', ['Adrian', 'Simon'])
['Adrian', 'Simon']
>>> d.get('lastname', 'nonexistent')
'nonexistent'
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
This class exists to solve the irritating problem raised by cgi.parse_qs,
which returns a list for every key, even though most Web forms submit
single name-value pairs.
"""
def __init__(self, key_to_list_mapping=()):
super(MultiValueDict, self).__init__(key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__,
super(MultiValueDict, self).__repr__())
def __getitem__(self, key):
"""
Returns the last data value for this key, or [] if it's an empty list;
raises KeyError if not found.
"""
try:
list_ = super(MultiValueDict, self).__getitem__(key)
except KeyError:
raise MultiValueDictKeyError("Key %r not found in %r" % (key, self))
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
super(MultiValueDict, self).__setitem__(key, [value])
def __copy__(self):
return self.__class__([
(k, v[:])
for k, v in self.lists()
])
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
result = self.__class__()
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo),
copy.deepcopy(value, memo))
return result
def __getstate__(self):
obj_dict = self.__dict__.copy()
obj_dict['_data'] = dict([(k, self.getlist(k)) for k in self])
return obj_dict
def __setstate__(self, obj_dict):
data = obj_dict.pop('_data', {})
for k, v in data.items():
self.setlist(k, v)
self.__dict__.update(obj_dict)
def get(self, key, default=None):
"""
Returns the last data value for the passed key. If key doesn't exist
or value is an empty list, then default is returned.
"""
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def getlist(self, key, default=None):
"""
Returns the list of values for the passed key. If key doesn't exist,
then a default value is returned.
"""
try:
return super(MultiValueDict, self).__getitem__(key)
except KeyError:
if default is None:
return []
return default
def setlist(self, key, list_):
super(MultiValueDict, self).__setitem__(key, list_)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
return default
return self[key]
def setlistdefault(self, key, default_list=None):
if key not in self:
if default_list is None:
default_list = []
self.setlist(key, default_list)
return default_list
return self.getlist(key)
def appendlist(self, key, value):
"""Appends an item to the internal list associated with key."""
self.setlistdefault(key).append(value)
def items(self):
"""
Returns a list of (key, value) pairs, where value is the last item in
the list associated with the key.
"""
return [(key, self[key]) for key in self.keys()]
def iteritems(self):
"""
Yields (key, value) pairs, where value is the last item in the list
associated with the key.
"""
for key in self.keys():
yield (key, self[key])
def lists(self):
"""Returns a list of (key, list) pairs."""
return super(MultiValueDict, self).items()
def iterlists(self):
"""Yields (key, list) pairs."""
return super(MultiValueDict, self).iteritems()
def values(self):
"""Returns a list of the last value on every key list."""
return [self[key] for key in self.keys()]
def itervalues(self):
"""Yield the last value on every key list."""
for key in self.iterkeys():
yield self[key]
def copy(self):
"""Returns a shallow copy of this object."""
return copy.copy(self)
def update(self, *args, **kwargs):
"""
update() extends rather than replaces existing key lists.
Also accepts keyword args.
"""
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, got %d" % len(args))
if args:
other_dict = args[0]
if isinstance(other_dict, MultiValueDict):
for key, value_list in other_dict.lists():
self.setlistdefault(key).extend(value_list)
else:
try:
for key, value in other_dict.items():
self.setlistdefault(key).append(value)
except TypeError:
raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
for key, value in kwargs.iteritems():
self.setlistdefault(key).append(value)
def dict(self):
"""
Returns current object as a dict with singular values.
"""
return dict((key, self[key]) for key in self)
class DotExpandedDict(dict):
"""
A special dictionary constructor that takes a dictionary in which the keys
may contain dots to specify inner dictionaries. It's confusing, but this
example should make sense.
>>> d = DotExpandedDict({'person.1.firstname': ['Simon'], \
'person.1.lastname': ['Willison'], \
'person.2.firstname': ['Adrian'], \
'person.2.lastname': ['Holovaty']})
>>> d
{'person': {'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}}}
>>> d['person']
{'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}}
>>> d['person']['1']
{'lastname': ['Willison'], 'firstname': ['Simon']}
# Gotcha: Results are unpredictable if the dots are "uneven":
>>> DotExpandedDict({'c.1': 2, 'c.2': 3, 'c': 1})
{'c': 1}
"""
def __init__(self, key_to_list_mapping):
for k, v in key_to_list_mapping.items():
current = self
bits = k.split('.')
for bit in bits[:-1]:
current = current.setdefault(bit, {})
# Now assign value to current position
try:
current[bits[-1]] = v
except TypeError: # Special-case if current isn't a dict.
current = {bits[-1]: v}
class ImmutableList(tuple):
"""
A tuple-like object that raises useful errors when it is asked to mutate.
Example::
>>> a = ImmutableList(range(5), warning="You cannot mutate this.")
>>> a[3] = '4'
Traceback (most recent call last):
...
AttributeError: You cannot mutate this.
"""
def __new__(cls, *args, **kwargs):
if 'warning' in kwargs:
warning = kwargs['warning']
del kwargs['warning']
else:
warning = 'ImmutableList object is immutable.'
self = tuple.__new__(cls, *args, **kwargs)
self.warning = warning
return self
def complain(self, *args, **kwargs):
if isinstance(self.warning, Exception):
raise self.warning
else:
raise AttributeError(self.warning)
# All list mutation functions complain.
__delitem__ = complain
__delslice__ = complain
__iadd__ = complain
__imul__ = complain
__setitem__ = complain
__setslice__ = complain
append = complain
extend = complain
insert = complain
pop = complain
remove = complain
sort = complain
reverse = complain
class DictWrapper(dict):
"""
Wraps accesses to a dictionary so that certain values (those starting with
the specified prefix) are passed through a function before being returned.
The prefix is removed before looking up the real value.
Used by the SQL construction code to ensure that values are correctly
quoted before being used.
"""
def __init__(self, data, func, prefix):
super(DictWrapper, self).__init__(data)
self.func = func
self.prefix = prefix
def __getitem__(self, key):
"""
Retrieves the real value after stripping the prefix string (if
present). If the prefix is present, pass the value through self.func
before returning, otherwise return the raw value.
"""
if key.startswith(self.prefix):
use_func = True
key = key[len(self.prefix):]
else:
use_func = False
value = super(DictWrapper, self).__getitem__(key)
if use_func:
return self.func(value)
return value
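# Illustrative sketch (added; not part of the original module): the quoting
# function below is hypothetical.
#
#   dw = DictWrapper({'name': 'x'}, lambda v: '"%s"' % v, 'quoted_')
#   dw['quoted_name']  # -> '"x"'  (prefix stripped, func applied)
#   dw['name']         # -> 'x'    (raw value)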
| self.dicts = dicts |
exercicio-09-35.py | ##############################################################################
# Part of the book Introdução à Programação com Python
# Author: Nilo Ney Coutinho Menezes
# Publisher: Editora Novatec (c) 2010-2020
# First edition - November/2010 - ISBN 978-85-7522-250-8
# Second edition - June/2014 - ISBN 978-85-7522-408-3
# Third edition - January/2019 - ISBN 978-85-7522-718-3
#
# Website: https://python.nilo.pro.br/
#
# File: exercicios3\capitulo 09\exercicio-09-35.py
##############################################################################
import sys
import os
import os.path
# this module helps convert file names into valid
# HTML links |
mascara_do_estilo = "'margin: 5px 0px 5px %dpx;'"
def gera_estilo(nível):
return mascara_do_estilo % (nível * 20)
def gera_listagem(página, diretório):
nraiz = os.path.abspath(diretório).count(os.sep)
for raiz, diretórios, arquivos in os.walk(diretório):
nível = raiz.count(os.sep) - nraiz
página.write(f"<p style={gera_estilo(nível)}>{raiz}</p>")
estilo = gera_estilo(nível+1)
for a in arquivos:
caminho_completo = os.path.join(raiz, a)
tamanho = os.path.getsize(caminho_completo)
link = urllib.request.pathname2url(caminho_completo)
página.write(f"<p style={estilo}><a href='{link}'>{a}</a> ({tamanho} bytes)</p>")
if len(sys.argv) < 2:
print("Digite o nome do diretório para coletar os arquivos!")
sys.exit(1)
diretório = sys.argv[1]
página = open("arquivos.html", "w", encoding="utf-8")
página.write("""
<!DOCTYPE html>
<html lang="pt-BR">
<head>
<meta charset="utf-8">
<title>Arquivos</title>
</head>
<body>
""")
página.write(f"Arquivos encontrados a partir do diretório: {diretório}")
gera_listagem(página, diretório)
página.write("""
</body>
</html>
""")
página.close() | import urllib.request |
main.js | const electron = require('electron');
const wallpaper = require('wallpaper');
const axios = require('axios');
const macaddress = require('macaddress');
const url = require('url');
const path = require('path');
const mkdirp = require('mkdirp');
let fs = require('fs');
const qs = require('qs');
const { app, BrowserWindow, Menu, Tray, dialog, ipcMain, screen, shell } = electron;
let { getAppDataPath } = require("appdata-path");
let version = "1.3.5";
let apiUrl = "https://singf.space/pixiv/select_paper.php";
let downloadUrl = "https://github.com/SingularityF/PixivWallpaper/releases/latest";
const gotTheLock = app.requestSingleInstanceLock();
let iconPath = path.join(__dirname, 'icon.ico');
let mainWindow = null;
let tray = null;
let timeout = 90000;
// Dev
//let appPath = __dirname;
// Prod
let appPath = path.join(app.getAppPath(), `../../`);
let startupPath = path.join(getAppDataPath(), `Microsoft/Windows/Start Menu/Programs/Startup/PixivWallpaper.lnk`);
const mainMenuTemplate = [
{
label: 'File',
submenu: [
]
}
];
async function setWallpaper(filePath) {
await wallpaper.set(filePath);
console.log('Wallpaper set!');
mainWindow.webContents.send('set');
mainWindow.webContents.send('set:done');
}
async function removeStartupShortcut() {
await shell.moveItemToTrash(startupPath);
mainWindow.webContents.send('startup:rm:done');
}
async function createStartupShortcut() {
await shell.writeShortcutLink(startupPath, { target: path.join(appPath, 'pixivwallpaperclient.exe'), args: '-m' });
shell.showItemInFolder(startupPath);
mainWindow.webContents.send('startup:done');
}
async function checkUpdate() {
mainWindow.webContents.send('updates');
let versionLatest = await axios.get(downloadUrl, {
timeout: timeout
})
.then(res => {
const responseUrl = res.request.path;
let versionLatest = responseUrl.split("/").pop();
return versionLatest;
})
.catch(err => {
console.log(err);
mainWindow.webContents.send('error:network');
mainWindow.webContents.send('check:done');
return null;
});
if (versionLatest == null) return;
if (versionLatest == version) {
console.log("Latest");
mainWindow.webContents.send('latest');
mainWindow.webContents.send('check:done');
} else {
mainWindow.webContents.send('hide');
const options = {
type: 'info',
title: 'Attention',
message: `You're running an outdated version of PixivWallpaper. Please consider updating to the latest version.`,
buttons: ['OK']
};
dialog.showMessageBox(mainWindow, options, (index) => {
});
mainWindow.webContents.send('check:done');
}
}
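// Renderer-side counterpart (illustrative sketch, not part of this file;
// assumes mainWindow.html loads a script with nodeIntegration enabled):
//   const { ipcRenderer } = require('electron');
//   ipcRenderer.send('ready');   // triggers checkUpdate() above
//   ipcRenderer.send('set');     // requests and applies a new wallpaper
//   ipcRenderer.on('set:done', () => { /* re-enable UI controls */ });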
if (!gotTheLock) {
app.quit();
} else {
app.on('second-instance', (event, commandLine, workingDirectory) => {
        // Someone tried to run a second instance, so focus our window instead.
if (mainWindow) {
mainWindow.show();
mainWindow.focus();
}
})
app.on('ready', () => {
tray = new Tray(iconPath);
const contextMenu = Menu.buildFromTemplate([
{
label: 'Show App',
click() {
mainWindow.show();
}
},
{
label: 'Quit',
click() {
app.isQuiting = true;
app.quit();
}
}
]);
tray.setToolTip('Pixiv Wallpaper');
tray.setContextMenu(contextMenu);
        let showFlag = true;
        // If '-m' is among the arguments, launch minimized
if (process.argv.some((x) => x == '-m')) {
showFlag = false;
}
mainWindow = new BrowserWindow({
icon: iconPath,
minWidth: 600,
minHeight: 400,
show: showFlag,
webPreferences: {
nodeIntegration: true
}
});
mainWindow.loadURL(url.format({
pathname: path.join(__dirname, 'mainWindow.html'),
protocol: 'file'
}));
const mainMenu = Menu.buildFromTemplate(mainMenuTemplate);
Menu.setApplicationMenu(mainMenu);
mainWindow.on('minimize', (e) => {
e.preventDefault();
mainWindow.hide();
});
mainWindow.on('close', async (e) => {
if (!app.isQuiting) {
e.preventDefault();
const options = {
type: 'info',
title: 'Attention',
                message: `This app will continue to run in the background to set the wallpaper automatically.
You can shut down the app through the tray menu.`,
buttons: ['OK']
};
await dialog.showMessageBox(mainWindow, options, (index) => {
});
mainWindow.hide();
}
return false;
});
tray.on('click', (e) => {
mainWindow.show();
});
});
};
ipcMain.on('ready', () => {
checkUpdate();
});
ipcMain.on('startup', () => {
createStartupShortcut();
});
ipcMain.on('startup:rm', () => {
removeStartupShortcut();
});
ipcMain.on('set', (e) => {
    macaddress.one((err, mac) => {
if (err) {
console.log('Cannot get MAC address');
mainWindow.webContents.send('error:network');
mainWindow.webContents.send('set:done');
} else {
const { width, height } = screen.getPrimaryDisplay().size;
            const aspect_ratio = width / height;
let data = qs.stringify({
ar: aspect_ratio,
uuid: mac,
version: '0.0'
});
const options = {
method: 'POST',
headers: { 'content-type': 'application/x-www-form-urlencoded' },
data,
url: apiUrl,
responseType: 'stream'
};
axios.post(apiUrl, data, {
responseType: 'arraybuffer',
timeout: timeout
}).then(async (res) => {
//console.log(res);
                await mkdirp(path.join(appPath, 'downloads'));
let ext = res['headers']['content-type'].split('/')[1];
let filePath = path.join(appPath, `downloads/wallpaper.${ext}`);
fs.writeFile(filePath, res.data, 'binary', (err) => {
if (err) {
                        console.log('Failed to save wallpaper to local disk');
} else {
console.log('Download complete');
setWallpaper(filePath);
}
});
}).catch((err) => {
console.log(err);
mainWindow.webContents.send('error:network');
mainWindow.webContents.send('set:done');
});
}
});
}); | |
script.rs | use crate::{maybe_print_errors, path::canonicalize, run_block};
use crate::{BufCodecReader, MaybeTextCodec, StringOrBinary};
use nu_errors::ShellError;
use nu_protocol::hir::{
Call, ClassifiedCommand, Expression, InternalCommand, Literal, NamedArguments,
SpannedExpression,
};
use nu_protocol::{Primitive, ReturnSuccess, UntaggedValue, Value};
use nu_stream::{InputStream, ToInputStream};
use crate::EvaluationContext;
use log::{debug, trace};
use nu_source::{Span, Tag, Text};
use std::path::Path;
use std::{error::Error, sync::atomic::Ordering};
use std::{io::BufReader, iter::Iterator};
#[derive(Debug)]
pub enum LineResult {
Success(String),
Error(String, ShellError),
Break,
CtrlC,
CtrlD,
ClearHistory,
}
fn chomp_newline(s: &str) -> &str {
    s.strip_suffix('\n').unwrap_or(s)
}
pub fn run_script_in_dir(
script: String,
dir: &Path,
ctx: &EvaluationContext,
) -> Result<(), Box<dyn Error>> {
    // Save the current path so we can switch back to it after executing the script
let path_before = ctx.shell_manager.path();
ctx.shell_manager
.set_path(dir.to_string_lossy().to_string());
run_script_standalone(script, false, ctx, false)?;
ctx.shell_manager.set_path(path_before);
Ok(())
}
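// Illustrative call site (a sketch; constructing the EvaluationContext is elided):
//
//     run_script_in_dir("ls".to_string(), Path::new("/tmp"), &ctx)?;
//
// The shell's working directory is switched to /tmp for the duration of the
// script and restored afterwards.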
/// Process the line by parsing the text to turn it into commands, classifying those commands so that we understand what is being called in the pipeline, and then running this pipeline
pub fn process_script(
script_text: &str,
ctx: &EvaluationContext,
redirect_stdin: bool,
span_offset: usize,
cli_mode: bool,
) -> LineResult {
if script_text.trim() == "" {
LineResult::Success(script_text.to_string())
} else {
let line = chomp_newline(script_text);
let (block, err) = nu_parser::parse(&line, span_offset, &ctx.scope);
debug!("{:#?}", block);
//println!("{:#?}", pipeline);
if let Some(failure) = err {
return LineResult::Error(line.to_string(), failure.into());
}
// There's a special case to check before we process the pipeline:
// If we're giving a path by itself
// ...and it's not a command in the path
// ...and it doesn't have any arguments
// ...and we're in the CLI
// ...then change to this directory
if cli_mode
&& block.block.len() == 1
&& block.block[0].pipelines.len() == 1
&& block.block[0].pipelines[0].list.len() == 1
{
if let ClassifiedCommand::Internal(InternalCommand {
ref name, ref args, ..
}) = block.block[0].pipelines[0].list[0]
{
let internal_name = name;
let name = args
.positional
.as_ref()
.and_then(|positionals| {
positionals.get(0).map(|e| {
if let Expression::Literal(Literal::String(ref s)) = e.expr {
&s
} else {
""
}
})
})
.unwrap_or("");
if internal_name == "run_external"
&& args
.positional
.as_ref()
.map(|ref v| v.len() == 1)
.unwrap_or(true)
&& args
.named
.as_ref()
.map(NamedArguments::is_empty)
.unwrap_or(true)
&& canonicalize(ctx.shell_manager.path(), name).is_ok()
&& Path::new(&name).is_dir()
&& !ctx.host.lock().is_external_cmd(&name)
{
// Here we work differently if we're in Windows because of the expected Windows behavior
#[cfg(windows)]
{
if name.ends_with(':') {
// This looks like a drive shortcut. We need to a) switch drives and b) go back to the previous directory we were viewing on that drive
// But first, we need to save where we are now
let current_path = ctx.shell_manager.path();
let split_path: Vec<_> = current_path.split(':').collect();
if split_path.len() > 1 {
ctx.windows_drives_previous_cwd
.lock()
.insert(split_path[0].to_string(), current_path);
}
let name = name.to_uppercase();
let new_drive: Vec<_> = name.split(':').collect();
if let Some(val) =
ctx.windows_drives_previous_cwd.lock().get(new_drive[0])
{
ctx.shell_manager.set_path(val.to_string());
return LineResult::Success(line.to_string());
} else {
ctx.shell_manager
.set_path(format!("{}\\", name.to_string()));
return LineResult::Success(line.to_string());
}
} else {
ctx.shell_manager.set_path(name.to_string());
return LineResult::Success(line.to_string());
}
}
#[cfg(not(windows))]
{
ctx.shell_manager.set_path(name.to_string());
return LineResult::Success(line.to_string());
}
}
}
}
let input_stream = if redirect_stdin {
let file = std::io::stdin();
let buf_reader = BufReader::new(file);
let buf_codec = BufCodecReader::new(buf_reader, MaybeTextCodec::default());
let stream = buf_codec.map(|line| {
if let Ok(line) = line {
let primitive = match line {
StringOrBinary::String(s) => Primitive::String(s),
StringOrBinary::Binary(b) => Primitive::Binary(b.into_iter().collect()),
};
Ok(Value {
value: UntaggedValue::Primitive(primitive),
tag: Tag::unknown(),
})
} else {
panic!("Internal error: could not read lines of text from stdin")
}
});
stream.to_input_stream()
} else {
InputStream::empty()
};
trace!("{:#?}", block);
let result = run_block(&block, ctx, input_stream);
match result {
Ok(input) => {
// Running a pipeline gives us back a stream that we can then
// work through. At the top level, we just want to pull on the
// values to compute them.
let autoview_cmd = ctx
.get_command("autoview")
.expect("Could not find autoview command");
if let Ok(mut output_stream) = ctx.run_command(
autoview_cmd,
Tag::unknown(),
Call::new(
Box::new(SpannedExpression::new(
Expression::string("autoview".to_string()),
Span::unknown(),
)),
Span::unknown(),
),
input,
) {
loop {
match output_stream.next() {
Some(Ok(ReturnSuccess::Value(Value {
value: UntaggedValue::Error(e),
..
}))) => return LineResult::Error(line.to_string(), e),
Some(Ok(_item)) => {
if ctx.ctrl_c.load(Ordering::SeqCst) {
break;
}
}
None => break,
Some(Err(e)) => return LineResult::Error(line.to_string(), e),
}
}
}
LineResult::Success(line.to_string())
}
Err(err) => LineResult::Error(line.to_string(), err),
}
}
}
pub fn run_script_standalone(
script_text: String,
redirect_stdin: bool,
context: &EvaluationContext,
exit_on_error: bool,
) -> Result<(), Box<dyn Error>> {
context
.shell_manager
.enter_script_mode()
.map_err(Box::new)?;
let line = process_script(&script_text, context, redirect_stdin, 0, false);
match line {
LineResult::Success(line) => {
let error_code = {
let errors = context.current_errors.clone();
let errors = errors.lock();
if errors.len() > 0 {
1
} else {
0
}
};
maybe_print_errors(&context, Text::from(line));
if error_code != 0 && exit_on_error {
std::process::exit(error_code);
}
}
LineResult::Error(line, err) => {
context
.host
.lock()
.print_err(err, &Text::from(line.clone()));
maybe_print_errors(&context, Text::from(line));
if exit_on_error {
std::process::exit(1);
}
}
_ => {}
}
    // Exit the script-mode shell
context.shell_manager.remove_at_current();
    Ok(())
} |
|
conf.py | # -*- coding: utf-8 -*-
#
# CFFI documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 14 16:37:47 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CFFI'
copyright = u'2012-2015, Armin Rigo, Maciej Fijalkowski'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.9'
# The full version, including alpha/beta/rc tags.
release = '1.9.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'CFFIdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'CFFI.tex', u'CFFI Documentation',
u'Armin Rigo, Maciej Fijalkowski', 'manual'),
]
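# Example invocation using these settings (assuming Sphinx is installed and
# run from this directory):
#   sphinx-build -b latex . _build/latex
# This produces CFFI.tex, ready to be compiled with pdflatex.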
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
server.js | const express = require('express');
const webpack = require('webpack');
const webpackDevMiddleware = require('webpack-dev-middleware');
const app = express();
const config = require('./webpack.config');
// Pass the configuration to webpack, which compiles according to it
const compiler = webpack(config);
const middleware = webpackDevMiddleware(compiler);
app.use(middleware);
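// webpack-dev-middleware serves the compiled bundle from memory rather than
// writing it to disk, so (assuming the config sets output.publicPath = '/'
// and output.filename = 'bundle.js') a request to
// http://localhost:3000/bundle.js always returns the latest build.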
app.listen(3000, () => {
    console.log('Server started on port 3000');
}); |
|
fake.go | package mocks
import (
"testing"
pgapis "github.com/operator-backing-service-samples/postgresql-operator/pkg/apis"
pgv1alpha1 "github.com/operator-backing-service-samples/postgresql-operator/pkg/apis/postgresql/v1alpha1"
olmv1alpha1 "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
apiextensionv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
fakedynamic "k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
ocav1 "github.com/openshift/api/apps/v1"
knativev1 "knative.dev/serving/pkg/apis/serving/v1"
v1alpha1 "github.com/redhat-developer/service-binding-operator/pkg/apis/apps/v1alpha1"
)
// Fake defines all the elements to fake a Kubernetes API client.
type Fake struct {
t *testing.T // testing instance
ns string // namespace
S *runtime.Scheme // runtime client scheme
objs []runtime.Object // all fake objects
}
// AddMockedServiceBindingRequest adds a mocked object from ServiceBindingRequestMock.
func (f *Fake) AddMockedServiceBindingRequest(
name string,
backingServiceNamespace *string,
backingServiceResourceRef string,
applicationResourceRef string,
applicationGVR schema.GroupVersionResource,
matchLabels map[string]string,
) *v1alpha1.ServiceBindingRequest {
f.S.AddKnownTypes(v1alpha1.SchemeGroupVersion, &v1alpha1.ServiceBindingRequest{})
sbr := ServiceBindingRequestMock(f.ns, name, backingServiceNamespace, backingServiceResourceRef, applicationResourceRef, applicationGVR, matchLabels)
f.objs = append(f.objs, sbr)
return sbr
}
// AddMockedServiceBindingRequestWithUnannotated adds a mocked object from ServiceBindingRequestMock with DetectBindingResources.
func (f *Fake) AddMockedServiceBindingRequestWithUnannotated(
name string,
backingServiceResourceRef string,
applicationResourceRef string,
applicationGVR schema.GroupVersionResource,
matchLabels map[string]string,
) *v1alpha1.ServiceBindingRequest {
f.S.AddKnownTypes(v1alpha1.SchemeGroupVersion, &v1alpha1.ServiceBindingRequest{})
sbr := ServiceBindingRequestMock(f.ns, name, nil, backingServiceResourceRef, applicationResourceRef, applicationGVR, matchLabels)
f.objs = append(f.objs, sbr)
return sbr
}
// AddMockedUnstructuredServiceBindingRequest creates a mock ServiceBindingRequest object
func (f *Fake) AddMockedUnstructuredServiceBindingRequest(
name string,
backingServiceResourceRef string,
applicationResourceRef string,
applicationGVR schema.GroupVersionResource,
matchLabels map[string]string,
) *unstructured.Unstructured {
f.S.AddKnownTypes(v1alpha1.SchemeGroupVersion, &v1alpha1.ServiceBindingRequest{})
sbr, err := UnstructuredServiceBindingRequestMock(f.ns, name, backingServiceResourceRef, applicationResourceRef, applicationGVR, matchLabels)
require.NoError(f.t, err)
f.objs = append(f.objs, sbr)
return sbr
}
// AddMockedUnstructuredCSV adds a mocked unstructured CSV.
func (f *Fake) AddMockedUnstructuredCSV(name string) {
require.NoError(f.t, olmv1alpha1.AddToScheme(f.S))
csv, err := UnstructuredClusterServiceVersionMock(f.ns, name)
require.NoError(f.t, err)
f.S.AddKnownTypes(olmv1alpha1.SchemeGroupVersion, &olmv1alpha1.ClusterServiceVersion{})
f.objs = append(f.objs, csv)
}
// AddMockedCSVList adds a mocked object from ClusterServiceVersionListMock.
func (f *Fake) AddMockedCSVList(name string) {
require.NoError(f.t, olmv1alpha1.AddToScheme(f.S))
f.S.AddKnownTypes(olmv1alpha1.SchemeGroupVersion, &olmv1alpha1.ClusterServiceVersion{})
f.objs = append(f.objs, ClusterServiceVersionListMock(f.ns, name))
}
// AddMockedCSVWithVolumeMountList adds a mocked object from ClusterServiceVersionListVolumeMountMock.
func (f *Fake) AddMockedCSVWithVolumeMountList(name string) {
require.NoError(f.t, olmv1alpha1.AddToScheme(f.S))
f.S.AddKnownTypes(olmv1alpha1.SchemeGroupVersion, &olmv1alpha1.ClusterServiceVersion{})
f.objs = append(f.objs, ClusterServiceVersionListVolumeMountMock(f.ns, name))
}
// AddMockedUnstructuredCSVWithVolumeMount is the same as AddMockedCSVWithVolumeMountList but uses an
// unstructured object.
func (f *Fake) AddMockedUnstructuredCSVWithVolumeMount(name string) {
require.NoError(f.t, olmv1alpha1.AddToScheme(f.S))
csv, err := UnstructuredClusterServiceVersionVolumeMountMock(f.ns, name)
require.NoError(f.t, err)
f.S.AddKnownTypes(olmv1alpha1.SchemeGroupVersion, &olmv1alpha1.ClusterServiceVersion{})
f.objs = append(f.objs, csv)
}
// AddMockedDatabaseCR adds a mocked object from DatabaseCRMock.
func (f *Fake) AddMockedDatabaseCR(ref string, namespace string) runtime.Object {
require.NoError(f.t, pgapis.AddToScheme(f.S))
f.S.AddKnownTypes(pgv1alpha1.SchemeGroupVersion, &pgv1alpha1.Database{})
mock := DatabaseCRMock(namespace, ref)
f.objs = append(f.objs, mock)
return mock
}
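// AddMockedUnstructuredDatabaseCR adds a mocked unstructured object from UnstructuredDatabaseCRMock.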
func (f *Fake) AddMockedUnstructuredDatabaseCR(ref string) {
require.NoError(f.t, pgapis.AddToScheme(f.S))
d, err := UnstructuredDatabaseCRMock(f.ns, ref)
require.NoError(f.t, err)
f.objs = append(f.objs, d)
}
// AddMockedUnstructuredDeploymentConfig adds a mocked object from UnstructuredDeploymentConfigMock.
func (f *Fake) AddMockedUnstructuredDeploymentConfig(name string, matchLabels map[string]string) {
require.Nil(f.t, ocav1.AddToScheme(f.S))
d, err := UnstructuredDeploymentConfigMock(f.ns, name, matchLabels)
require.Nil(f.t, err)
f.S.AddKnownTypes(ocav1.SchemeGroupVersion, &ocav1.DeploymentConfig{})
f.objs = append(f.objs, d)
}
// AddMockedUnstructuredDeployment adds a mocked object from UnstructuredDeploymentMock.
func (f *Fake) AddMockedUnstructuredDeployment(name string, matchLabels map[string]string) *unstructured.Unstructured {
require.NoError(f.t, appsv1.AddToScheme(f.S))
d, err := UnstructuredDeploymentMock(f.ns, name, matchLabels)
require.NoError(f.t, err)
f.S.AddKnownTypes(appsv1.SchemeGroupVersion, &appsv1.Deployment{})
f.objs = append(f.objs, d)
return d
}
// AddMockedUnstructuredKnativeService adds a mocked object from UnstructuredKnativeServiceMock.
func (f *Fake) AddMockedUnstructuredKnativeService(name string, matchLabels map[string]string) {
require.NoError(f.t, knativev1.AddToScheme(f.S))
d, err := UnstructuredKnativeServiceMock(f.ns, name, matchLabels)
require.NoError(f.t, err)
f.S.AddKnownTypes(knativev1.SchemeGroupVersion, &knativev1.Service{})
f.objs = append(f.objs, d)
}
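// AddMockedUnstructuredDatabaseCRD adds a mocked unstructured CRD from UnstructuredDatabaseCRDMock.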
func (f *Fake) AddMockedUnstructuredDatabaseCRD() *unstructured.Unstructured {
require.NoError(f.t, apiextensionv1beta1.AddToScheme(f.S))
c, err := UnstructuredDatabaseCRDMock(f.ns)
require.NoError(f.t, err)
f.S.AddKnownTypes(apiextensionv1beta1.SchemeGroupVersion, &apiextensionv1beta1.CustomResourceDefinition{})
f.objs = append(f.objs, c)
return c
}
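// AddMockedUnstructuredPostgresDatabaseCR adds a mocked unstructured object from UnstructuredPostgresDatabaseCRMock.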
func (f *Fake) AddMockedUnstructuredPostgresDatabaseCR(ref string) *unstructured.Unstructured {
d, err := UnstructuredPostgresDatabaseCRMock(f.ns, ref)
require.NoError(f.t, err)
f.objs = append(f.objs, d)
return d
}
// AddMockedSecret adds a mocked object from SecretMock.
func (f *Fake) AddMockedSecret(name string) {
	f.objs = append(f.objs, SecretMock(f.ns, name))
}
// AddNamespacedMockedSecret adds a mocked object from SecretMock in a namespace
// which isn't necessarily the same as the ServiceBindingRequest namespace.
func (f *Fake) AddNamespacedMockedSecret(name string, namespace string) {
f.objs = append(f.objs, SecretMock(namespace, name))
}
// AddMockedUnstructuredConfigMap adds a mocked object from ConfigMapMock.
func (f *Fake) AddMockedUnstructuredConfigMap(name string) {
mock := ConfigMapMock(f.ns, name)
uObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(mock)
require.NoError(f.t, err)
f.objs = append(f.objs, &unstructured.Unstructured{Object: uObj})
}
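// AddMockResource appends an arbitrary runtime.Object to the list of mocked objects.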
func (f *Fake) AddMockResource(resource runtime.Object) {
f.objs = append(f.objs, resource)
}
// FakeClient returns a fake structured API client.
func (f *Fake) FakeClient() client.Client {
return fake.NewFakeClientWithScheme(f.S, f.objs...)
}
// FakeDynClient returns a fake dynamic API client.
func (f *Fake) FakeDynClient() *fakedynamic.FakeDynamicClient {
return fakedynamic.NewSimpleDynamicClient(f.S, f.objs...)
}
// NewFake instantiates the Fake type.
func NewFake(t *testing.T, ns string) *Fake {
return &Fake{t: t, ns: ns, S: scheme.Scheme}
} |
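// Illustrative test wiring (a sketch; the namespace and object names are
// arbitrary examples, not taken from a real test):
//
//	f := mocks.NewFake(t, "binding-demo")
//	f.AddMockedSecret("db-credentials")
//	f.AddMockedUnstructuredDeployment("app", map[string]string{"app": "demo"})
//	structured := f.FakeClient()  // typed client over the mocked objects
//	dynamic := f.FakeDynClient()  // dynamic client over the same objects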
mod.rs | //! Native threads.
//!
//! ## The threading model
//!
//! An executing Rust program consists of a collection of native OS threads,
//! each with their own stack and local state. Threads can be named, and
//! provide some built-in support for low-level synchronization.
//!
//! Communication between threads can be done through
//! [channels], Rust's message-passing types, along with [other forms of thread
//! synchronization](../../std/sync/index.html) and shared-memory data
//! structures. In particular, types that are guaranteed to be
//! threadsafe are easily shared between threads using the
//! atomically-reference-counted container, [`Arc`].
//!
//! Fatal logic errors in Rust cause *thread panic*, during which
//! a thread will unwind the stack, running destructors and freeing
//! owned resources. While not meant as a 'try/catch' mechanism, panics
//! in Rust can nonetheless be caught (unless compiling with `panic=abort`) with
//! [`catch_unwind`](../../std/panic/fn.catch_unwind.html) and recovered
//! from, or alternatively be resumed with
//! [`resume_unwind`](../../std/panic/fn.resume_unwind.html). If the panic
//! is not caught the thread will exit, but the panic may optionally be
//! detected from a different thread with [`join`]. If the main thread panics
//! without the panic being caught, the application will exit with a
//! non-zero exit code.
//!
//! When the main thread of a Rust program terminates, the entire program shuts
//! down, even if other threads are still running. However, this module provides
//! convenient facilities for automatically waiting for the termination of a
//! child thread (i.e., join).
//!
//! ## Spawning a thread
//!
//! A new thread can be spawned using the [`thread::spawn`][`spawn`] function:
//!
//! ```rust
//! use std::thread;
//!
//! thread::spawn(move || {
//! // some work here
//! });
//! ```
//!
//! In this example, the spawned thread is "detached" from the current
//! thread. This means that it can outlive its parent (the thread that spawned
//! it), unless this parent is the main thread.
//!
//! The parent thread can also wait on the completion of the child
//! thread; a call to [`spawn`] produces a [`JoinHandle`], which provides
//! a `join` method for waiting:
//!
//! ```rust
//! use std::thread;
//!
//! let child = thread::spawn(move || {
//! // some work here
//! });
//! // some work here
//! let res = child.join();
//! ```
//!
//! The [`join`] method returns a [`thread::Result`] containing [`Ok`] of the final
//! value produced by the child thread, or [`Err`] of the value given to
//! a call to [`panic!`] if the child panicked.
//!
//! ## Configuring threads
//!
//! A new thread can be configured before it is spawned via the [`Builder`] type,
//! which currently allows you to set the name and stack size for the child thread:
//!
//! ```rust
//! # #![allow(unused_must_use)]
//! use std::thread;
//!
//! thread::Builder::new().name("child1".to_string()).spawn(move || {
//! println!("Hello, world!");
//! });
//! ```
//!
//! ## The `Thread` type
//!
//! Threads are represented via the [`Thread`] type, which you can get in one of
//! two ways:
//!
//! * By spawning a new thread, e.g., using the [`thread::spawn`][`spawn`]
//! function, and calling [`thread`][`JoinHandle::thread`] on the [`JoinHandle`].
//! * By requesting the current thread, using the [`thread::current`] function.
//!
//! The [`thread::current`] function is available even for threads not spawned
//! by the APIs of this module.
//!
//! ## Thread-local storage
//!
//! This module also provides an implementation of thread-local storage for Rust
//! programs. Thread-local storage is a method of storing data into a global
//! variable that each thread in the program will have its own copy of.
//! Threads do not share this data, so accesses do not need to be synchronized.
//!
//! A thread-local key owns the value it contains and will destroy the value when the
//! thread exits. It is created with the [`thread_local!`] macro and can contain any
//! value that is `'static` (no borrowed pointers). It provides an accessor function,
//! [`with`], that yields a shared reference to the value to the specified
//! closure. Thread-local keys allow only shared access to values, as there would be no
//! way to guarantee uniqueness if mutable borrows were allowed. Most values
//! will want to make use of some form of **interior mutability** through the
//! [`Cell`] or [`RefCell`] types.
//!
//! ## Naming threads
//!
//! Threads are able to have associated names for identification purposes. By default, spawned
//! threads are unnamed. To specify a name for a thread, build the thread with [`Builder`] and pass
//! the desired thread name to [`Builder::name`]. To retrieve the thread name from within the
//! thread, use [`Thread::name`]. A couple examples of where the name of a thread gets used:
//!
//! * If a panic occurs in a named thread, the thread name will be printed in the panic message.
//! * The thread name is provided to the OS where applicable (e.g., `pthread_setname_np` in
//! unix-like platforms).
//!
//! ## Stack size
//!
//! The default stack size for spawned threads is 2 MiB, though this particular stack size is
//! subject to change in the future. There are two ways to manually specify the stack size for
//! spawned threads:
//!
//! * Build the thread with [`Builder`] and pass the desired stack size to [`Builder::stack_size`].
//! * Set the `RUST_MIN_STACK` environment variable to an integer representing the desired stack
//! size (in bytes). Note that setting [`Builder::stack_size`] will override this.
//!
//! Note that the stack size of the main thread is *not* determined by Rust.
//!
//! [channels]: crate::sync::mpsc
//! [`join`]: JoinHandle::join
//! [`Result`]: crate::result::Result
//! [`Ok`]: crate::result::Result::Ok
//! [`Err`]: crate::result::Result::Err
//! [`thread::current`]: current
//! [`thread::Result`]: Result
//! [`unpark`]: Thread::unpark
//! [`thread::park_timeout`]: park_timeout
//! [`Cell`]: crate::cell::Cell
//! [`RefCell`]: crate::cell::RefCell
//! [`with`]: LocalKey::with
#![stable(feature = "rust1", since = "1.0.0")]
#![deny(unsafe_op_in_unsafe_fn)]
#[cfg(all(test, not(target_os = "emscripten")))]
mod tests;
use crate::any::Any;
use crate::cell::UnsafeCell;
use crate::ffi::{CStr, CString};
use crate::fmt;
use crate::io;
use crate::mem;
use crate::num::NonZeroU64;
use crate::num::NonZeroUsize;
use crate::panic;
use crate::panicking;
use crate::str;
use crate::sync::Arc;
use crate::sys::thread as imp;
use crate::sys_common::mutex;
use crate::sys_common::thread;
use crate::sys_common::thread_info;
use crate::sys_common::thread_parker::Parker;
use crate::sys_common::{AsInner, IntoInner};
use crate::time::Duration;
////////////////////////////////////////////////////////////////////////////////
// Thread-local storage
////////////////////////////////////////////////////////////////////////////////
#[macro_use]
mod local;
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::local::{AccessError, LocalKey};
// The types used by the thread_local! macro to access TLS keys. Note that there
// are two types, the "OS" type and the "fast" type. The OS thread local key
// type is accessed via platform-specific API calls and is slow, while the fast
// key type is accessed via code generated via LLVM, where TLS keys are set up
// by the elf linker. Note that the OS TLS type is always available: on macOS
// the standard library is compiled with support for older platform versions
// where fast TLS was not available; end-user code is compiled with fast TLS
// where available, but both are needed.
#[unstable(feature = "libstd_thread_internals", issue = "none")]
#[cfg(target_thread_local)]
#[doc(hidden)]
pub use self::local::fast::Key as __FastLocalKeyInner;
#[unstable(feature = "libstd_thread_internals", issue = "none")]
#[doc(hidden)]
pub use self::local::os::Key as __OsLocalKeyInner;
#[unstable(feature = "libstd_thread_internals", issue = "none")]
#[cfg(all(target_arch = "wasm32", not(target_feature = "atomics")))]
#[doc(hidden)]
pub use self::local::statik::Key as __StaticLocalKeyInner;
// This is only used to make thread locals with `const { .. }` initialization
// expressions unstable. If and/or when that syntax is stabilized with thread
// locals this will simply be removed.
#[doc(hidden)]
#[unstable(feature = "thread_local_const_init", issue = "84223")]
pub const fn require_unstable_const_init_thread_local() {}
////////////////////////////////////////////////////////////////////////////////
// Builder
////////////////////////////////////////////////////////////////////////////////
/// Thread factory, which can be used in order to configure the properties of
/// a new thread.
///
/// Methods can be chained on it in order to configure it.
///
/// The two configurations available are:
///
/// - [`name`]: specifies an [associated name for the thread][naming-threads]
/// - [`stack_size`]: specifies the [desired stack size for the thread][stack-size]
///
/// The [`spawn`] method will take ownership of the builder and create an
/// [`io::Result`] to the thread handle with the given configuration.
///
/// The [`thread::spawn`] free function uses a `Builder` with default
/// configuration and [`unwrap`]s its return value.
///
/// You may want to use [`spawn`] instead of [`thread::spawn`] when you want
/// to recover from a failure to launch a thread; indeed, the free function
/// will panic where the `Builder` method will return an [`io::Result`].
///
/// # Examples
///
/// ```
/// use std::thread;
///
/// let builder = thread::Builder::new();
///
/// let handler = builder.spawn(|| {
/// // thread code
/// }).unwrap();
///
/// handler.join().unwrap();
/// ```
///
/// [`stack_size`]: Builder::stack_size
/// [`name`]: Builder::name
/// [`spawn`]: Builder::spawn
/// [`thread::spawn`]: spawn
/// [`io::Result`]: crate::io::Result
/// [`unwrap`]: crate::result::Result::unwrap
/// [naming-threads]: ./index.html#naming-threads
/// [stack-size]: ./index.html#stack-size
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
pub struct Builder {
// A name for the thread-to-be, for identification in panic messages
name: Option<String>,
// The size of the stack for the spawned thread in bytes
stack_size: Option<usize>,
}
impl Builder {
/// Generates the base configuration for spawning a thread, from which
/// configuration methods can be chained.
///
/// # Examples
///
/// ```
/// use std::thread;
///
/// let builder = thread::Builder::new()
/// .name("foo".into())
/// .stack_size(32 * 1024);
///
/// let handler = builder.spawn(|| {
/// // thread code
/// }).unwrap();
///
/// handler.join().unwrap();
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new() -> Builder {
Builder { name: None, stack_size: None }
}
/// Names the thread-to-be. Currently the name is used for identification
/// only in panic messages.
///
/// The name must not contain null bytes (`\0`).
///
/// For more information about named threads, see
/// [this module-level documentation][naming-threads].
///
/// # Examples
///
/// ```
/// use std::thread;
///
/// let builder = thread::Builder::new()
/// .name("foo".into());
///
/// let handler = builder.spawn(|| {
/// assert_eq!(thread::current().name(), Some("foo"))
/// }).unwrap();
///
/// handler.join().unwrap();
/// ```
///
/// [naming-threads]: ./index.html#naming-threads
#[stable(feature = "rust1", since = "1.0.0")]
pub fn name(mut self, name: String) -> Builder {
self.name = Some(name);
self
}
/// Sets the size of the stack (in bytes) for the new thread.
///
/// The actual stack size may be greater than this value if
/// the platform specifies a minimal stack size.
///
/// For more information about the stack size for threads, see
/// [this module-level documentation][stack-size].
///
/// # Examples
///
/// ```
/// use std::thread;
///
/// let builder = thread::Builder::new().stack_size(32 * 1024);
/// ```
///
/// [stack-size]: ./index.html#stack-size
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stack_size(mut self, size: usize) -> Builder {
self.stack_size = Some(size);
self
}
/// Spawns a new thread by taking ownership of the `Builder`, and returns an
/// [`io::Result`] to its [`JoinHandle`].
///
/// The spawned thread may outlive the caller (unless the caller thread
/// is the main thread; the whole process is terminated when the main
/// thread finishes). The join handle can be used to block on
/// termination of the child thread, including recovering its panics.
///
/// For a more complete documentation see [`thread::spawn`][`spawn`].
///
/// # Errors
///
/// Unlike the [`spawn`] free function, this method yields an
/// [`io::Result`] to capture any failure to create the thread at
/// the OS level.
///
/// [`io::Result`]: crate::io::Result
///
/// # Panics
///
/// Panics if a thread name was set and it contained null bytes.
///
/// # Examples
///
/// ```
/// use std::thread;
///
/// let builder = thread::Builder::new();
///
/// let handler = builder.spawn(|| {
/// // thread code
/// }).unwrap();
///
/// handler.join().unwrap();
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn spawn<F, T>(self, f: F) -> io::Result<JoinHandle<T>>
where
F: FnOnce() -> T,
F: Send + 'static,
T: Send + 'static,
    {
        unsafe { self.spawn_unchecked(f) }
    }
/// Spawns a new thread without any lifetime restrictions by taking ownership
/// of the `Builder`, and returns an [`io::Result`] to its [`JoinHandle`].
///
/// The spawned thread may outlive the caller (unless the caller thread
/// is the main thread; the whole process is terminated when the main
/// thread finishes). The join handle can be used to block on
/// termination of the child thread, including recovering its panics.
///
/// This method is identical to [`thread::Builder::spawn`][`Builder::spawn`],
/// except for the relaxed lifetime bounds, which render it unsafe.
/// For a more complete documentation see [`thread::spawn`][`spawn`].
///
/// # Errors
///
/// Unlike the [`spawn`] free function, this method yields an
/// [`io::Result`] to capture any failure to create the thread at
/// the OS level.
///
/// # Panics
///
/// Panics if a thread name was set and it contained null bytes.
///
/// # Safety
///
/// The caller has to ensure that no references in the supplied thread closure
/// or its return type can outlive the spawned thread's lifetime. This can be
/// guaranteed in two ways:
///
/// - ensure that [`join`][`JoinHandle::join`] is called before any referenced
/// data is dropped
/// - use only types with `'static` lifetime bounds, i.e., those with no or only
/// `'static` references (both [`thread::Builder::spawn`][`Builder::spawn`]
/// and [`thread::spawn`][`spawn`] enforce this property statically)
///
/// # Examples
///
/// ```
/// #![feature(thread_spawn_unchecked)]
/// use std::thread;
///
/// let builder = thread::Builder::new();
///
/// let x = 1;
/// let thread_x = &x;
///
/// let handler = unsafe {
/// builder.spawn_unchecked(move || {
/// println!("x = {}", *thread_x);
/// }).unwrap()
/// };
///
/// // caller has to ensure `join()` is called, otherwise
/// // it is possible to access freed memory if `x` gets
/// // dropped before the thread closure is executed!
/// handler.join().unwrap();
/// ```
///
/// [`io::Result`]: crate::io::Result
#[unstable(feature = "thread_spawn_unchecked", issue = "55132")]
pub unsafe fn spawn_unchecked<'a, F, T>(self, f: F) -> io::Result<JoinHandle<T>>
where
F: FnOnce() -> T,
F: Send + 'a,
T: Send + 'a,
{
let Builder { name, stack_size } = self;
let stack_size = stack_size.unwrap_or_else(thread::min_stack);
let my_thread = Thread::new(name);
let their_thread = my_thread.clone();
let my_packet: Arc<UnsafeCell<Option<Result<T>>>> = Arc::new(UnsafeCell::new(None));
let their_packet = my_packet.clone();
let output_capture = crate::io::set_output_capture(None);
crate::io::set_output_capture(output_capture.clone());
let main = move || {
if let Some(name) = their_thread.cname() {
imp::Thread::set_name(name);
}
crate::io::set_output_capture(output_capture);
// SAFETY: the stack guard passed is the one for the current thread.
// This means the current thread's stack and the new thread's stack
// are properly set and protected from each other.
thread_info::set(unsafe { imp::guard::current() }, their_thread);
let try_result = panic::catch_unwind(panic::AssertUnwindSafe(|| {
crate::sys_common::backtrace::__rust_begin_short_backtrace(f)
}));
        // SAFETY: `their_packet` has been built just above and moved by the
// closure (it is an Arc<...>) and `my_packet` will be stored in the
// same `JoinInner` as this closure meaning the mutation will be
// safe (not modify it and affect a value far away).
unsafe { *their_packet.get() = Some(try_result) };
};
Ok(JoinHandle(JoinInner {
// SAFETY:
//
// `imp::Thread::new` takes a closure with a `'static` lifetime, since it's passed
// through FFI or otherwise used with low-level threading primitives that have no
// notion of or way to enforce lifetimes.
//
// As mentioned in the `Safety` section of this function's documentation, the caller of
// this function needs to guarantee that the passed-in lifetime is sufficiently long
// for the lifetime of the thread.
//
// Similarly, the `sys` implementation must guarantee that no references to the closure
// exist after the thread has terminated, which is signaled by `Thread::join`
// returning.
native: unsafe {
Some(imp::Thread::new(
stack_size,
mem::transmute::<Box<dyn FnOnce() + 'a>, Box<dyn FnOnce() + 'static>>(
Box::new(main),
),
)?)
},
thread: my_thread,
packet: Packet(my_packet),
}))
}
}
////////////////////////////////////////////////////////////////////////////////
// Free functions
////////////////////////////////////////////////////////////////////////////////
/// Spawns a new thread, returning a [`JoinHandle`] for it.
///
/// The join handle will implicitly *detach* the child thread upon being
/// dropped. In this case, the child thread may outlive the parent (unless
/// the parent thread is the main thread; the whole process is terminated when
/// the main thread finishes). Additionally, the join handle provides a [`join`]
/// method that can be used to join the child thread. If the child thread
/// panics, [`join`] will return an [`Err`] containing the argument given to
/// [`panic!`].
///
/// This will create a thread using the default parameters of [`Builder`]; if you
/// want to specify the stack size or the name of the thread, use [`Builder`]
/// instead.
///
/// As you can see in the signature of `spawn`, there are two constraints on
/// both the closure given to `spawn` and its return value; let's explain them:
///
/// - The `'static` constraint means that the closure and its return value
/// must have a lifetime of the whole program execution. The reason for this
/// is that threads can `detach` and outlive the lifetime they have been
/// created in.
/// Indeed if the thread, and by extension its return value, can outlive their
/// caller, we need to make sure that they will be valid afterwards, and since
/// we *can't* know when it will return we need to have them valid as long as
/// possible, that is until the end of the program, hence the `'static`
/// lifetime.
/// - The [`Send`] constraint is because the closure will need to be passed
/// *by value* from the thread where it is spawned to the new thread. Its
/// return value will need to be passed from the new thread to the thread
/// where it is `join`ed.
/// As a reminder, the [`Send`] marker trait expresses that it is safe to be
/// passed from thread to thread. [`Sync`] expresses that it is safe to have a
/// reference be passed from thread to thread.
///
/// # Panics
///
/// Panics if the OS fails to create a thread; use [`Builder::spawn`]
/// to recover from such errors.
///
/// # Examples
///
/// Creating a thread.
///
/// ```
/// use std::thread;
///
/// let handler = thread::spawn(|| {
/// // thread code
/// });
///
/// handler.join().unwrap();
/// ```
///
/// As mentioned in the module documentation, threads are usually made to
/// communicate using [`channels`], here is how it usually looks.
///
/// This example also shows how to use `move`, in order to give ownership
/// of values to a thread.
///
/// ```
/// use std::thread;
/// use std::sync::mpsc::channel;
///
/// let (tx, rx) = channel();
///
/// let sender = thread::spawn(move || {
/// tx.send("Hello, thread".to_owned())
/// .expect("Unable to send on channel");
/// });
///
/// let receiver = thread::spawn(move || {
/// let value = rx.recv().expect("Unable to receive from channel");
/// println!("{}", value);
/// });
///
/// sender.join().expect("The sender thread has panicked");
/// receiver.join().expect("The receiver thread has panicked");
/// ```
///
/// A thread can also return a value through its [`JoinHandle`], you can use
/// this to make asynchronous computations (futures might be more appropriate
/// though).
///
/// ```
/// use std::thread;
///
/// let computation = thread::spawn(|| {
/// // Some expensive computation.
/// 42
/// });
///
/// let result = computation.join().unwrap();
/// println!("{}", result);
/// ```
///
/// [`channels`]: crate::sync::mpsc
/// [`join`]: JoinHandle::join
/// [`Err`]: crate::result::Result::Err
#[stable(feature = "rust1", since = "1.0.0")]
pub fn spawn<F, T>(f: F) -> JoinHandle<T>
where
F: FnOnce() -> T,
F: Send + 'static,
T: Send + 'static,
{
Builder::new().spawn(f).expect("failed to spawn thread")
}
/// Gets a handle to the thread that invokes it.
///
/// # Examples
///
/// Getting a handle to the current thread with `thread::current()`:
///
/// ```
/// use std::thread;
///
/// let handler = thread::Builder::new()
/// .name("named thread".into())
/// .spawn(|| {
/// let handle = thread::current();
/// assert_eq!(handle.name(), Some("named thread"));
/// })
/// .unwrap();
///
/// handler.join().unwrap();
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn current() -> Thread {
thread_info::current_thread().expect(
"use of std::thread::current() is not possible \
after the thread's local data has been destroyed",
)
}
/// Cooperatively gives up a timeslice to the OS scheduler.
///
/// This is used when the programmer knows that the thread will have nothing
/// to do for some time, and thus avoid wasting computing time.
///
/// For example when polling on a resource, it is common to check that it is
/// available, and if not to yield in order to avoid busy waiting.
///
/// Thus the pattern of `yield`ing after a failed poll is rather common when
/// implementing low-level shared resources or synchronization primitives.
///
/// However programmers will usually prefer to use [`channel`]s, [`Condvar`]s,
/// [`Mutex`]es or [`join`] for their synchronization routines, as they avoid
/// thinking about thread scheduling.
///
/// Note that [`channel`]s for example are implemented using this primitive.
/// Indeed when you call `send` or `recv`, which are blocking, they will yield
/// if the channel is not available.
///
/// # Examples
///
/// ```
/// use std::thread;
///
/// thread::yield_now();
/// ```
///
/// [`channel`]: crate::sync::mpsc
/// [`join`]: JoinHandle::join
/// [`Condvar`]: crate::sync::Condvar
/// [`Mutex`]: crate::sync::Mutex
#[stable(feature = "rust1", since = "1.0.0")]
pub fn yield_now() {
imp::Thread::yield_now()
}
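// Illustrative spin-then-yield polling loop as described above (a sketch;
// `try_acquire` stands in for any hypothetical non-blocking poll):
//
//     loop {
//         if try_acquire() {
//             break;
//         }
//         std::thread::yield_now(); // give up the timeslice instead of busy-spinning
//     }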
/// Determines whether the current thread is unwinding because of a panic.
///
/// A common use of this feature is to poison shared resources when writing
/// unsafe code, by checking `panicking` when the `drop` is called.
///
/// This is usually not needed when writing safe code, as [`Mutex`es][Mutex]
/// already poison themselves when a thread panics while holding the lock.
///
/// This can also be used in multithreaded applications, in order to send a
/// message to other threads warning that a thread has panicked (e.g., for
/// monitoring purposes).
///
/// # Examples
///
/// ```should_panic
/// use std::thread;
///
/// struct SomeStruct;
///
/// impl Drop for SomeStruct {
/// fn drop(&mut self) {
/// if thread::panicking() {
/// println!("dropped while unwinding");
/// } else {
/// println!("dropped while not unwinding");
/// }
/// }
/// }
///
/// {
/// print!("a: ");
/// let a = SomeStruct;
/// }
///
/// {
/// print!("b: ");
/// let b = SomeStruct;
/// panic!()
/// }
/// ```
///
/// [Mutex]: crate::sync::Mutex
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn panicking() -> bool {
panicking::panicking()
}
/// Puts the current thread to sleep for at least the specified amount of time.
///
/// The thread may sleep longer than the duration specified due to scheduling
/// specifics or platform-dependent functionality. It will never sleep less.
///
/// This function is blocking, and should not be used in `async` functions.
///
/// # Platform-specific behavior
///
/// On Unix platforms, the underlying syscall may be interrupted by a
/// spurious wakeup or signal handler. To ensure the sleep occurs for at least
/// the specified duration, this function may invoke that system call multiple
/// times.
///
/// # Examples
///
/// ```no_run
/// use std::thread;
///
/// // Let's sleep for 2 seconds:
/// thread::sleep_ms(2000);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_deprecated(since = "1.6.0", reason = "replaced by `std::thread::sleep`")]
pub fn sleep_ms(ms: u32) {
sleep(Duration::from_millis(ms as u64))
}
/// Puts the current thread to sleep for at least the specified amount of time.
///
/// The thread may sleep longer than the duration specified due to scheduling
/// specifics or platform-dependent functionality. It will never sleep less.
///
/// This function is blocking, and should not be used in `async` functions.
///
/// # Platform-specific behavior
///
/// On Unix platforms, the underlying syscall may be interrupted by a
/// spurious wakeup or signal handler. To ensure the sleep occurs for at least
/// the specified duration, this function may invoke that system call multiple
/// times.
/// Platforms which do not support nanosecond precision for sleeping will
/// have `dur` rounded up to the nearest granularity of time they can sleep for.
///
/// Currently, specifying a zero duration on Unix platforms returns immediately
/// without invoking the underlying [`nanosleep`] syscall, whereas on Windows
/// platforms the underlying [`Sleep`] syscall is always invoked.
/// If the intention is to yield the current time-slice you may want to use
/// [`yield_now`] instead.
///
/// [`nanosleep`]: https://linux.die.net/man/2/nanosleep
/// [`Sleep`]: https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-sleep
///
/// # Examples
///
/// ```no_run
/// use std::{thread, time};
///
/// let ten_millis = time::Duration::from_millis(10);
/// let now = time::Instant::now();
///
/// thread::sleep(ten_millis);
///
/// assert!(now.elapsed() >= ten_millis);
/// ```
#[stable(feature = "thread_sleep", since = "1.4.0")]
pub fn sleep(dur: Duration) {
imp::Thread::sleep(dur)
}
/// Blocks unless or until the current thread's token is made available.
///
/// A call to `park` does not guarantee that the thread will remain parked
/// forever, and callers should be prepared for this possibility.
///
/// # park and unpark
///
/// Every thread is equipped with some basic low-level blocking support, via the
/// [`thread::park`][`park`] function and [`thread::Thread::unpark`][`unpark`]
/// method. [`park`] blocks the current thread, which can then be resumed from
/// another thread by calling the [`unpark`] method on the blocked thread's
/// handle.
///
/// Conceptually, each [`Thread`] handle has an associated token, which is
/// initially not present:
///
/// * The [`thread::park`][`park`] function blocks the current thread unless or
/// until the token is available for its thread handle, at which point it
/// atomically consumes the token. It may also return *spuriously*, without
/// consuming the token. [`thread::park_timeout`] does the same, but allows
/// specifying a maximum time to block the thread for.
///
/// * The [`unpark`] method on a [`Thread`] atomically makes the token available
/// if it wasn't already. Because the token is initially absent, [`unpark`]
/// followed by [`park`] will result in the second call returning immediately.
///
/// In other words, each [`Thread`] acts a bit like a spinlock that can be
/// locked and unlocked using `park` and `unpark`.
///
/// Notice that being unblocked does not imply any synchronization with someone
/// that unparked this thread, it could also be spurious.
/// For example, it would be a valid, but inefficient, implementation to make both [`park`] and
/// [`unpark`] return immediately without doing anything.
///
/// The API is typically used by acquiring a handle to the current thread,
/// placing that handle in a shared data structure so that other threads can
/// find it, and then `park`ing in a loop. When some desired condition is met, another
/// thread calls [`unpark`] on the handle.
///
/// The motivation for this design is twofold:
///
/// * It avoids the need to allocate mutexes and condvars when building new
/// synchronization primitives; the threads already provide basic
/// blocking/signaling.
///
/// * It can be implemented very efficiently on many platforms.
///
/// # Examples
///
/// ```
/// use std::thread;
/// use std::sync::{Arc, atomic::{Ordering, AtomicBool}};
/// use std::time::Duration;
///
/// let flag = Arc::new(AtomicBool::new(false));
/// let flag2 = Arc::clone(&flag);
///
/// let parked_thread = thread::spawn(move || {
/// // We want to wait until the flag is set. We *could* just spin, but using
/// // park/unpark is more efficient.
/// while !flag2.load(Ordering::Acquire) {
/// println!("Parking thread");
/// thread::park();
/// // We *could* get here spuriously, i.e., way before the 10ms below are over!
/// // But that is no problem, we are in a loop until the flag is set anyway.
/// println!("Thread unparked");
/// }
/// println!("Flag received");
/// });
///
/// // Let some time pass for the thread to be spawned.
/// thread::sleep(Duration::from_millis(10));
///
/// // Set the flag, and let the thread wake up.
/// // There is no race condition here, if `unpark`
/// // happens first, `park` will return immediately.
/// // Hence there is no risk of a deadlock.
/// flag.store(true, Ordering::Release);
/// println!("Unpark the thread");
/// parked_thread.thread().unpark();
///
/// parked_thread.join().unwrap();
/// ```
///
/// [`unpark`]: Thread::unpark
/// [`thread::park_timeout`]: park_timeout
#[stable(feature = "rust1", since = "1.0.0")]
pub fn park() {
// SAFETY: park_timeout is called on the parker owned by this thread.
unsafe {
current().inner.parker.park();
}
}
/// Use [`park_timeout`].
///
/// Blocks unless or until the current thread's token is made available or
/// the specified duration has been reached (may wake spuriously).
///
/// The semantics of this function are equivalent to [`park`] except
/// that the thread will be blocked for roughly no longer than `dur`. This
/// method should not be used for precise timing due to anomalies such as
/// preemption or platform differences that may not cause the maximum
/// amount of time waited to be precisely `ms` long.
///
/// See the [park documentation][`park`] for more detail.
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_deprecated(since = "1.6.0", reason = "replaced by `std::thread::park_timeout`")]
pub fn park_timeout_ms(ms: u32) {
park_timeout(Duration::from_millis(ms as u64))
}
/// Blocks unless or until the current thread's token is made available or
/// the specified duration has been reached (may wake spuriously).
///
/// The semantics of this function are equivalent to [`park`][park] except
/// that the thread will be blocked for roughly no longer than `dur`. This
/// method should not be used for precise timing due to anomalies such as
/// preemption or platform differences that may not cause the maximum
/// amount of time waited to be precisely `dur` long.
///
/// See the [park documentation][park] for more details.
///
/// # Platform-specific behavior
///
/// Platforms which do not support nanosecond precision for sleeping will have
/// `dur` rounded up to the nearest granularity of time they can sleep for.
///
/// # Examples
///
/// Waiting for the complete expiration of the timeout:
///
/// ```rust,no_run
/// use std::thread::park_timeout;
/// use std::time::{Instant, Duration};
///
/// let timeout = Duration::from_secs(2);
/// let beginning_park = Instant::now();
///
/// let mut timeout_remaining = timeout;
/// loop {
/// park_timeout(timeout_remaining);
/// let elapsed = beginning_park.elapsed();
/// if elapsed >= timeout {
/// break;
/// }
/// println!("restarting park_timeout after {:?}", elapsed);
/// timeout_remaining = timeout - elapsed;
/// }
/// ```
#[stable(feature = "park_timeout", since = "1.4.0")]
pub fn park_timeout(dur: Duration) {
// SAFETY: park_timeout is called on the parker owned by this thread.
unsafe {
current().inner.parker.park_timeout(dur);
}
}
////////////////////////////////////////////////////////////////////////////////
// ThreadId
////////////////////////////////////////////////////////////////////////////////
/// A unique identifier for a running thread.
///
/// A `ThreadId` is an opaque object that has a unique value for each thread
/// that creates one. `ThreadId`s are not guaranteed to correspond to a thread's
/// system-designated identifier. A `ThreadId` can be retrieved from the [`id`]
/// method on a [`Thread`].
///
/// # Examples
///
/// ```
/// use std::thread;
///
/// let other_thread = thread::spawn(|| {
/// thread::current().id()
/// });
///
/// let other_thread_id = other_thread.join().unwrap();
/// assert!(thread::current().id() != other_thread_id);
/// ```
///
/// [`id`]: Thread::id
#[stable(feature = "thread_id", since = "1.19.0")]
#[derive(Eq, PartialEq, Clone, Copy, Hash, Debug)]
pub struct ThreadId(NonZeroU64);
impl ThreadId {
// Generate a new unique thread ID.
fn new() -> ThreadId {
// It is UB to attempt to acquire this mutex reentrantly!
static GUARD: mutex::StaticMutex = mutex::StaticMutex::new();
static mut COUNTER: u64 = 1;
unsafe {
let _guard = GUARD.lock();
// If we somehow use up all our bits, panic so that we're not
// covering up subtle bugs of IDs being reused.
if COUNTER == u64::MAX {
panic!("failed to generate unique thread ID: bitspace exhausted");
}
let id = COUNTER;
COUNTER += 1;
ThreadId(NonZeroU64::new(id).unwrap())
}
}
/// This returns a numeric identifier for the thread identified by this
/// `ThreadId`.
///
/// As noted in the documentation for the type itself, it is essentially an
/// opaque ID, but is guaranteed to be unique for each thread. The returned
/// value is entirely opaque -- only equality testing is stable. Note that
/// it is not guaranteed which values new threads will return, and this may
/// change across Rust versions.
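///
/// # Examples
///
/// A minimal sketch (not part of the original docs); the nightly
/// `thread_id_value` feature gate is required:
///
/// ```
/// #![feature(thread_id_value)]
/// use std::thread;
///
/// let id = thread::current().id();
/// // Only equality of these values is stable; the number itself is opaque.
/// println!("running on thread {}", id.as_u64());
/// ```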
#[unstable(feature = "thread_id_value", issue = "67939")]
pub fn as_u64(&self) -> NonZeroU64 {
self.0
}
}
////////////////////////////////////////////////////////////////////////////////
// Thread
////////////////////////////////////////////////////////////////////////////////
/// The internal representation of a `Thread` handle.
struct Inner {
name: Option<CString>, // Guaranteed to be UTF-8
id: ThreadId,
parker: Parker,
}
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
/// A handle to a thread.
///
/// Threads are represented via the `Thread` type, which you can get in one of
/// two ways:
///
/// * By spawning a new thread, e.g., using the [`thread::spawn`][`spawn`]
/// function, and calling [`thread`][`JoinHandle::thread`] on the
/// [`JoinHandle`].
/// * By requesting the current thread, using the [`thread::current`] function.
///
/// The [`thread::current`] function is available even for threads not spawned
/// by the APIs of this module.
///
/// There is usually no need to create a `Thread` struct yourself; one
/// should instead use a function like `spawn` to create new threads. See the
/// docs of [`Builder`] and [`spawn`] for more details.
///
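/// # Examples
///
/// A small sketch (not part of the original docs) of both ways to obtain a
/// handle:
///
/// ```
/// use std::thread;
///
/// // Via the JoinHandle of a spawned thread...
/// let handle = thread::spawn(|| {});
/// println!("spawned thread id: {:?}", handle.thread().id());
/// handle.join().unwrap();
///
/// // ...or via the current thread.
/// println!("current thread id: {:?}", thread::current().id());
/// ```
///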
/// [`thread::current`]: current
pub struct Thread {
inner: Arc<Inner>,
}
impl Thread {
// Used only internally to construct a thread object without spawning
// Panics if the name contains nuls.
pub(crate) fn new(name: Option<String>) -> Thread {
let cname =
name.map(|n| CString::new(n).expect("thread name may not contain interior null bytes"));
Thread {
inner: Arc::new(Inner { name: cname, id: ThreadId::new(), parker: Parker::new() }),
}
}
/// Atomically makes the handle's token available if it is not already.
///
/// Every thread is equipped with some basic low-level blocking support, via
/// the [`park`][park] function and the `unpark()` method. These can be
/// used as a more CPU-efficient implementation of a spinlock.
///
/// See the [park documentation][park] for more details.
///
/// # Examples
///
/// ```
/// use std::thread;
/// use std::time::Duration;
///
/// let parked_thread = thread::Builder::new()
/// .spawn(|| {
/// println!("Parking thread");
/// thread::park();
/// println!("Thread unparked");
/// })
/// .unwrap();
///
/// // Let some time pass for the thread to be spawned.
/// thread::sleep(Duration::from_millis(10));
///
/// println!("Unpark the thread");
/// parked_thread.thread().unpark();
///
/// parked_thread.join().unwrap();
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn unpark(&self) {
self.inner.parker.unpark();
}
/// Gets the thread's unique identifier.
///
/// # Examples
///
/// ```
/// use std::thread;
///
/// let other_thread = thread::spawn(|| {
/// thread::current().id()
/// });
///
/// let other_thread_id = other_thread.join().unwrap();
/// assert!(thread::current().id() != other_thread_id);
/// ```
#[stable(feature = "thread_id", since = "1.19.0")]
pub fn id(&self) -> ThreadId {
self.inner.id
}
/// Gets the thread's name.
///
/// For more information about named threads, see
/// [this module-level documentation][naming-threads].
///
/// # Examples
///
/// Threads by default have no name specified:
///
/// ```
/// use std::thread;
///
/// let builder = thread::Builder::new();
///
/// let handler = builder.spawn(|| {
/// assert!(thread::current().name().is_none());
/// }).unwrap();
///
/// handler.join().unwrap();
/// ```
///
/// Thread with a specified name:
///
/// ```
/// use std::thread;
///
/// let builder = thread::Builder::new()
/// .name("foo".into());
///
/// let handler = builder.spawn(|| {
/// assert_eq!(thread::current().name(), Some("foo"))
/// }).unwrap();
///
/// handler.join().unwrap();
/// ```
///
/// [naming-threads]: ./index.html#naming-threads
#[stable(feature = "rust1", since = "1.0.0")]
pub fn name(&self) -> Option<&str> {
// SAFETY: `Inner::name` is guaranteed to be UTF-8 (see the field's
// comment), so skipping the UTF-8 check is sound.
self.cname().map(|s| unsafe { str::from_utf8_unchecked(s.to_bytes()) })
}
fn cname(&self) -> Option<&CStr> {
self.inner.name.as_deref()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for Thread {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Thread")
.field("id", &self.id())
.field("name", &self.name())
.finish_non_exhaustive()
}
}
////////////////////////////////////////////////////////////////////////////////
// JoinHandle
////////////////////////////////////////////////////////////////////////////////
/// A specialized [`Result`] type for threads.
///
/// Indicates the manner in which a thread exited.
///
/// The value contained in the `Result::Err` variant
/// is the value the thread panicked with;
/// that is, the argument the `panic!` macro was called with.
/// Unlike with normal errors, this value doesn't implement
/// the [`Error`](crate::error::Error) trait.
///
/// Thus, a sensible way to handle a thread panic is to either:
///
/// 1. propagate the panic with [`std::panic::resume_unwind`]
/// 2. or in case the thread is intended to be a subsystem boundary
/// that is supposed to isolate system-level failures,
/// match on the `Err` variant and handle the panic in an appropriate way
///
/// A thread that completes without panicking is considered to exit successfully.
///
/// # Examples
///
/// Matching on the result of a joined thread:
///
/// ```no_run
/// use std::{fs, thread, panic};
///
/// fn copy_in_thread() -> thread::Result<()> {
/// thread::spawn(|| {
/// fs::copy("foo.txt", "bar.txt").unwrap();
/// }).join()
/// }
///
/// fn main() {
/// match copy_in_thread() {
/// Ok(_) => println!("copy succeeded"),
/// Err(e) => panic::resume_unwind(e),
/// }
/// }
/// ```
///
/// [`Result`]: crate::result::Result
/// [`std::panic::resume_unwind`]: crate::panic::resume_unwind
#[stable(feature = "rust1", since = "1.0.0")]
pub type Result<T> = crate::result::Result<T, Box<dyn Any + Send + 'static>>;
// This packet is used to communicate the return value between the child thread
// and the parent thread. Memory is shared through the `Arc` within and there's
// no need for a mutex here because synchronization happens with `join()` (the
// parent thread never reads this packet until the child has exited).
//
// This packet itself is then stored into a `JoinInner` which in turn is placed
// in `JoinHandle` and `JoinGuard`. Due to the usage of `UnsafeCell` we need to
// manually worry about impls like Send and Sync. The type `T` should
// already always be Send (otherwise the thread could not have been created) and
// this type is inherently Sync because no methods take &self. Regardless,
// however, we add inheriting impls for Send/Sync to this type to ensure it's
// Send/Sync and that future modifications will still appropriately classify it.
struct Packet<T>(Arc<UnsafeCell<Option<Result<T>>>>);
unsafe impl<T: Send> Send for Packet<T> {}
unsafe impl<T: Sync> Sync for Packet<T> {}
/// Inner representation for `JoinHandle`.
struct JoinInner<T> {
native: Option<imp::Thread>,
thread: Thread,
packet: Packet<T>,
}
impl<T> JoinInner<T> {
fn join(&mut self) -> Result<T> {
// Wait for the underlying OS thread to exit...
self.native.take().unwrap().join();
// ...then take the result out of the shared packet; the child wrote it
// before exiting, and the join above synchronizes with that write.
unsafe { (*self.packet.0.get()).take().unwrap() }
}
}
/// An owned permission to join on a thread (block on its termination).
///
/// A `JoinHandle` *detaches* the associated thread when it is dropped, which
/// means that there is no longer any handle to the thread and no way to `join`
/// on it.
///
/// Due to platform restrictions, it is not possible to [`Clone`] this
/// handle: the ability to join a thread is a uniquely-owned permission.
///
/// This `struct` is created by the [`thread::spawn`] function and the
/// [`thread::Builder::spawn`] method.
///
/// # Examples
///
/// Creation from [`thread::spawn`]:
///
/// ```
/// use std::thread;
///
/// let join_handle: thread::JoinHandle<_> = thread::spawn(|| {
/// // some work here
/// });
/// ```
///
/// Creation from [`thread::Builder::spawn`]:
///
/// ```
/// use std::thread;
///
/// let builder = thread::Builder::new();
///
/// let join_handle: thread::JoinHandle<_> = builder.spawn(|| {
/// // some work here
/// }).unwrap();
/// ```
///
/// Child being detached and outliving its parent:
///
/// ```no_run
/// use std::thread;
/// use std::time::Duration;
///
/// let original_thread = thread::spawn(|| {
/// let _detached_thread = thread::spawn(|| {
/// // Here we sleep to make sure that the first thread returns before.
/// thread::sleep(Duration::from_millis(10));
/// // This will be called, even though the JoinHandle is dropped.
/// println!("♫ Still alive ♫");
/// });
/// });
///
/// original_thread.join().expect("The thread being joined has panicked");
/// println!("Original thread is joined.");
///
/// // We make sure that the new thread has time to run, before the main
/// // thread returns.
///
/// thread::sleep(Duration::from_millis(1000));
/// ```
///
/// [`thread::Builder::spawn`]: Builder::spawn
/// [`thread::spawn`]: spawn
#[stable(feature = "rust1", since = "1.0.0")]
pub struct JoinHandle<T>(JoinInner<T>);
#[stable(feature = "joinhandle_impl_send_sync", since = "1.29.0")]
unsafe impl<T> Send for JoinHandle<T> {}
#[stable(feature = "joinhandle_impl_send_sync", since = "1.29.0")]
unsafe impl<T> Sync for JoinHandle<T> {}
impl<T> JoinHandle<T> {
/// Extracts a handle to the underlying thread.
///
/// # Examples
///
/// ```
/// use std::thread;
///
/// let builder = thread::Builder::new();
///
/// let join_handle: thread::JoinHandle<_> = builder.spawn(|| {
/// // some work here
/// }).unwrap();
///
/// let thread = join_handle.thread();
/// println!("thread id: {:?}", thread.id());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn thread(&self) -> &Thread {
&self.0.thread
}
/// Waits for the associated thread to finish.
///
/// In terms of [atomic memory orderings], the completion of the associated
/// thread synchronizes with this function returning. In other words, all
/// operations performed by that thread are ordered before all
/// operations that happen after `join` returns.
///
/// If the child thread panics, [`Err`] is returned with the parameter given
/// to [`panic!`].
///
/// [`Err`]: crate::result::Result::Err
/// [atomic memory orderings]: crate::sync::atomic
///
/// # Panics
///
/// This function may panic on some platforms if a thread attempts to join
/// itself or otherwise may create a deadlock with joining threads.
///
/// # Examples
///
/// ```
/// use std::thread;
///
/// let builder = thread::Builder::new();
///
/// let join_handle: thread::JoinHandle<_> = builder.spawn(|| {
/// // some work here
/// }).unwrap();
/// join_handle.join().expect("Couldn't join on the associated thread");
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn join(mut self) -> Result<T> {
self.0.join()
}
}
impl<T> AsInner<imp::Thread> for JoinHandle<T> {
fn as_inner(&self) -> &imp::Thread {
self.0.native.as_ref().unwrap()
}
}
impl<T> IntoInner<imp::Thread> for JoinHandle<T> {
fn into_inner(self) -> imp::Thread {
self.0.native.unwrap()
}
}
#[stable(feature = "std_debug", since = "1.16.0")]
impl<T> fmt::Debug for JoinHandle<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("JoinHandle").finish_non_exhaustive()
}
}
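// Compile-time assertion (never called at runtime) that the public handle
// types stay `Send + Sync`; if an impl above regresses, this stops compiling.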
fn _assert_sync_and_send() {
fn _assert_both<T: Send + Sync>() {}
_assert_both::<JoinHandle<()>>();
_assert_both::<Thread>();
}
/// Returns the number of hardware threads available to the program.
///
/// This value should be considered only a hint.
///
/// # Platform-specific behavior
///
/// If interpreted as the number of actual hardware threads, it may undercount on
/// Windows systems with more than 64 hardware threads. If interpreted as the
/// available concurrency for that process, it may overcount on Windows systems
/// when limited by a process-wide affinity mask or job object limitations, and
/// it may overcount on Linux systems when limited by a process-wide affinity
/// mask or affected by cgroups limits.
///
/// # Errors
///
/// This function will return an error in the following situations, but is not
/// limited to just these cases:
///
/// - If the number of hardware threads is not known for the target platform.
/// - The process lacks permissions to view the number of hardware threads
/// available.
///
/// # Examples
///
/// ```
/// # #![allow(dead_code)]
/// #![feature(available_concurrency)]
/// use std::thread;
///
/// let count = thread::available_concurrency().map(|n| n.get()).unwrap_or(1);
/// ```
#[unstable(feature = "available_concurrency", issue = "74479")]
pub fn available_concurrency() -> io::Result<NonZeroUsize> {
imp::available_concurrency()
}
| {
unsafe { self.spawn_unchecked(f) }
} |
cli-self-upd.rs | //! Testing self install, uninstall and update
pub mod mock;
use crate::mock::clitools::{
self, expect_err, expect_err_ex, expect_ok, expect_ok_contains, expect_ok_ex, expect_stderr_ok,
expect_stdout_ok, this_host_triple, Config, Scenario,
};
use crate::mock::dist::calc_hash;
use crate::mock::{get_path, restore_path};
use lazy_static::lazy_static;
use remove_dir_all::remove_dir_all;
use rustup::utils::{raw, utils};
use rustup::Notification;
use std::env;
use std::env::consts::EXE_SUFFIX;
use std::fs;
use std::path::Path;
use std::process::Command;
use std::sync::Mutex;
use tempdir::TempDir;
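// Expands a test-expectation format string with the running host's target
// triple substituted, e.g. for_host!("stable-{0}") might yield
// "stable-x86_64-unknown-linux-gnu".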
macro_rules! for_host {
($s: expr) => {
&format!($s, this_host_triple())
};
}
const TEST_VERSION: &str = "1.1.1";
pub fn setup(f: &dyn Fn(&Config)) {
clitools::setup(Scenario::SimpleV2, &|config| {
// Lock protects environment variables
lazy_static! {
static ref LOCK: Mutex<()> = Mutex::new(());
}
let _g = LOCK.lock();
// On Windows these tests mess with the user's PATH. Save
// and restore it here to keep from trashing things.
let saved_path = get_path();
let _g = scopeguard::guard(saved_path, restore_path);
f(config);
});
}
pub fn update_setup(f: &dyn Fn(&Config, &Path)) {
setup(&|config| {
// Create a mock self-update server
let self_dist_tmp = TempDir::new("self_dist").unwrap();
let self_dist = self_dist_tmp.path();
let trip = this_host_triple();
let dist_dir = self_dist.join(&format!("archive/{}/{}", TEST_VERSION, trip));
let dist_exe = dist_dir.join(&format!("rustup-init{}", EXE_SUFFIX));
let rustup_bin = config.exedir.join(&format!("rustup-init{}", EXE_SUFFIX));
fs::create_dir_all(dist_dir).unwrap();
output_release_file(self_dist, "1", TEST_VERSION);
fs::copy(&rustup_bin, &dist_exe).unwrap();
// Modify the exe so it hashes differently
raw::append_file(&dist_exe, "").unwrap();
let root_url = format!("file://{}", self_dist.display());
env::set_var("RUSTUP_UPDATE_ROOT", root_url);
f(config, self_dist);
});
}
fn output_release_file(dist_dir: &Path, schema: &str, version: &str) {
let contents = format!(
r#"
schema-version = "{}"
version = "{}"
"#,
schema, version
);
let file = dist_dir.join("release-stable.toml");
utils::write_file("release", &file, &contents).unwrap();
}
#[test]
fn install_bins_to_cargo_home() {
setup(&|config| {
expect_ok(config, &["rustup-init", "-y"]);
let rustup = config.cargodir.join(&format!("bin/rustup{}", EXE_SUFFIX));
let rustc = config.cargodir.join(&format!("bin/rustc{}", EXE_SUFFIX));
let rustdoc = config.cargodir.join(&format!("bin/rustdoc{}", EXE_SUFFIX));
let cargo = config.cargodir.join(&format!("bin/cargo{}", EXE_SUFFIX));
let rust_lldb = config
.cargodir
.join(&format!("bin/rust-lldb{}", EXE_SUFFIX));
let rust_gdb = config.cargodir.join(&format!("bin/rust-gdb{}", EXE_SUFFIX));
assert!(rustup.exists());
assert!(rustc.exists());
assert!(rustdoc.exists());
assert!(cargo.exists());
assert!(rust_lldb.exists());
assert!(rust_gdb.exists());
});
}
#[test]
fn install_twice() {
setup(&|config| {
expect_ok(config, &["rustup-init", "-y"]);
expect_ok(config, &["rustup-init", "-y"]);
let rustup = config.cargodir.join(&format!("bin/rustup{}", EXE_SUFFIX));
assert!(rustup.exists());
});
}
#[test]
#[cfg(unix)]
fn bins_are_executable() {
setup(&|config| {
expect_ok(config, &["rustup-init", "-y"]);
let rustup = config.cargodir.join(&format!("bin/rustup{}", EXE_SUFFIX));
let rustc = config.cargodir.join(&format!("bin/rustc{}", EXE_SUFFIX));
let rustdoc = config.cargodir.join(&format!("bin/rustdoc{}", EXE_SUFFIX));
let cargo = config.cargodir.join(&format!("bin/cargo{}", EXE_SUFFIX));
let rust_lldb = config
.cargodir
.join(&format!("bin/rust-lldb{}", EXE_SUFFIX));
let rust_gdb = config.cargodir.join(&format!("bin/rust-gdb{}", EXE_SUFFIX));
assert!(is_exe(&rustup));
assert!(is_exe(&rustc));
assert!(is_exe(&rustdoc));
assert!(is_exe(&cargo));
assert!(is_exe(&rust_lldb));
assert!(is_exe(&rust_gdb));
});
fn is_exe(path: &Path) -> bool {
use std::os::unix::fs::MetadataExt;
let mode = path.metadata().unwrap().mode();
// Installed binaries should be rwxr-xr-x (0o755).
mode & 0o777 == 0o755
}
}
#[test]
fn install_creates_cargo_home() {
setup(&|config| {
remove_dir_all(&config.cargodir).unwrap();
remove_dir_all(&config.rustupdir).unwrap();
expect_ok(config, &["rustup-init", "-y"]);
assert!(config.cargodir.exists());
});
}
#[test]
fn uninstall_deletes_bins() {
setup(&|config| {
expect_ok(config, &["rustup-init", "-y"]);
expect_ok(config, &["rustup", "self", "uninstall", "-y"]);
let rustup = config.cargodir.join(&format!("bin/rustup{}", EXE_SUFFIX));
let rustc = config.cargodir.join(&format!("bin/rustc{}", EXE_SUFFIX));
let rustdoc = config.cargodir.join(&format!("bin/rustdoc{}", EXE_SUFFIX));
let cargo = config.cargodir.join(&format!("bin/cargo{}", EXE_SUFFIX));
let rust_lldb = config
.cargodir
.join(&format!("bin/rust-lldb{}", EXE_SUFFIX));
let rust_gdb = config.cargodir.join(&format!("bin/rust-gdb{}", EXE_SUFFIX));
assert!(!rustup.exists());
assert!(!rustc.exists());
assert!(!rustdoc.exists());
assert!(!cargo.exists());
assert!(!rust_lldb.exists());
assert!(!rust_gdb.exists());
});
}
#[test]
fn uninstall_works_if_some_bins_dont_exist() {
setup(&|config| {
expect_ok(config, &["rustup-init", "-y"]);
let rustup = config.cargodir.join(&format!("bin/rustup{}", EXE_SUFFIX));
let rustc = config.cargodir.join(&format!("bin/rustc{}", EXE_SUFFIX));
let rustdoc = config.cargodir.join(&format!("bin/rustdoc{}", EXE_SUFFIX));
let cargo = config.cargodir.join(&format!("bin/cargo{}", EXE_SUFFIX));
let rust_lldb = config
.cargodir
.join(&format!("bin/rust-lldb{}", EXE_SUFFIX));
let rust_gdb = config.cargodir.join(&format!("bin/rust-gdb{}", EXE_SUFFIX));
fs::remove_file(&rustc).unwrap();
fs::remove_file(&cargo).unwrap();
expect_ok(config, &["rustup", "self", "uninstall", "-y"]);
assert!(!rustup.exists());
assert!(!rustc.exists());
assert!(!rustdoc.exists());
assert!(!cargo.exists());
assert!(!rust_lldb.exists());
assert!(!rust_gdb.exists());
});
}
#[test]
fn uninstall_deletes_rustup_home() {
setup(&|config| {
expect_ok(config, &["rustup-init", "-y"]);
expect_ok(config, &["rustup", "default", "nightly"]);
expect_ok(config, &["rustup", "self", "uninstall", "-y"]);
assert!(!config.rustupdir.exists());
});
}
#[test]
fn uninstall_works_if_rustup_home_doesnt_exist() {
setup(&|config| {
expect_ok(config, &["rustup-init", "-y"]);
raw::remove_dir(&config.rustupdir).unwrap();
expect_ok(config, &["rustup", "self", "uninstall", "-y"]);
});
}
#[test]
fn uninstall_deletes_cargo_home() {
setup(&|config| {
expect_ok(config, &["rustup-init", "-y"]);
expect_ok(config, &["rustup", "self", "uninstall", "-y"]);
assert!(!config.cargodir.exists());
});
}
#[test]
fn uninstall_fails_if_not_installed() {
setup(&|config| {
expect_ok(config, &["rustup-init", "-y"]);
let rustup = config.cargodir.join(&format!("bin/rustup{}", EXE_SUFFIX));
fs::remove_file(&rustup).unwrap();
expect_err(
config,
&["rustup", "self", "uninstall", "-y"],
"rustup is not installed",
);
});
}
// The other tests here just run rustup from a temp directory. This
// does the uninstall by actually invoking the installed binary in
// order to test that it can successfully delete itself.
#[test]
#[cfg_attr(target_os = "macos", ignore)] // FIXME #1515
fn uninstall_self_delete_works() {
setup(&|config| {
expect_ok(config, &["rustup-init", "-y"]);
let rustup = config.cargodir.join(&format!("bin/rustup{}", EXE_SUFFIX));
let mut cmd = Command::new(rustup.clone());
cmd.args(&["self", "uninstall", "-y"]);
clitools::env(config, &mut cmd);
let out = cmd.output().unwrap();
println!("out: {}", String::from_utf8(out.stdout).unwrap());
println!("err: {}", String::from_utf8(out.stderr).unwrap());
assert!(out.status.success());
assert!(!rustup.exists());
assert!(!config.cargodir.exists());
let rustc = config.cargodir.join(&format!("bin/rustc{}", EXE_SUFFIX));
let rustdoc = config.cargodir.join(&format!("bin/rustdoc{}", EXE_SUFFIX));
let cargo = config.cargodir.join(&format!("bin/cargo{}", EXE_SUFFIX));
let rust_lldb = config
.cargodir
.join(&format!("bin/rust-lldb{}", EXE_SUFFIX));
let rust_gdb = config.cargodir.join(&format!("bin/rust-gdb{}", EXE_SUFFIX));
assert!(!rustc.exists());
assert!(!rustdoc.exists());
assert!(!cargo.exists());
assert!(!rust_lldb.exists());
assert!(!rust_gdb.exists());
});
}
// On Windows, `rustup self uninstall` temporarily puts a rustup-gc-$randomnumber.exe
// file in CONFIG.CARGODIR/..; check that it doesn't exist afterwards.
#[test]
fn uninstall_doesnt_leave_gc_file() {
use std::thread;
use std::time::Duration;
setup(&|config| {
expect_ok(config, &["rustup-init", "-y"]);
expect_ok(config, &["rustup", "self", "uninstall", "-y"]);
// The gc removal happens after rustup terminates. Give it a moment.
thread::sleep(Duration::from_millis(100));
let parent = config.cargodir.parent().unwrap();
// Actually, there just shouldn't be any files here
for dirent in fs::read_dir(parent).unwrap() {
let dirent = dirent.unwrap();
println!("{}", dirent.path().display());
panic!();
}
})
}
#[test]
#[ignore]
fn | () {}
#[cfg(unix)]
fn install_adds_path_to_rc(rcfile: &str) {
setup(&|config| {
let my_rc = "foo\nbar\nbaz";
let rc = config.homedir.join(rcfile);
raw::write_file(&rc, my_rc).unwrap();
expect_ok(config, &["rustup-init", "-y"]);
let new_rc = raw::read_file(&rc).unwrap();
let addition = format!(r#"export PATH="{}/bin:$PATH""#, config.cargodir.display());
let expected = format!("{}\n{}\n", my_rc, addition);
assert_eq!(new_rc, expected);
});
}
#[test]
#[cfg(unix)]
fn install_adds_path_to_profile() {
install_adds_path_to_rc(".profile");
}
#[test]
#[cfg(unix)]
fn install_adds_path_to_bash_profile() {
install_adds_path_to_rc(".bash_profile");
}
#[test]
#[cfg(unix)]
fn install_does_not_add_path_to_bash_profile_that_doesnt_exist() {
setup(&|config| {
let rc = config.homedir.join(".bash_profile");
expect_ok(config, &["rustup-init", "-y"]);
assert!(!rc.exists());
});
}
#[test]
#[cfg(unix)]
fn install_with_zsh_adds_path_to_zprofile() {
setup(&|config| {
let my_rc = "foo\nbar\nbaz";
let rc = config.homedir.join(".zprofile");
raw::write_file(&rc, my_rc).unwrap();
let mut cmd = clitools::cmd(config, "rustup-init", &["-y"]);
cmd.env("SHELL", "zsh");
assert!(cmd.output().unwrap().status.success());
let new_rc = raw::read_file(&rc).unwrap();
let addition = format!(r#"export PATH="{}/bin:$PATH""#, config.cargodir.display());
let expected = format!("{}\n{}\n", my_rc, addition);
assert_eq!(new_rc, expected);
});
}
#[test]
#[cfg(unix)]
fn install_with_zsh_adds_path_to_zdotdir_zprofile() {
setup(&|config| {
let zdotdir = TempDir::new("zdotdir").unwrap();
let my_rc = "foo\nbar\nbaz";
let rc = zdotdir.path().join(".zprofile");
raw::write_file(&rc, my_rc).unwrap();
let mut cmd = clitools::cmd(config, "rustup-init", &["-y"]);
cmd.env("SHELL", "zsh");
cmd.env("ZDOTDIR", zdotdir.path());
assert!(cmd.output().unwrap().status.success());
let new_rc = raw::read_file(&rc).unwrap();
let addition = format!(r#"export PATH="{}/bin:$PATH""#, config.cargodir.display());
let expected = format!("{}\n{}\n", my_rc, addition);
assert_eq!(new_rc, expected);
});
}
#[test]
#[cfg(unix)]
fn install_adds_path_to_rcfile_just_once() {
setup(&|config| {
let my_profile = "foo\nbar\nbaz";
let profile = config.homedir.join(".profile");
raw::write_file(&profile, my_profile).unwrap();
expect_ok(config, &["rustup-init", "-y"]);
expect_ok(config, &["rustup-init", "-y"]);
let new_profile = raw::read_file(&profile).unwrap();
let addition = format!(r#"export PATH="{}/bin:$PATH""#, config.cargodir.display());
let expected = format!("{}\n{}\n", my_profile, addition);
assert_eq!(new_profile, expected);
});
}
#[cfg(unix)]
fn uninstall_removes_path_from_rc(rcfile: &str) {
setup(&|config| {
let my_rc = "foo\nbar\nbaz";
let rc = config.homedir.join(rcfile);
raw::write_file(&rc, my_rc).unwrap();
expect_ok(config, &["rustup-init", "-y"]);
expect_ok(config, &["rustup", "self", "uninstall", "-y"]);
let new_rc = raw::read_file(&rc).unwrap();
assert_eq!(new_rc, my_rc);
});
}
#[test]
#[cfg(unix)]
fn uninstall_removes_path_from_profile() {
uninstall_removes_path_from_rc(".profile");
}
#[test]
#[cfg(unix)]
fn uninstall_removes_path_from_bash_profile() {
uninstall_removes_path_from_rc(".bash_profile");
}
#[test]
#[cfg(unix)]
fn uninstall_doesnt_touch_rc_files_that_dont_contain_cargo_home() {
setup(&|config| {
let my_rc = "foo\nbar\nbaz";
expect_ok(config, &["rustup-init", "-y"]);
expect_ok(config, &["rustup", "self", "uninstall", "-y"]);
let profile = config.homedir.join(".profile");
raw::write_file(&profile, my_rc).unwrap();
let profile = raw::read_file(&profile).unwrap();
assert_eq!(profile, my_rc);
});
}
// In the default case we want to write $HOME/.cargo/bin as the path,
// not the full path.
#[test]
#[cfg(unix)]
fn when_cargo_home_is_the_default_write_path_specially() {
setup(&|config| {
// Override the test harness so that cargo home looks like
// $HOME/.cargo by removing CARGO_HOME from the environment;
// otherwise the literal path will be written to the file.
let my_profile = "foo\nbar\nbaz";
let profile = config.homedir.join(".profile");
raw::write_file(&profile, my_profile).unwrap();
let mut cmd = clitools::cmd(config, "rustup-init", &["-y"]);
cmd.env_remove("CARGO_HOME");
assert!(cmd.output().unwrap().status.success());
let new_profile = raw::read_file(&profile).unwrap();
let expected = format!("{}\nexport PATH=\"$HOME/.cargo/bin:$PATH\"\n", my_profile);
assert_eq!(new_profile, expected);
let mut cmd = clitools::cmd(config, "rustup", &["self", "uninstall", "-y"]);
cmd.env_remove("CARGO_HOME");
assert!(cmd.output().unwrap().status.success());
let new_profile = raw::read_file(&profile).unwrap();
assert_eq!(new_profile, my_profile);
});
}
#[test]
#[cfg(windows)]
fn install_adds_path() {
setup(&|config| {
expect_ok(config, &["rustup-init", "-y"]);
let path = config.cargodir.join("bin").to_string_lossy().to_string();
assert!(get_path().unwrap().contains(&path));
});
}
#[test]
#[cfg(windows)]
fn install_does_not_add_path_twice() {
setup(&|config| {
expect_ok(config, &["rustup-init", "-y"]);
expect_ok(config, &["rustup-init", "-y"]);
let path = config.cargodir.join("bin").to_string_lossy().to_string();
assert_eq!(get_path().unwrap().matches(&path).count(), 1);
});
}
#[test]
#[cfg(windows)]
fn uninstall_removes_path() {
setup(&|config| {
expect_ok(config, &["rustup-init", "-y"]);
expect_ok(config, &["rustup", "self", "uninstall", "-y"]);
let path = config.cargodir.join("bin").to_string_lossy().to_string();
assert!(!get_path().unwrap().contains(&path));
});
}
#[test]
#[cfg(unix)]
fn install_doesnt_modify_path_if_passed_no_modify_path() {
setup(&|config| {
let profile = config.homedir.join(".profile");
expect_ok(config, &["rustup-init", "-y", "--no-modify-path"]);
assert!(!profile.exists());
});
}
#[test]
#[cfg(windows)]
fn install_doesnt_modify_path_if_passed_no_modify_path() {
use winreg::enums::{HKEY_CURRENT_USER, KEY_READ, KEY_WRITE};
use winreg::RegKey;
setup(&|config| {
let root = RegKey::predef(HKEY_CURRENT_USER);
let environment = root
.open_subkey_with_flags("Environment", KEY_READ | KEY_WRITE)
.unwrap();
let old_path = environment.get_raw_value("PATH").unwrap();
expect_ok(config, &["rustup-init", "-y", "--no-modify-path"]);
let root = RegKey::predef(HKEY_CURRENT_USER);
let environment = root
.open_subkey_with_flags("Environment", KEY_READ | KEY_WRITE)
.unwrap();
let new_path = environment.get_raw_value("PATH").unwrap();
assert!(old_path == new_path);
});
}
#[test]
fn update_exact() {
let version = env!("CARGO_PKG_VERSION");
let expected_output = &(r"info: checking for self-updates
info: downloading self-update
info: rustup updated successfully to "
.to_owned()
+ version
+ "
");
update_setup(&|config, _| {
expect_ok(config, &["rustup-init", "-y"]);
expect_ok_ex(config, &["rustup", "self", "update"], r"", expected_output)
});
}
#[test]
fn update_but_not_installed() {
update_setup(&|config, _| {
expect_err_ex(
config,
&["rustup", "self", "update"],
r"",
&format!(
r"error: rustup is not installed at '{}'
",
config.cargodir.display()
),
);
});
}
#[test]
fn update_but_delete_existing_updater_first() {
update_setup(&|config, _| {
// The updater is stored in a known location
let setup = config
.cargodir
.join(&format!("bin/rustup-init{}", EXE_SUFFIX));
expect_ok(config, &["rustup-init", "-y"]);
// If it happens to already exist for some reason it
// should just be deleted.
raw::write_file(&setup, "").unwrap();
expect_ok(config, &["rustup", "self", "update"]);
let rustup = config.cargodir.join(&format!("bin/rustup{}", EXE_SUFFIX));
assert!(rustup.exists());
});
}
#[test]
fn update_download_404() {
update_setup(&|config, self_dist| {
expect_ok(config, &["rustup-init", "-y"]);
let trip = this_host_triple();
let dist_dir = self_dist.join(&format!("archive/{}/{}", TEST_VERSION, trip));
let dist_exe = dist_dir.join(&format!("rustup-init{}", EXE_SUFFIX));
fs::remove_file(dist_exe).unwrap();
expect_err(
config,
&["rustup", "self", "update"],
"could not download file",
);
});
}
#[test]
fn update_bogus_version() {
update_setup(&|config, _| {
expect_ok(config, &["rustup-init", "-y"]);
expect_err(
config,
&["rustup", "update", "1.0.0-alpha"],
"could not download nonexistent rust version `1.0.0-alpha`",
);
});
}
// Check that rustup.exe has changed after the update. This
// is hard on Windows because the running process needs to exit
// before the new updater can delete it.
#[test]
fn update_updates_rustup_bin() {
update_setup(&|config, _| {
expect_ok(config, &["rustup-init", "-y"]);
let bin = config.cargodir.join(&format!("bin/rustup{}", EXE_SUFFIX));
let before_hash = calc_hash(&bin);
// Running the self update command on the installed binary,
// so that the running binary must be replaced.
let mut cmd = Command::new(&bin);
cmd.args(&["self", "update"]);
clitools::env(config, &mut cmd);
let out = cmd.output().unwrap();
println!("out: {}", String::from_utf8(out.stdout).unwrap());
println!("err: {}", String::from_utf8(out.stderr).unwrap());
assert!(out.status.success());
let after_hash = calc_hash(&bin);
assert_ne!(before_hash, after_hash);
});
}
#[test]
fn update_bad_schema() {
update_setup(&|config, self_dist| {
expect_ok(config, &["rustup-init", "-y"]);
output_release_file(self_dist, "17", "1.1.1");
expect_err(
config,
&["rustup", "self", "update"],
"unknown schema version",
);
});
}
#[test]
fn update_no_change() {
let version = env!("CARGO_PKG_VERSION");
update_setup(&|config, self_dist| {
expect_ok(config, &["rustup-init", "-y"]);
output_release_file(self_dist, "1", version);
expect_ok_ex(
config,
&["rustup", "self", "update"],
r"",
r"info: checking for self-updates
",
);
});
}
#[test]
fn rustup_self_updates() {
update_setup(&|config, _| {
expect_ok(config, &["rustup-init", "-y"]);
let bin = config.cargodir.join(&format!("bin/rustup{}", EXE_SUFFIX));
let before_hash = calc_hash(&bin);
expect_ok(config, &["rustup", "update"]);
let after_hash = calc_hash(&bin);
assert_ne!(before_hash, after_hash);
})
}
#[test]
fn rustup_self_updates_with_specified_toolchain() {
update_setup(&|config, _| {
expect_ok(config, &["rustup-init", "-y"]);
let bin = config.cargodir.join(&format!("bin/rustup{}", EXE_SUFFIX));
let before_hash = calc_hash(&bin);
expect_ok(config, &["rustup", "update", "stable"]);
let after_hash = calc_hash(&bin);
assert_ne!(before_hash, after_hash);
})
}
#[test]
fn rustup_no_self_update_with_specified_toolchain() {
update_setup(&|config, _| {
expect_ok(config, &["rustup-init", "-y"]);
let bin = config.cargodir.join(&format!("bin/rustup{}", EXE_SUFFIX));
let before_hash = calc_hash(&bin);
expect_ok(config, &["rustup", "update", "stable", "--no-self-update"]);
let after_hash = calc_hash(&bin);
assert_eq!(before_hash, after_hash);
})
}
#[test]
fn rustup_self_update_exact() {
update_setup(&|config, _| {
expect_ok(config, &["rustup-init", "-y"]);
expect_ok_ex(
config,
&["rustup", "update"],
for_host!(
r"
stable-{0} unchanged - 1.1.0 (hash-s-2)
"
),
for_host!(
r"info: syncing channel updates for 'stable-{0}'
info: checking for self-updates
info: downloading self-update
"
),
);
})
}
// Because self-delete on Windows is hard, rustup-init doesn't
// do it. It instead leaves itself installed for cleanup by later
// invocations of rustup.
#[test]
fn updater_leaves_itself_for_later_deletion() {
update_setup(&|config, _| {
expect_ok(config, &["rustup-init", "-y"]);
expect_ok(config, &["rustup", "update", "nightly"]);
expect_ok(config, &["rustup", "self", "update"]);
let setup = config
.cargodir
.join(&format!("bin/rustup-init{}", EXE_SUFFIX));
assert!(setup.exists());
});
}
#[test]
fn updater_is_deleted_after_running_rustup() {
update_setup(&|config, _| {
expect_ok(config, &["rustup-init", "-y"]);
expect_ok(config, &["rustup", "update", "nightly"]);
expect_ok(config, &["rustup", "self", "update"]);
expect_ok(config, &["rustup", "update", "nightly", "--no-self-update"]);
let setup = config
.cargodir
.join(&format!("bin/rustup-init{}", EXE_SUFFIX));
assert!(!setup.exists());
});
}
#[test]
fn updater_is_deleted_after_running_rustc() {
update_setup(&|config, _| {
expect_ok(config, &["rustup-init", "-y"]);
expect_ok(config, &["rustup", "default", "nightly"]);
expect_ok(config, &["rustup", "self", "update"]);
expect_ok(config, &["rustc", "--version"]);
let setup = config
.cargodir
.join(&format!("bin/rustup-init{}", EXE_SUFFIX));
assert!(!setup.exists());
});
}
#[test]
fn rustup_still_works_after_update() {
update_setup(&|config, _| {
expect_ok(config, &["rustup-init", "-y"]);
expect_ok(config, &["rustup", "default", "nightly"]);
expect_ok(config, &["rustup", "self", "update"]);
expect_stdout_ok(config, &["rustc", "--version"], "hash-n-2");
expect_ok(config, &["rustup", "default", "beta"]);
expect_stdout_ok(config, &["rustc", "--version"], "hash-b-2");
});
}
// There's a race condition between the updater replacing
// the rustup binary and tool hardlinks and subsequent
// invocations of rustup and rustc (on Windows).
#[test]
#[ignore]
fn update_stress_test() {}
// The installer used to be called rustup-setup. For compatibility it
// still needs to work in that mode.
#[test]
#[cfg(not(windows))]
fn as_rustup_setup() {
update_setup(&|config, _| {
let init = config.exedir.join(format!("rustup-init{}", EXE_SUFFIX));
let setup = config.exedir.join(format!("rustup-setup{}", EXE_SUFFIX));
fs::copy(&init, &setup).unwrap();
expect_ok(config, &["rustup-setup", "-y"]);
});
}
#[test]
fn first_install_exact() {
setup(&|config| {
expect_ok_contains(
config,
&["rustup-init", "-y"],
r"
stable installed - 1.1.0 (hash-s-2)
",
for_host!(
r"info: syncing channel updates for 'stable-{0}'
info: latest update on 2015-01-02, rust version 1.1.0
info: downloading component 'rust-std'
info: downloading component 'rustc'
info: downloading component 'cargo'
info: downloading component 'rust-docs'
info: installing component 'rust-std'
info: installing component 'rustc'
info: installing component 'cargo'
info: installing component 'rust-docs'
info: default toolchain set to 'stable'
"
),
);
});
}
#[test]
fn reinstall_exact() {
setup(&|config| {
expect_ok(config, &["rustup-init", "-y"]);
expect_stderr_ok(
config,
&["rustup-init", "-y"],
r"info: updating existing rustup installation
",
);
});
}
#[test]
#[cfg(unix)]
fn produces_env_file_on_unix() {
setup(&|config| {
// Override the test harness so that cargo home looks like
// $HOME/.cargo by removing CARGO_HOME from the environment;
// otherwise the literal path will be written to the file.
let mut cmd = clitools::cmd(config, "rustup-init", &["-y"]);
cmd.env_remove("CARGO_HOME");
assert!(cmd.output().unwrap().status.success());
let envfile = config.homedir.join(".cargo/env");
let envfile = raw::read_file(&envfile).unwrap();
assert!(envfile.contains(r#"export PATH="$HOME/.cargo/bin:$PATH""#));
});
}
#[test]
#[cfg(windows)]
fn doesnt_produce_env_file_on_windows() {}
#[test]
fn install_sets_up_stable() {
setup(&|config| {
expect_ok(config, &["rustup-init", "-y"]);
expect_stdout_ok(config, &["rustc", "--version"], "hash-s-2");
});
}
#[test]
fn install_sets_up_stable_unless_a_different_default_is_requested() {
setup(&|config| {
expect_ok(
config,
&["rustup-init", "-y", "--default-toolchain", "nightly"],
);
expect_stdout_ok(config, &["rustc", "--version"], "hash-n-2");
});
}
#[test]
fn install_sets_up_stable_unless_there_is_already_a_default() {
setup(&|config| {
expect_ok(config, &["rustup-init", "-y"]);
expect_ok(config, &["rustup", "default", "nightly"]);
expect_ok(config, &["rustup", "toolchain", "remove", "stable"]);
expect_ok(config, &["rustup-init", "-y"]);
expect_stdout_ok(config, &["rustc", "--version"], "hash-n-2");
expect_err(
config,
&["rustup", "run", "stable", "rustc", "--version"],
for_host!("toolchain 'stable-{0}' is not installed"),
);
});
}
#[test]
fn readline_no_stdin() {
setup(&|config| {
expect_err(
config,
&["rustup-init"],
"unable to read from stdin for confirmation",
);
});
}
#[test]
fn rustup_init_works_with_weird_names() {
// Browsers often rename bins to e.g. rustup-init(2).exe.
setup(&|config| {
let old = config.exedir.join(&format!("rustup-init{}", EXE_SUFFIX));
let new = config.exedir.join(&format!("rustup-init(2){}", EXE_SUFFIX));
utils::rename_file("test", &old, &new, &|_: Notification<'_>| {}).unwrap();
expect_ok(config, &["rustup-init(2)", "-y"]);
let rustup = config.cargodir.join(&format!("bin/rustup{}", EXE_SUFFIX));
assert!(rustup.exists());
});
}
// # 261
#[test]
#[cfg(windows)]
fn doesnt_write_wrong_path_type_to_reg() {
use winreg::enums::{RegType, HKEY_CURRENT_USER, KEY_READ, KEY_WRITE};
use winreg::RegKey;
setup(&|config| {
expect_ok(config, &["rustup-init", "-y"]);
let root = RegKey::predef(HKEY_CURRENT_USER);
let environment = root
.open_subkey_with_flags("Environment", KEY_READ | KEY_WRITE)
.unwrap();
let path = environment.get_raw_value("PATH").unwrap();
assert!(path.vtype == RegType::REG_EXPAND_SZ);
expect_ok(config, &["rustup", "self", "uninstall", "-y"]);
let root = RegKey::predef(HKEY_CURRENT_USER);
let environment = root
.open_subkey_with_flags("Environment", KEY_READ | KEY_WRITE)
.unwrap();
let path = environment.get_raw_value("PATH").unwrap();
assert!(path.vtype == RegType::REG_EXPAND_SZ);
});
}
// HKCU\Environment\PATH may not exist during install, and it may need to be
// deleted during uninstall if we remove the last path from it
#[test]
#[cfg(windows)]
fn windows_handle_empty_path_registry_key() {
use winreg::enums::{RegType, HKEY_CURRENT_USER, KEY_READ, KEY_WRITE};
use winreg::RegKey;
setup(&|config| {
let root = RegKey::predef(HKEY_CURRENT_USER);
let environment = root
.open_subkey_with_flags("Environment", KEY_READ | KEY_WRITE)
.unwrap();
let _ = environment.delete_value("PATH");
expect_ok(config, &["rustup-init", "-y"]);
let root = RegKey::predef(HKEY_CURRENT_USER);
let environment = root
.open_subkey_with_flags("Environment", KEY_READ | KEY_WRITE)
.unwrap();
let path = environment.get_raw_value("PATH").unwrap();
assert!(path.vtype == RegType::REG_EXPAND_SZ);
expect_ok(config, &["rustup", "self", "uninstall", "-y"]);
let root = RegKey::predef(HKEY_CURRENT_USER);
let environment = root
.open_subkey_with_flags("Environment", KEY_READ | KEY_WRITE)
.unwrap();
let path = environment.get_raw_value("PATH");
assert!(path.is_err());
});
}
#[test]
#[cfg(windows)]
fn windows_uninstall_removes_semicolon_from_path() {
use winreg::enums::{RegType, HKEY_CURRENT_USER, KEY_READ, KEY_WRITE};
use winreg::RegKey;
setup(&|config| {
let root = RegKey::predef(HKEY_CURRENT_USER);
let environment = root
.open_subkey_with_flags("Environment", KEY_READ | KEY_WRITE)
.unwrap();
// This time set the value of PATH and make sure it's restored exactly after uninstall,
// not leaving behind any semicolons
environment.set_value("PATH", &"foo").unwrap();
expect_ok(config, &["rustup-init", "-y"]);
let root = RegKey::predef(HKEY_CURRENT_USER);
let environment = root
.open_subkey_with_flags("Environment", KEY_READ | KEY_WRITE)
.unwrap();
let path = environment.get_raw_value("PATH").unwrap();
assert!(path.vtype == RegType::REG_EXPAND_SZ);
expect_ok(config, &["rustup", "self", "uninstall", "-y"]);
let root = RegKey::predef(HKEY_CURRENT_USER);
let environment = root
.open_subkey_with_flags("Environment", KEY_READ | KEY_WRITE)
.unwrap();
let path: String = environment.get_value("PATH").unwrap();
assert!(path == "foo");
});
}
#[test]
#[cfg(windows)]
fn install_doesnt_mess_with_a_non_unicode_path() {
use winreg::enums::{RegType, HKEY_CURRENT_USER, KEY_READ, KEY_WRITE};
use winreg::{RegKey, RegValue};
setup(&|config| {
let root = RegKey::predef(HKEY_CURRENT_USER);
let environment = root
.open_subkey_with_flags("Environment", KEY_READ | KEY_WRITE)
.unwrap();
let reg_value = RegValue {
bytes: vec![
0x00, 0xD8, // leading surrogate
0x01, 0x01, // bogus trailing surrogate
0x00, 0x00,
], // null
vtype: RegType::REG_EXPAND_SZ,
};
environment.set_raw_value("PATH", ®_value).unwrap();
expect_stderr_ok(config, &["rustup-init", "-y"],
"the registry key HKEY_CURRENT_USER\\Environment\\PATH does not contain valid Unicode. \
Not modifying the PATH variable");
let root = RegKey::predef(HKEY_CURRENT_USER);
let environment = root
.open_subkey_with_flags("Environment", KEY_READ | KEY_WRITE)
.unwrap();
let path = environment.get_raw_value("PATH").unwrap();
assert!(path.bytes == reg_value.bytes);
});
}
#[test]
#[cfg(windows)]
fn uninstall_doesnt_mess_with_a_non_unicode_path() {
use winreg::enums::{RegType, HKEY_CURRENT_USER, KEY_READ, KEY_WRITE};
use winreg::{RegKey, RegValue};
setup(&|config| {
expect_ok(config, &["rustup-init", "-y"]);
let root = RegKey::predef(HKEY_CURRENT_USER);
let environment = root
.open_subkey_with_flags("Environment", KEY_READ | KEY_WRITE)
.unwrap();
let reg_value = RegValue {
bytes: vec![
0x00, 0xD8, // leading surrogate
0x01, 0x01, // bogus trailing surrogate
0x00, 0x00,
], // null
vtype: RegType::REG_EXPAND_SZ,
};
environment.set_raw_value("PATH", ®_value).unwrap();
expect_stderr_ok(config, &["rustup", "self", "uninstall", "-y"],
"the registry key HKEY_CURRENT_USER\\Environment\\PATH does not contain valid Unicode. \
Not modifying the PATH variable");
let root = RegKey::predef(HKEY_CURRENT_USER);
let environment = root
.open_subkey_with_flags("Environment", KEY_READ | KEY_WRITE)
.unwrap();
let path = environment.get_raw_value("PATH").unwrap();
assert!(path.bytes == reg_value.bytes);
});
}
#[test]
#[ignore] // untestable
fn install_but_rustup_is_installed() {}
#[test]
#[ignore] // untestable
fn install_but_rustc_is_installed() {}
#[test]
fn install_but_rustup_sh_is_installed() {
setup(&|config| {
let rustup_dir = config.homedir.join(".rustup");
fs::create_dir_all(&rustup_dir).unwrap();
let version_file = rustup_dir.join("rustup-version");
raw::write_file(&version_file, "").unwrap();
expect_err(
config,
&["rustup-init", "-y"],
"cannot install while rustup.sh is installed",
);
});
}
#[test]
fn rls_proxy_set_up_after_install() {
setup(&|config| {
expect_ok(config, &["rustup-init", "-y"]);
expect_err(
config,
&["rls", "--version"],
&format!(
"'rls{}' is not installed for the toolchain 'stable-{}'",
EXE_SUFFIX,
this_host_triple(),
),
);
expect_ok(config, &["rustup", "component", "add", "rls"]);
expect_ok(config, &["rls", "--version"]);
});
}
#[test]
fn rls_proxy_set_up_after_update() {
update_setup(&|config, _| {
let rls_path = config.cargodir.join(format!("bin/rls{}", EXE_SUFFIX));
expect_ok(config, &["rustup-init", "-y"]);
fs::remove_file(&rls_path).unwrap();
expect_ok(config, &["rustup", "self", "update"]);
assert!(rls_path.exists());
});
}
#[test]
fn update_does_not_overwrite_rustfmt() {
update_setup(&|config, self_dist| {
expect_ok(config, &["rustup-init", "-y"]);
let version = env!("CARGO_PKG_VERSION");
output_release_file(self_dist, "1", version);
// Since we just did a fresh install, rustfmt will exist. Let's emulate
// it not existing in this test though by removing it just after our
// installation.
let rustfmt_path = config.cargodir.join(format!("bin/rustfmt{}", EXE_SUFFIX));
assert!(rustfmt_path.exists());
fs::remove_file(&rustfmt_path).unwrap();
raw::write_file(&rustfmt_path, "").unwrap();
assert_eq!(utils::file_size(&rustfmt_path).unwrap(), 0);
// Ok, now a self-update should complain about `rustfmt` not looking
// like rustup and the user should take some action.
expect_stderr_ok(
config,
&["rustup", "self", "update"],
"`rustfmt` is already installed",
);
assert!(rustfmt_path.exists());
assert_eq!(utils::file_size(&rustfmt_path).unwrap(), 0);
// Now simulate removing the rustfmt executable and rerunning a self
// update; this should install the rustup shim. Note that we don't run
// `rustup` here but rather the rustup we've actually installed, which
// helps reproduce bugs related to that file being held open by the
// current process.
fs::remove_file(&rustfmt_path).unwrap();
let installed_rustup = config.cargodir.join("bin/rustup");
expect_ok(
config,
&[installed_rustup.to_str().unwrap(), "self", "update"],
);
assert!(rustfmt_path.exists());
assert!(utils::file_size(&rustfmt_path).unwrap() > 0);
});
}
#[test]
fn update_installs_clippy_cargo_and() {
update_setup(&|config, self_dist| {
expect_ok(config, &["rustup-init", "-y"]);
let version = env!("CARGO_PKG_VERSION");
output_release_file(self_dist, "1", version);
let cargo_clippy_path = config
.cargodir
.join(format!("bin/cargo-clippy{}", EXE_SUFFIX));
assert!(cargo_clippy_path.exists());
});
}
| uninstall_stress_test |
test_api_controller.py | # -*- coding: utf-8 -*-
"""
tests.controllers.test_api_controller
"""
import jsonpickle
from .controller_test_base import *
from moesifapi.models import *
from datetime import *
class ApiControllerTests(ControllerTestBase):
@classmethod
def setUpClass(cls):
super(ApiControllerTests, cls).setUpClass()
cls.controller = cls.api_client.api
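# `cls.controller` exposes the Moesif ingestion endpoints (events, users,
# companies) exercised by the tests below.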
# Add Single Event via Ingestion API
def test_add_event(self):
# Parameters for the API call
req_headers = APIHelper.json_deserialize(""" {
"Host": "api.acmeinc.com",
"Accept": "*/*",
"Connection": "Keep-Alive",
"User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)",
"Content-Type": "application/json",
"Content-Length": "126",
"Accept-Encoding": "gzip"
} """)
req_body = APIHelper.json_deserialize( """{
"items": [
{
"type": 1,
"id": "fwfrf"
},
{
"type": 2,
"id": "d43d3f"
}
]
}""")
rsp_headers = APIHelper.json_deserialize(""" {
"Date": "Tue, 20 Aug 2019 23:46:49 GMT",
"Vary": "Accept-Encoding",
"Pragma": "no-cache",
"Expires": "-1",
"Content-Type": "application/json; charset=utf-8",
"Cache-Control": "no-cache"
} """)
rsp_body = APIHelper.json_deserialize( """{
"Error": "InvalidArgumentException",
"Message": "Missing field field_a"
}""")
metadata = APIHelper.json_deserialize("""{
"field1": "foo",
"field2": "bar"
}""")
event_req = EventRequestModel(time = datetime.utcnow() - timedelta(seconds=1),
uri = "https://api.acmeinc.com/items/reviews?&page=0&page_size=12®ion[]=Overig&sort=relevance",
verb = "PATCH",
api_version = "1.1.0",
ip_address = "61.48.220.123",
headers = req_headers,
body = req_body)
event_rsp = EventResponseModel(time = datetime.utcnow(),
status = 200,
headers = rsp_headers,
body = rsp_body)
event_model = EventModel(request = event_req,
response = event_rsp,
user_id = "my_user_id",
company_id = "my_company_id",
session_token = "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f",
metadata = metadata)
# Perform the API call through the SDK function
self.controller.create_event(event_model)
# Test response code
self.assertEquals(self.response_catcher.response.status_code, 201)
# Add Batched Events via Ingestion API
def test_add_batched_events(self):
# Parameters for the API call
body = APIHelper.json_deserialize('[{ "metadata": { "foo" : "bar" }, "request": { "time": "2016-09-09T04:45:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:45:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:46:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:46:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:47:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:47:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:48:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", 
"api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:48:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "exfzweachxjgznvKUYrxFcxv]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:49:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:49:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:50:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:50:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "recvreedfef", "session_token": "xcvkrjmcfghwuignrmcmhxdhaaezse4w]s98y18cx98q3yhwmnhcfx43f" } ]', EventModel.from_dictionary)
for val in body: | self.controller.create_events_batch(body)
# Test response code
self.assertEqual(self.response_catcher.response.status_code, 201)
# Update Single User via Ingestion API
def test_update_user(self):
# Parameters for the API call
metadata = APIHelper.json_deserialize(""" {
"email": "[email protected]",
"name": "pythonapiuser",
"custom": "testdata"
} """)
user_model = UserModel(
user_id="12345",
company_id="67890",
session_token="23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f",
modified_time=datetime.utcnow(),
metadata=metadata,
campaign=CampaignModel(utm_source="Newsletter", utm_medium="Email"))
# Perform the API call through the SDK function
self.controller.update_user(user_model)
# Test response code
self.assertEqual(self.response_catcher.response.status_code, 201)
# Update Batched Users via Ingestion API
def test_update_users_batch(self):
# Parameter for the API call
body = [UserModel(user_id="1234", company_id="6789", modified_time=datetime.utcnow(),
session_token="23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f", ),
UserModel(user_id="12345", company_id="67890", modified_time=datetime.utcnow(),
session_token="23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f",
metadata=APIHelper.json_deserialize(""" {"email": "[email protected]",
"name": "pythonapiuser", "string_field": "value_1", "number_field": 0 } """))]
# Perform the API call through the SDK function
self.controller.update_users_batch(body)
# Test Response code
self.assertEqual(self.response_catcher.response.status_code, 201)
# Get Application configuration
def test_get_app_config(self):
# Perform the API call through the SDK function
response = self.controller.get_app_config().__dict__
# Test Response code
self.assertEqual(self.response_catcher.response.status_code, 200)
self.assertIsNotNone(response["raw_body"])
self.assertIsNotNone(response["headers"]["X-Moesif-Config-ETag"])
# Add Single Company via Ingestion API
def test_update_company(self):
# Parameter for the API call
company_model = CompanyModel(
company_id="67890",
modified_time=datetime.utcnow(),
campaign=CampaignModel(utm_source="Adwords", utm_medium="Twitter"))
# Perform the API call through the SDK function
self.controller.update_company(company_model)
# Test Response code
self.assertEqual(self.response_catcher.response.status_code, 201)
# Add Batched Companies via Ingestion API
def test_update_companies_batch(self):
# Parameter for the API call
body = [CompanyModel(company_id="67890", modified_time=datetime.utcnow(), company_domain="moesif"),
CompanyModel(company_id="6789", modified_time=datetime.utcnow(), company_domain="moesif",
metadata=APIHelper.json_deserialize(""" {"string_field": "value_1", "number_field": 0 } """))]
# Perform the API call through the SDK function
self.controller.update_companies_batch(body)
# Test Response code
self.assertEqual(self.response_catcher.response.status_code, 201) | val.request.time = datetime.utcnow() - timedelta(seconds=1)
val.response.time = datetime.utcnow()
# Perform the API call through the SDK function |
rnvp.py | import tensorflow as tf
class RNVP(tf.Module):
"""Affine half (aka Real Non-Volume Preserving) flow (x = z * exp(s) + t),
where a randomly selected half z1 of the dimensions in z are transformed as an
affine function of the other half z2, i.e. scaled by s(z2) and shifted by t(z2).
From "Density estimation using Real NVP", Dinh et al. (May 2016)
https://arxiv.org/abs/1605.08803
This implementation uses the numerically stable updates introduced by IAF:
https://arxiv.org/abs/1606.04934
"""
def __init__(self, dim, h_sizes=[30], activation="tanh", **kwargs):
|
def forward(self, z): # z -> x
# Get random Bernoulli mask. This decides which channels will remain
# unchanged and which will be transformed as functions of the unchanged.
mask = tf.keras.backend.random_binomial(tf.shape(z), p=0.5)
z1, z2 = (1 - mask) * z, mask * z
y = self.net(z2)
shift = self.t(y)
scale = self.s(y)
# sigmoid(x) = 1 / (1 + exp(-x)). For x in (-inf, inf) => sigmoid(x) in (0, 1).
gate = tf.sigmoid(scale)
log_dets = tf.reduce_sum((1 - mask) * tf.math.log(gate), axis=1)
x = (z1 * gate + (1 - gate) * shift) + z2
return x, log_dets
| super().__init__(**kwargs)
layers = [tf.keras.layers.Dense(hs, activation) for hs in h_sizes]
self.net = tf.keras.Sequential(layers)
self.t = tf.keras.layers.Dense(dim)
self.s = tf.keras.layers.Dense(dim) |
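# A minimal usage sketch, assuming a TensorFlow version where the
# tf.keras.backend.random_binomial call above is still available; the names
# `flow`, `z`, `x`, `log_dets` are illustrative, not part of the original file.
flow = RNVP(dim=2, h_sizes=[30])
z = tf.random.normal([64, 2])  # batch of 64 two-dimensional latents
x, log_dets = flow.forward(z)  # x has the same shape as z
# log_dets is the per-sample log|det J| of the forward map: the sum of
# log(sigmoid(s)) over the dimensions selected for transformation.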
image.go | // +build extralibs
package main
import (
"gopkg.in/h2non/bimg.v1"
"io"
"io/ioutil"
)
func | (source io.Reader, target io.Writer, width, height int) error {
buffer, err := ioutil.ReadAll(source)
if err != nil {
return err
}
newImage, err := bimg.NewImage(buffer).Process(bimg.Options{
Type: bimg.PNG,
Width: width,
Height: height,
Background: bimg.Color{255, 255, 255},
Embed: true,
Crop: true,
})
if err != nil {
return err
}
if _, err := target.Write(newImage); err != nil {
return err
}
return nil
}
| genImagePreview |
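// A minimal caller sketch, assuming the "bytes" and "os" imports and the
// `extralibs` build tag; the path "input.jpg", the output "preview.png", and
// the name examplePreview are illustrative, not part of the original file.
func examplePreview() error {
	in, err := os.Open("input.jpg")
	if err != nil {
		return err
	}
	defer in.Close()
	var out bytes.Buffer
	// Render a 320x240 PNG preview of the source image into the buffer.
	if err := genImagePreview(in, &out, 320, 240); err != nil {
		return err
	}
	return os.WriteFile("preview.png", out.Bytes(), 0o644)
}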
vk_physical_device_16_bit_storage_features.rs | // Generated by `scripts/generate.js`
pub type VkPhysicalDevice16BitStorageFeatures = super::super::vk::VkPhysicalDevice16BitStorageFeatures; | pub type RawVkPhysicalDevice16BitStorageFeatures = super::super::vk::RawVkPhysicalDevice16BitStorageFeatures; |
#[doc(hidden)] |
arch-armv8.rs | pub struct Backtrace(u8);
impl Backtrace {
pub fn new() -> Backtrace { | fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
Ok( () )
}
} | Backtrace(0)
}
}
impl ::core::fmt::Debug for Backtrace { |
mesh.rs | use std::mem::size_of;
use eyre::{eyre, Result};
use gl::types::GLenum;
use glam::{Vec2, Vec3, Vec4};
use gltf::{
image::Format,
mesh::util::ReadIndices,
texture::{MagFilter, MinFilter, WrappingMode},
};
use crate::ogl;
use super::DataBundle;
/// Gltf terminology is needlessly confusing.
/// A gltf 'Mesh' contains multiple real sub-meshes (called Primitives in the gltf parlance)
pub struct Mesh {
/// 'Primitives' of the 'mesh'
// TODO: could be optimized - most meshes probably only contain a single primitive - avoid allocating a vector
pub primitives: Vec<Primitive>,
/// Name of the 'Mesh'
pub name: Option<String>,
}
impl Mesh {
/// Create a mesh from the gltf::Mesh struct and the DataBundle
pub fn from_gltf(mesh: &gltf::Mesh, bundle: &mut DataBundle) -> Result<Self> {
let name = mesh.name().map(|n| n.to_owned());
let mut primitives = Vec::new();
for primitive in mesh.primitives() {
let primitive = Primitive::from_gltf(&primitive, bundle)?;
primitives.push(primitive);
}
Ok(Mesh { primitives, name })
}
}
/// A Primitive represents a single 'mesh' in the normal meaning of that word
/// (a collection of vertices with a specific topology like Triangles or Lines).
///
// TODO: It's not needed to store all this data in RAM.
// TODO: load vertex data without allocation and copying
pub struct Primitive {
/// A texture (if any) of this mesh
pub texture_info: PrimitiveTexture,
/// OpenGL VAO identifier
pub vao: u32,
/// Vertex indices
pub indices: Indices,
/// Vertex positions
pub positions: Vec<Vec3>,
/// Vertex texture coordinates
pub texcoords: Vec<Vec2>,
/// Vertex normals
pub normals: Vec<Vec3>,
/// Vertex skin data (joints indices, weights)
pub skin: Option<PrimSkin>,
}
impl Primitive {
/// Creates the primitive from the gltf::Primitive struct and the DataBundle
pub fn from_gltf(primitive: &gltf::Primitive, bundle: &mut DataBundle) -> Result<Self> {
let mode = primitive.mode();
if mode != gltf::mesh::Mode::Triangles {
return Err(eyre!("primitive mode: '{mode:?}' is not implemented"));
}
let reader = primitive.reader(|buffer| Some(&bundle.buffers[buffer.index()]));
let positions = reader
.read_positions()
.ok_or(eyre!("primitive doesn't containt positions"))?
.map(Vec3::from)
.collect();
let indices = match reader
.read_indices()
.ok_or(eyre!("primitive doesn't containt indices"))?
{
ReadIndices::U32(b) => Indices::U32(b.collect()),
ReadIndices::U16(b) => Indices::U16(b.collect()),
ReadIndices::U8(b) => Indices::U8(b.collect()),
};
let mut texcoords = Vec::new();
let mut texture_set = 0;
while let Some(texcoords_reader) = reader.read_tex_coords(texture_set) {
if texture_set >= 1 {
// Used for loading textures other than the diffuse map
//eprintln!("WARN: primitive has more than 1 texture coordinate set");
break;
}
texcoords = texcoords_reader.into_f32().map(Vec2::from).collect();
texture_set += 1;
}
let normals = reader
.read_normals()
.ok_or(eyre!("primitive doesn't containt normals"))?
.map(Vec3::from)
.collect();
let skin = match (reader.read_joints(0), reader.read_weights(0)) {
(Some(joints), Some(weights)) => {
let joints = joints.into_u16().map(|j| j.map(|ji| ji as u32)).collect();
// TODO: u8 / u16 joint weights normalization
match weights {
gltf::mesh::util::ReadWeights::U8(_) => todo!("U8 weights"),
gltf::mesh::util::ReadWeights::U16(_) => todo!("U16 weights"),
_ => {}
}
let weights = weights.into_f32().collect();
Some(PrimSkin::new(joints, weights))
}
_ => None,
};
let material = primitive.material();
let mut primitive = Self {
vao: 0,
texture_info: PrimitiveTexture::None {
base_color_factor: Vec4::splat(1.),
},
indices,
positions,
texcoords,
normals,
skin,
};
primitive.create_buffers(&material, bundle);
if primitive.vao == 0 {
return Err(eyre!("primitive VAO wasn't correctly initialized"));
}
Ok(primitive)
} | /// Creates the OpenGL buffer from the loaded vertex data
fn create_buffers(&mut self, material: &gltf::Material, bundle: &mut DataBundle) {
let mut indices = 0;
let mut vao = 0;
unsafe {
gl::GenVertexArrays(1, &mut vao);
gl::BindVertexArray(vao);
let _positions = ogl::create_float_buf(&self.positions, 3, ogl::POS_INDEX, gl::FLOAT);
let _texcoords =
ogl::create_float_buf(&self.texcoords, 2, ogl::TEXCOORDS_INDEX, gl::FLOAT);
let _normals = ogl::create_float_buf(&self.normals, 3, ogl::NORMALS_INDEX, gl::FLOAT);
if let Some(skin) = &self.skin {
let _joints =
ogl::create_int_buf(&skin.joints, 4, ogl::JOINTS_INDEX, gl::UNSIGNED_INT);
let _weights =
ogl::create_float_buf(&skin.weights, 4, ogl::WEIGHTS_INDEX, gl::FLOAT);
}
// Indices
gl::GenBuffers(1, &mut indices);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, indices);
gl::BufferData(
gl::ELEMENT_ARRAY_BUFFER,
self.indices.size() as isize,
self.indices.ptr(),
gl::STATIC_DRAW,
);
let pbr = material.pbr_metallic_roughness();
let texture_index = match pbr.base_color_texture() {
Some(tex_info) => {
self.create_texture(&tex_info.texture(), pbr.base_color_factor(), bundle)
}
None => {
let base_color_factor = Vec4::from(pbr.base_color_factor());
PrimitiveTexture::None { base_color_factor }
}
};
// Unbind buffers
gl::BindVertexArray(0);
gl::BindBuffer(gl::ARRAY_BUFFER, 0);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0);
gl::BindTexture(gl::TEXTURE_2D, 0);
self.vao = vao;
self.texture_info = texture_index;
}
}
/// Creates a new OpenGL texture.
///
/// If the texture already exists (bundle.gl_textures\[texture_index\] == Some(...)),
/// no new texture is created, only the Texture struct is cloned.
fn create_texture(
&mut self,
tex: &gltf::Texture,
base_color_factor: [f32; 4],
bundle: &mut DataBundle,
) -> PrimitiveTexture {
let tex_index = tex.source().index();
if let Some(texture) = bundle.gl_textures[tex_index].clone() {
return texture;
}
let gl_tex_id = unsafe {
let mut texture = 0;
gl::GenTextures(1, &mut texture);
gl::BindTexture(gl::TEXTURE_2D, texture);
self.set_texture_sampler(&tex.sampler());
let image = &bundle.images[tex_index];
assert!(image.width.is_power_of_two());
assert!(image.height.is_power_of_two());
let (internal_format, format) = match image.format {
Format::R8G8 => (gl::RG8, gl::RG),
Format::R8G8B8 => (gl::RGB8, gl::RGB),
Format::R8G8B8A8 => (gl::RGBA8, gl::RGBA),
f => unimplemented!("Unimplemented image format: '{f:?}'"),
};
gl::TexImage2D(
gl::TEXTURE_2D,
0,
internal_format as i32,
image.width as i32,
image.height as i32,
0,
format,
gl::UNSIGNED_BYTE,
image.pixels.as_ptr() as _,
);
gl::GenerateMipmap(gl::TEXTURE_2D);
texture
};
let texture = PrimitiveTexture::Some {
gl_id: gl_tex_id,
base_color_factor: Vec4::from(base_color_factor),
};
bundle.gl_textures[tex_index] = Some(texture.clone());
texture
}
/// Sets the appropriate sampler functions for the currently created texture.
fn set_texture_sampler(&self, sampler: &gltf::texture::Sampler) {
let min_filter = match sampler.min_filter() {
Some(min_filter) => match min_filter {
MinFilter::Nearest => gl::NEAREST,
MinFilter::Linear => gl::LINEAR,
MinFilter::NearestMipmapNearest => gl::NEAREST_MIPMAP_NEAREST,
MinFilter::LinearMipmapNearest => gl::LINEAR_MIPMAP_NEAREST,
MinFilter::NearestMipmapLinear => gl::NEAREST_MIPMAP_LINEAR,
MinFilter::LinearMipmapLinear => gl::LINEAR_MIPMAP_LINEAR,
},
None => gl::LINEAR_MIPMAP_LINEAR,
};
let mag_filter = match sampler.mag_filter() {
Some(mag_filter) => match mag_filter {
MagFilter::Nearest => gl::NEAREST,
MagFilter::Linear => gl::LINEAR,
},
None => gl::LINEAR,
};
unsafe {
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, min_filter as i32);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MAG_FILTER, mag_filter as i32);
}
let wrap_s = match sampler.wrap_s() {
WrappingMode::ClampToEdge => gl::CLAMP_TO_EDGE,
WrappingMode::MirroredRepeat => gl::MIRRORED_REPEAT,
WrappingMode::Repeat => gl::REPEAT,
};
let wrap_t = match sampler.wrap_t() {
WrappingMode::ClampToEdge => gl::CLAMP_TO_EDGE,
WrappingMode::MirroredRepeat => gl::MIRRORED_REPEAT,
WrappingMode::Repeat => gl::REPEAT,
};
unsafe {
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_S, wrap_s as i32);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_T, wrap_t as i32);
}
}
}
/// Texture info for a primitive.
///
/// If the primitive has a texture, copy the texture information from the Model's gl_textures.
///
/// If not, the base_color_factor serves as the object color.
#[derive(Clone)]
pub enum PrimitiveTexture {
None { base_color_factor: Vec4 },
Some { gl_id: u32, base_color_factor: Vec4 },
}
/// Optional skin data for a primitive.
pub struct PrimSkin {
pub joints: Vec<[u32; 4]>,
pub weights: Vec<[f32; 4]>,
}
impl PrimSkin {
pub fn new(joints: Vec<[u32; 4]>, weights: Vec<[f32; 4]>) -> Self {
Self { joints, weights }
}
}
/// Vertex indices for a primitive.
///
/// An enum keeps this simple: it avoids making the containing types generic over the index type.
pub enum Indices {
U32(Vec<u32>),
U16(Vec<u16>),
U8(Vec<u8>),
}
impl Indices {
/// The size (in bytes) of the buffer
pub fn size(&self) -> usize {
match self {
Indices::U32(buf) => buf.len() * size_of::<u32>(),
Indices::U16(buf) => buf.len() * size_of::<u16>(),
Indices::U8(buf) => buf.len() * size_of::<u8>(),
}
}
/// The length (in elements) of the buffer
pub fn len(&self) -> usize {
match self {
Indices::U32(buf) => buf.len(),
Indices::U16(buf) => buf.len(),
Indices::U8(buf) => buf.len(),
}
}
/// A pointer to the start of the buffer
pub fn ptr(&self) -> *const std::ffi::c_void {
match self {
Indices::U32(buf) => buf.as_ptr() as _,
Indices::U16(buf) => buf.as_ptr() as _,
Indices::U8(buf) => buf.as_ptr() as _,
}
}
/// A GL_TYPE corresponding to the variant of the buffer
pub fn gl_type(&self) -> GLenum {
match self {
Indices::U32(_) => gl::UNSIGNED_INT,
Indices::U16(_) => gl::UNSIGNED_SHORT,
Indices::U8(_) => gl::UNSIGNED_BYTE,
}
}
} | |
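/// A minimal draw sketch, assuming a current OpenGL context and the gl crate
/// used above; the function name `draw_primitive` is illustrative, not part
/// of the original file.
fn draw_primitive(prim: &Primitive) {
    unsafe {
        gl::BindVertexArray(prim.vao);
        // Indices supplies both the element count and the GL index type,
        // so one call covers the U8/U16/U32 variants.
        gl::DrawElements(
            gl::TRIANGLES,
            prim.indices.len() as i32,
            prim.indices.gl_type(),
            std::ptr::null(),
        );
        gl::BindVertexArray(0);
    }
}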
apps.py | from django.apps import AppConfig
from django.db.models.signals import post_migrate, post_save
from .settings.authentication import DJANGO_AUTH_TYPE
class AuthenticationConfig(AppConfig):
name = 'cvat.apps.authentication'
def ready(self):
from . import signals
from django.contrib.auth.models import User
post_migrate.connect(signals.create_groups) |
import django_auth_ldap.backend
django_auth_ldap.backend.populate_user.connect(signals.update_ldap_groups) |
if DJANGO_AUTH_TYPE == 'SIMPLE':
post_save.connect(signals.create_user, sender=User, dispatch_uid="create_user") |