faucets.go

package usecases

import (
	"context"

	"github.com/consensys/orchestrate/pkg/toolkit/app/multitenancy"
	"github.com/consensys/orchestrate/src/entities"
	ethcommon "github.com/ethereum/go-ethereum/common"
)

//go:generate mockgen -source=faucets.go -destination=mocks/faucets.go -package=mocks

type FaucetUseCases interface {
	RegisterFaucet() RegisterFaucetUseCase
	UpdateFaucet() UpdateFaucetUseCase
	GetFaucet() GetFaucetUseCase
	SearchFaucets() SearchFaucetsUseCase
	DeleteFaucet() DeleteFaucetUseCase
}

type RegisterFaucetUseCase interface {
	Execute(ctx context.Context, faucet *entities.Faucet, userInfo *multitenancy.UserInfo) (*entities.Faucet, error)
}

type UpdateFaucetUseCase interface {
	Execute(ctx context.Context, faucet *entities.Faucet, userInfo *multitenancy.UserInfo) (*entities.Faucet, error)
}

type GetFaucetUseCase interface {
	Execute(ctx context.Context, uuid string, userInfo *multitenancy.UserInfo) (*entities.Faucet, error)
}

type SearchFaucetsUseCase interface {
	Execute(ctx context.Context, filters *entities.FaucetFilters, userInfo *multitenancy.UserInfo) ([]*entities.Faucet, error)
}

type DeleteFaucetUseCase interface {
	Execute(ctx context.Context, uuid string, userInfo *multitenancy.UserInfo) error
}

type GetFaucetCandidateUseCase interface {
	Execute(ctx context.Context, account ethcommon.Address, chain *entities.Chain, userInfo *multitenancy.UserInfo) (*entities.Faucet, error)
}
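
Each interface above is a single-method use case, so a conforming type only has to satisfy Execute. A minimal sketch of one possible implementation follows; the faucetStore interface and the UserInfo.AllowedTenants field are assumptions for illustration and are not shown in the file above:

package usecases

import (
	"context"

	"github.com/consensys/orchestrate/pkg/toolkit/app/multitenancy"
	"github.com/consensys/orchestrate/src/entities"
)

// faucetStore is a hypothetical persistence interface, assumed here
// only to make the sketch self-contained.
type faucetStore interface {
	Search(ctx context.Context, filters *entities.FaucetFilters, tenants []string) ([]*entities.Faucet, error)
}

// searchFaucets sketches one possible implementation of SearchFaucetsUseCase.
type searchFaucets struct {
	db faucetStore
}

func NewSearchFaucets(db faucetStore) SearchFaucetsUseCase {
	return &searchFaucets{db: db}
}

func (uc *searchFaucets) Execute(ctx context.Context, filters *entities.FaucetFilters, userInfo *multitenancy.UserInfo) ([]*entities.Faucet, error) {
	// Scope the search to the caller's tenants; AllowedTenants is an
	// assumed field name, not confirmed by the snippet above.
	return uc.db.Search(ctx, filters, userInfo.AllowedTenants)
}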

timebucketedlog_reader.py

from time import time

import six
from six import with_metaclass

from eventsourcing.domain.model.events import QualnameABCMeta
from eventsourcing.domain.model.timebucketedlog import MessageLogged, Timebucketedlog, make_timebucket_id, \
    next_bucket_starts, previous_bucket_starts
from eventsourcing.infrastructure.eventstore import AbstractEventStore
from eventsourcing.utils.times import decimaltimestamp


def get_timebucketedlog_reader(log, event_store):
    """
    :rtype: TimebucketedlogReader
    """
    return TimebucketedlogReader(log=log, event_store=event_store)


class TimebucketedlogReader(with_metaclass(QualnameABCMeta)):
    def __init__(self, log, event_store, page_size=50):
        assert isinstance(log, Timebucketedlog)
        self.log = log
        assert isinstance(event_store, AbstractEventStore), event_store
        self.event_store = event_store
        assert isinstance(page_size, six.integer_types)
        self.page_size = page_size
        self.position = None

    def get_messages(self, gt=None, gte=None, lt=None, lte=None, limit=None, is_ascending=False, page_size=None):
        events = self.get_events(gt=gt, gte=gte, lt=lt, lte=lte, limit=limit, is_ascending=is_ascending,
                                 page_size=page_size)
        for event in events:
            if isinstance(event, MessageLogged):
                self.position = event.timestamp
                yield event.message

    def get_events(self, gt=None, gte=None, lt=None, lte=None, limit=None, is_ascending=False, page_size=None):
        assert limit is None or limit > 0

        # Identify the first time bucket.
        now = decimaltimestamp()
        started_on = self.log.started_on
        absolute_latest = min(now, lt or now, lte or now)
        absolute_earliest = max(started_on, gt or 0, gte or 0)
        if is_ascending:
            position = absolute_earliest
        else:
            position = absolute_latest

        # Start counting events.
        count_events = 0
        while True:
            bucket_id = make_timebucket_id(self.log.name, position, self.log.bucket_size)
            for message_logged_event in self.event_store.get_domain_events(
                originator_id=bucket_id,
                gt=gt,
                gte=gte,
                lt=lt,
                lte=lte,
                limit=limit,
                is_ascending=is_ascending,
                page_size=page_size,
            ):
                yield message_logged_event
                if limit is not None:
                    count_events += 1
                    if count_events >= limit:
                        return

            # See if there's another bucket.
            if is_ascending:
                next_timestamp = next_bucket_starts(position, self.log.bucket_size)
                if next_timestamp > absolute_latest:
                    return
                else:
                    position = next_timestamp
            else:
                if position < absolute_earliest:
                    return
                else:
                    position = previous_bucket_starts(position, self.log.bucket_size)
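
The bucket walk in get_events() is the heart of this reader: it steps a position from one time bucket to the next until the requested window is exhausted. A standalone sketch of that strategy, with a fixed 60-second bucket width and integer bucket ids standing in for the library's make_timebucket_id and next_bucket_starts helpers:

BUCKET_SIZE = 60.0  # assumed bucket width in seconds, for illustration only

def next_bucket_start(position):
    # Start of the bucket after the one containing `position`.
    return (position // BUCKET_SIZE + 1) * BUCKET_SIZE

def walk_buckets(earliest, latest):
    # Ascending walk: yield one bucket id per bucket in the window.
    position = earliest
    while position <= latest:
        yield int(position // BUCKET_SIZE)  # stands in for make_timebucket_id()
        position = next_bucket_start(position)

print(list(walk_buckets(0.0, 250.0)))  # -> [0, 1, 2, 3, 4]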

macro-use-one.rs

// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// aux-build:two_macros.rs
// ignore-stage1

#[macro_use(macro_two)]
extern crate two_macros;

pub fn main() {
    macro_two!();
}
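
The #[macro_use(macro_two)] form imports only the named macro from the crate. For context, a minimal sketch of what the two_macros auxiliary crate might export (assumed; the actual fixture ships with the compiler test suite):

// two_macros.rs (hypothetical contents)
#[macro_export]
macro_rules! macro_one { () => {} }

#[macro_export]
macro_rules! macro_two { () => {} }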

unrecognized-repr.rs

use ref_cast::RefCast;

#[derive(RefCast)]
#[repr(packed, C, usize, usize(0), usize = "0")]
struct Test {
    s: String,
}

fn main() {}

test_data_collator.py

import unittest

from transformers import AutoTokenizer, is_torch_available
from transformers.testing_utils import require_torch, slow


if is_torch_available():
    import torch

    from transformers import (
        DataCollatorForLanguageModeling,
        DataCollatorForNextSentencePrediction,
        DataCollatorForPermutationLanguageModeling,
        DataCollatorForSOP,
        GlueDataset,
        GlueDataTrainingArguments,
        LineByLineTextDataset,
        LineByLineWithSOPTextDataset,
        TextDataset,
        TextDatasetForNextSentencePrediction,
        default_data_collator,
    )


PATH_SAMPLE_TEXT = "./tests/fixtures/sample_text.txt"
PATH_SAMPLE_TEXT_DIR = "./tests/fixtures/tests_samples/wiki_text"


@require_torch
class DataCollatorIntegrationTest(unittest.TestCase):
    def test_default_with_dict(self):
        features = [{"label": i, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
        batch = default_data_collator(features)
        self.assertTrue(batch["labels"].equal(torch.tensor(list(range(8)))))
        self.assertEqual(batch["labels"].dtype, torch.long)
        self.assertEqual(batch["inputs"].shape, torch.Size([8, 6]))

        # With label_ids
        features = [{"label_ids": [0, 1, 2], "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
        batch = default_data_collator(features)
        self.assertTrue(batch["labels"].equal(torch.tensor([[0, 1, 2]] * 8)))
        self.assertEqual(batch["labels"].dtype, torch.long)
        self.assertEqual(batch["inputs"].shape, torch.Size([8, 6]))

        # Features can already be tensors
        features = [{"label": i, "inputs": torch.randint(10, [10])} for i in range(8)]
        batch = default_data_collator(features)
        self.assertTrue(batch["labels"].equal(torch.tensor(list(range(8)))))
        self.assertEqual(batch["labels"].dtype, torch.long)
        self.assertEqual(batch["inputs"].shape, torch.Size([8, 10]))

        # Labels can already be tensors
        features = [{"label": torch.tensor(i), "inputs": torch.randint(10, [10])} for i in range(8)]
        batch = default_data_collator(features)
        self.assertEqual(batch["labels"].dtype, torch.long)
        self.assertTrue(batch["labels"].equal(torch.tensor(list(range(8)))))
        self.assertEqual(batch["inputs"].shape, torch.Size([8, 10]))

    def test_default_with_no_labels(self):
        features = [{"label": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
        batch = default_data_collator(features)
        self.assertTrue("labels" not in batch)
        self.assertEqual(batch["inputs"].shape, torch.Size([8, 6]))

        # With label_ids
        features = [{"label_ids": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
        batch = default_data_collator(features)
        self.assertTrue("labels" not in batch)
        self.assertEqual(batch["inputs"].shape, torch.Size([8, 6]))

    @slow
    def test_default_classification(self):
        MODEL_ID = "bert-base-cased-finetuned-mrpc"
        tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
        data_args = GlueDataTrainingArguments(
            task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
        )
        dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev")
        data_collator = default_data_collator
        batch = data_collator(dataset.features)
        self.assertEqual(batch["labels"].dtype, torch.long)

    @slow
    def test_default_regression(self):
        MODEL_ID = "distilroberta-base"
        tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
        data_args = GlueDataTrainingArguments(
            task_name="sts-b", data_dir="./tests/fixtures/tests_samples/STS-B", overwrite_cache=True
        )
        dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev")
        data_collator = default_data_collator
        batch = data_collator(dataset.features)
        self.assertEqual(batch["labels"].dtype, torch.float)

    @slow
    def test_lm_tokenizer_without_padding(self):
        tokenizer = AutoTokenizer.from_pretrained("gpt2")
        data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)
        # ^ causal lm

        dataset = LineByLineTextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512)
        examples = [dataset[i] for i in range(len(dataset))]
        with self.assertRaises(ValueError):
            # Expect an error because gpt2 has no padding token:
            data_collator(examples)

        dataset = TextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512, overwrite_cache=True)
        examples = [dataset[i] for i in range(len(dataset))]
        batch = data_collator(examples)
        self.assertIsInstance(batch, dict)
        self.assertEqual(batch["input_ids"].shape, torch.Size((2, 512)))
        self.assertEqual(batch["labels"].shape, torch.Size((2, 512)))

    @slow
    def test_lm_tokenizer_with_padding(self):
        tokenizer = AutoTokenizer.from_pretrained("distilroberta-base")
        data_collator = DataCollatorForLanguageModeling(tokenizer)
        # ^ masked lm

        dataset = LineByLineTextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512)
        examples = [dataset[i] for i in range(len(dataset))]
        batch = data_collator(examples)
        self.assertIsInstance(batch, dict)
        self.assertEqual(batch["input_ids"].shape, torch.Size((31, 107)))
        self.assertEqual(batch["labels"].shape, torch.Size((31, 107)))

        dataset = TextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512, overwrite_cache=True)
        examples = [dataset[i] for i in range(len(dataset))]
        batch = data_collator(examples)
        self.assertIsInstance(batch, dict)
        self.assertEqual(batch["input_ids"].shape, torch.Size((2, 512)))
        self.assertEqual(batch["labels"].shape, torch.Size((2, 512)))

    @slow
    def test_plm(self):
        tokenizer = AutoTokenizer.from_pretrained("xlnet-base-cased")
        data_collator = DataCollatorForPermutationLanguageModeling(tokenizer)
        # ^ permutation lm

        dataset = LineByLineTextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512)
        examples = [dataset[i] for i in range(len(dataset))]
        batch = data_collator(examples)
        self.assertIsInstance(batch, dict)
        self.assertEqual(batch["input_ids"].shape, torch.Size((31, 112)))
        self.assertEqual(batch["perm_mask"].shape, torch.Size((31, 112, 112)))
        self.assertEqual(batch["target_mapping"].shape, torch.Size((31, 112, 112)))
        self.assertEqual(batch["labels"].shape, torch.Size((31, 112)))

        dataset = TextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512, overwrite_cache=True)
        examples = [dataset[i] for i in range(len(dataset))]
        batch = data_collator(examples)
        self.assertIsInstance(batch, dict)
        self.assertEqual(batch["input_ids"].shape, torch.Size((2, 512)))
        self.assertEqual(batch["perm_mask"].shape, torch.Size((2, 512, 512)))
        self.assertEqual(batch["target_mapping"].shape, torch.Size((2, 512, 512)))
        self.assertEqual(batch["labels"].shape, torch.Size((2, 512)))

        example = [torch.randint(5, [5])]
        with self.assertRaises(ValueError):
            # Expect an error because of the odd sequence length
            data_collator(example)

    @slow
    def test_nsp(self):
        tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
        data_collator = DataCollatorForNextSentencePrediction(tokenizer)

        dataset = TextDatasetForNextSentencePrediction(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512)
        examples = [dataset[i] for i in range(len(dataset))]
        batch = data_collator(examples)
        self.assertIsInstance(batch, dict)

        # Since false samples are generated randomly, the total number of samples is not fixed.
        total_samples = batch["input_ids"].shape[0]
        self.assertEqual(batch["input_ids"].shape, torch.Size((total_samples, 512)))
        self.assertEqual(batch["token_type_ids"].shape, torch.Size((total_samples, 512)))
        self.assertEqual(batch["masked_lm_labels"].shape, torch.Size((total_samples, 512)))
        self.assertEqual(batch["next_sentence_label"].shape, torch.Size((total_samples,)))

    @slow
    def test_sop(self):
        tokenizer = AutoTokenizer.from_pretrained("albert-base-v2")
        data_collator = DataCollatorForSOP(tokenizer)

        dataset = LineByLineWithSOPTextDataset(tokenizer, file_dir=PATH_SAMPLE_TEXT_DIR, block_size=512)
        examples = [dataset[i] for i in range(len(dataset))]
        batch = data_collator(examples)
        self.assertIsInstance(batch, dict)

        # Since false samples are generated randomly, the total number of samples is not fixed.
        total_samples = batch["input_ids"].shape[0]
        self.assertEqual(batch["input_ids"].shape, torch.Size((total_samples, 512)))
        self.assertEqual(batch["token_type_ids"].shape, torch.Size((total_samples, 512)))
        self.assertEqual(batch["labels"].shape, torch.Size((total_samples, 512)))
        self.assertEqual(batch["sentence_order_label"].shape, torch.Size((total_samples,)))

test_auto_ApplyTransforms.py

# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..resampling import ApplyTransforms


def test_ApplyTransforms_inputs():
    input_map = dict(
        args=dict(argstr='%s'),
        default_value=dict(argstr='--default-value %g', usedefault=True),
        dimension=dict(argstr='--dimensionality %d'),
        environ=dict(nohash=True, usedefault=True),
        float=dict(argstr='--float %d'),
        ignore_exception=dict(nohash=True, usedefault=True),
        input_image=dict(argstr='--input %s', mandatory=True),
        input_image_type=dict(argstr='--input-image-type %d'),
        interpolation=dict(argstr='%s', usedefault=True),
        interpolation_parameters=dict(),
        invert_transform_flags=dict(),
        num_threads=dict(nohash=True, usedefault=True),
        out_postfix=dict(usedefault=True),
        output_image=dict(argstr='--output %s', genfile=True, hash_files=False),
        print_out_composite_warp_file=dict(requires=['output_image']),
        reference_image=dict(argstr='--reference-image %s', mandatory=True),
        terminal_output=dict(deprecated='1.0.0', nohash=True),
        transforms=dict(argstr='%s', mandatory=True),
    )
    inputs = ApplyTransforms.input_spec()

    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value


def test_ApplyTransforms_outputs():
    output_map = dict(output_image=dict())
    outputs = ApplyTransforms.output_spec()

    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(outputs.traits()[key], metakey) == value
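
These auto-generated checks pin the CLI metadata of the nipype ApplyTransforms interface. A hedged usage sketch of that interface (paths are placeholders; requires nipype and ANTs installed):

from nipype.interfaces.ants import ApplyTransforms

at = ApplyTransforms()
at.inputs.input_image = "moving.nii"       # placeholder input volume
at.inputs.reference_image = "fixed.nii"    # placeholder reference grid
at.inputs.transforms = ["warp.nii.gz"]     # placeholder transform file
print(at.cmdline)  # inspect the antsApplyTransforms command it would run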

get_search_internal_server_error.go

/*
 * EVE Swagger Interface
 *
 * An OpenAPI for EVE Online
 *
 * OpenAPI spec version: 0.4.1.dev1
 *
 * Generated by: https://github.com/swagger-api/swagger-codegen.git
 */

package swagger

// Internal server error
type GetSearchInternalServerError struct {
	// Internal server error message
	Error_ string `json:"error,omitempty"`
}
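
A small illustrative helper (not part of the generated client) showing how a caller might decode a 500 response body into this model; the helper name and signature are assumptions for this sketch:

package swagger

import "encoding/json"

// decodeSearchError unmarshals an error response body into
// GetSearchInternalServerError and returns its message.
func decodeSearchError(body []byte) (string, error) {
	var e GetSearchInternalServerError
	if err := json.Unmarshal(body, &e); err != nil {
		return "", err
	}
	return e.Error_, nil
}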

outbound_campaigns_interactions.go

package outbound_campaigns_interactions

import (
	"fmt"
	"net/url"
	"strings"
	"time"

	"github.com/mypurecloud/platform-client-sdk-cli/build/gc/logger"
	"github.com/mypurecloud/platform-client-sdk-cli/build/gc/models"
	"github.com/mypurecloud/platform-client-sdk-cli/build/gc/retry"
	"github.com/mypurecloud/platform-client-sdk-cli/build/gc/services"
	"github.com/mypurecloud/platform-client-sdk-cli/build/gc/utils"
	"github.com/spf13/cobra"
)

var (
	Description = utils.FormatUsageDescription("outbound_campaigns_interactions", "SWAGGER_OVERRIDE_/api/v2/outbound/campaigns/{campaignId}/interactions")

	outbound_campaigns_interactionsCmd = &cobra.Command{
		Use:   utils.FormatUsageDescription("outbound_campaigns_interactions"),
		Short: Description,
		Long:  Description,
	}
	CommandService services.CommandService
)

func init() {
	CommandService = services.NewCommandService(outbound_campaigns_interactionsCmd)
}

func Cmdoutbound_campaigns_interactions() *cobra.Command {
	getCmd.SetUsageTemplate(fmt.Sprintf("%s\nOperation:\n %s %s\n%s\n%s", getCmd.UsageTemplate(), "GET", "/api/v2/outbound/campaigns/{campaignId}/interactions", utils.FormatPermissions([]string{"outbound:campaign:view"}), utils.GenerateDevCentreLink("GET", "Outbound", "/api/v2/outbound/campaigns/{campaignId}/interactions")))
	utils.AddFileFlagIfUpsert(getCmd.Flags(), "GET", ``)
	utils.AddPaginateFlagsIfListingResponse(getCmd.Flags(), "GET", `{
  "description" : "successful operation",
  "schema" : {
    "$ref" : "#/definitions/CampaignInteractions"
  }
}`)
	outbound_campaigns_interactionsCmd.AddCommand(getCmd)
	return outbound_campaigns_interactionsCmd
}

var getCmd = &cobra.Command{
	Use:   "get [campaignId]",
	Short: "Get dialer campaign interactions.",
	Long:  "Get dialer campaign interactions.",
	Args:  utils.DetermineArgs([]string{"campaignId"}),
	Run: func(cmd *cobra.Command, args []string) {
		_ = models.Entities{}

		printReqBody, _ := cmd.Flags().GetBool("printrequestbody")
		if printReqBody {
			return
		}

		queryParams := make(map[string]string)

		path := "/api/v2/outbound/campaigns/{campaignId}/interactions"
		campaignId, args := args[0], args[1:]
		path = strings.Replace(path, "{campaignId}", fmt.Sprintf("%v", campaignId), -1)

		urlString := path
		if len(queryParams) > 0 {
			urlString = fmt.Sprintf("%v?", path)
			for k, v := range queryParams {
				urlString += fmt.Sprintf("%v=%v&", url.QueryEscape(strings.TrimSpace(k)), url.QueryEscape(strings.TrimSpace(v)))
			}
			urlString = strings.TrimSuffix(urlString, "&")
		}

		const opId = "get"
		const httpMethod = "GET"
		retryFunc := CommandService.DetermineAction(httpMethod, urlString, cmd, opId)
		// TODO read from config file
		retryConfig := &retry.RetryConfiguration{
			RetryWaitMin: 5 * time.Second,
			RetryWaitMax: 60 * time.Second,
			RetryMax:     20,
		}
		results, err := retryFunc(retryConfig)
		if err != nil {
			logger.Fatal(err)
		}
		utils.Render(results)
	},
}
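
The query-string assembly in the Run function above (escape each key and value, join with "&", trim the trailing separator) can be read in isolation; a self-contained restatement of that pattern, where the helper name is an assumption and not part of the generated CLI:

package outbound_campaigns_interactions

import (
	"fmt"
	"net/url"
	"strings"
)

// buildQueryString reproduces the query-string assembly used by the
// get command: escape keys and values, join with '&', trim the tail.
func buildQueryString(path string, queryParams map[string]string) string {
	if len(queryParams) == 0 {
		return path
	}
	urlString := fmt.Sprintf("%v?", path)
	for k, v := range queryParams {
		urlString += fmt.Sprintf("%v=%v&", url.QueryEscape(strings.TrimSpace(k)), url.QueryEscape(strings.TrimSpace(v)))
	}
	return strings.TrimSuffix(urlString, "&")
}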

test_tm_rpc.go

/*
Copyright 2019 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tmrpctest
import (
"fmt"
"io"
"reflect"
"strings"
"sync"
"testing"
"time"
"golang.org/x/net/context"
"github.com/golang/protobuf/proto"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/hook"
"vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/mysqlctl/tmutils"
"vitess.io/vitess/go/vt/vttablet/tabletmanager"
"vitess.io/vitess/go/vt/vttablet/tmclient"
querypb "vitess.io/vitess/go/vt/proto/query"
replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata"
tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)
// fakeRPCTM implements tabletmanager.RPCTM and fills in all
// possible values in all APIs
type fakeRPCTM struct {
t *testing.T
panics bool
// slow if true will let Ping() sleep and effectively not respond to an RPC.
slow bool
// mu guards accesses of "slow".
mu sync.Mutex
}
func (fra *fakeRPCTM) LockTables(ctx context.Context) error {
panic("implement me")
}
func (fra *fakeRPCTM) UnlockTables(ctx context.Context) error {
panic("implement me")
}
func (fra *fakeRPCTM) setSlow(slow bool) {
fra.mu.Lock()
fra.slow = slow
fra.mu.Unlock()
}
// NewFakeRPCTM returns a fake tabletmanager.RPCTM that's just a mirror.
func NewFakeRPCTM(t *testing.T) tabletmanager.RPCTM {
return &fakeRPCTM{
t: t,
}
}
// The way this test is organized is a repetition of:
// - static test data for a call
// - implementation of the tabletmanager.RPCTM method for fakeRPCTM
// - static test method for the call (client side)
// for each possible method of the interface.
// This makes the implementations all in the same spot.
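// For example, the Sleep RPC below follows this shape exactly:
// testSleepDuration holds the static test data, fakeRPCTM.Sleep is the
// server-side implementation that checks it, and tmRPCTestSleep /
// tmRPCTestSleepPanic exercise it from the client side.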
var protoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
func compare(t *testing.T, name string, got, want interface{}) {
t.Helper()
typ := reflect.TypeOf(got)
if reflect.TypeOf(got) != reflect.TypeOf(want) {
goto fail
}
switch {
case typ.Implements(protoMessage):
if !proto.Equal(got.(proto.Message), want.(proto.Message)) {
goto fail
}
case typ.Kind() == reflect.Slice && typ.Elem().Implements(protoMessage):
vx, vy := reflect.ValueOf(got), reflect.ValueOf(want)
if vx.Len() != vy.Len() {
goto fail
}
for i := 0; i < vx.Len(); i++ {
if !proto.Equal(vx.Index(i).Interface().(proto.Message), vy.Index(i).Interface().(proto.Message)) {
goto fail
}
}
default:
if !reflect.DeepEqual(got, want) {
goto fail
}
}
return
fail:
t.Errorf("Unexpected %v:\ngot %#v\nwant %#v", name, got, want)
}
func compareBool(t *testing.T, name string, got bool) {
t.Helper()
if !got {
t.Errorf("Unexpected %v: got false expected true", name)
}
}
func compareError(t *testing.T, name string, err error, got, want interface{}) {
t.Helper()
if err != nil {
t.Errorf("%v failed: %v", name, err)
} else {
compare(t, name+" result", got, want)
}
}
var testLogString = "test log"
func logStuff(logger logutil.Logger, count int) {
for i := 0; i < count; i++ {
logger.Infof(testLogString)
}
}
func compareLoggedStuff(t *testing.T, name string, stream logutil.EventStream, count int) error {
t.Helper()
for i := 0; i < count; i++ {
le, err := stream.Recv()
if err != nil {
t.Errorf("No logged value for %v/%v", name, i)
return err
}
if le.Value != testLogString {
t.Errorf("Unexpected log response for %v: got %v expected %v", name, le.Value, testLogString)
}
}
_, err := stream.Recv()
if err == nil {
t.Fatalf("log channel wasn't closed for %v", name)
}
if err == io.EOF {
return nil
}
return err
}
func expectHandleRPCPanic(t *testing.T, name string, verbose bool, err error) {
t.Helper()
expected := fmt.Sprintf("HandleRPCPanic caught panic during %v with verbose %v", name, verbose)
if err == nil || !strings.Contains(err.Error(), expected) {
t.Fatalf("Expected a panic error with '%v' but got: %v", expected, err)
}
}
//
// Various read-only methods
//
func (fra *fakeRPCTM) Ping(ctx context.Context, args string) string {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
fra.mu.Lock()
slow := fra.slow
fra.mu.Unlock()
if slow {
time.Sleep(time.Minute)
}
return args
}
func tmRPCTestPing(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.Ping(ctx, tablet)
if err != nil {
t.Errorf("Ping failed: %v", err)
}
}
func tmRPCTestPingPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.Ping(ctx, tablet)
expectHandleRPCPanic(t, "Ping", false /*verbose*/, err)
}
// tmRPCTestDialExpiredContext verifies that
// the context returns the right DeadlineExceeded Err() for
// RPCs failed due to an expired context before .Dial().
func tmRPCTestDialExpiredContext(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
// Using a timeout of 0 here such that .Dial() will fail immediately.
expiredCtx, cancel := context.WithTimeout(ctx, 0)
defer cancel()
err := client.Ping(expiredCtx, tablet)
if err == nil {
t.Fatal("tmRPCTestDialExpiredContext: RPC with expired context did not fail")
}
// The context was already expired when we created it. Here we only verify that it returns the expected error.
select {
case <-expiredCtx.Done():
if err := expiredCtx.Err(); err != context.DeadlineExceeded {
t.Errorf("tmRPCTestDialExpiredContext: got %v want context.DeadlineExceeded", err)
}
default:
t.Errorf("tmRPCTestDialExpiredContext: context.Done() not closed")
}
}
// tmRPCTestRPCTimeout verifies that
// the context returns the right DeadlineExceeded Err() for
// RPCs failed due to an expired context during execution.
func tmRPCTestRPCTimeout(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet, fakeTM *fakeRPCTM) {
// We must use a timeout > 0 such that the context deadline hasn't expired
// yet in grpctmclient.Client.dial().
// NOTE: This might still race, e.g. when test execution takes so long
// that the context has already expired by the time dial() runs. In such
// cases coverage is reduced, but the test will not flake.
shortCtx, cancel := context.WithTimeout(ctx, 10*time.Millisecond)
defer cancel()
fakeTM.setSlow(true)
defer func() { fakeTM.setSlow(false) }()
err := client.Ping(shortCtx, tablet)
if err == nil {
t.Fatal("tmRPCTestRPCTimeout: RPC with expired context did not fail")
}
select {
case <-shortCtx.Done():
if err := shortCtx.Err(); err != context.DeadlineExceeded {
t.Errorf("tmRPCTestRPCTimeout: got %v want context.DeadlineExceeded", err)
}
default:
t.Errorf("tmRPCTestRPCTimeout: context.Done() not closed")
}
}
var testGetSchemaTables = []string{"table1", "table2"}
var testGetSchemaExcludeTables = []string{"etable1", "etable2", "etable3"}
var testGetSchemaReply = &tabletmanagerdatapb.SchemaDefinition{
DatabaseSchema: "CREATE DATABASE {{.DatabaseName}}",
TableDefinitions: []*tabletmanagerdatapb.TableDefinition{
{
Name: "table_name",
Schema: "create table_name",
Columns: []string{"col1", "col2"},
PrimaryKeyColumns: []string{"col1"},
Type: tmutils.TableView,
DataLength: 12,
RowCount: 6,
},
{
Name: "table_name2",
Schema: "create table_name2",
Columns: []string{"col1"},
PrimaryKeyColumns: []string{"col1"},
Type: tmutils.TableBaseTable,
DataLength: 12,
RowCount: 6,
},
},
Version: "xxx",
}
func (fra *fakeRPCTM) GetSchema(ctx context.Context, tables, excludeTables []string, includeViews bool) (*tabletmanagerdatapb.SchemaDefinition, error) {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
compare(fra.t, "GetSchema tables", tables, testGetSchemaTables)
compare(fra.t, "GetSchema excludeTables", excludeTables, testGetSchemaExcludeTables)
compareBool(fra.t, "GetSchema includeViews", includeViews)
return testGetSchemaReply, nil
}
func tmRPCTestGetSchema(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
result, err := client.GetSchema(ctx, tablet, testGetSchemaTables, testGetSchemaExcludeTables, true)
compareError(t, "GetSchema", err, result, testGetSchemaReply)
}
func tmRPCTestGetSchemaPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
_, err := client.GetSchema(ctx, tablet, testGetSchemaTables, testGetSchemaExcludeTables, true)
expectHandleRPCPanic(t, "GetSchema", false /*verbose*/, err)
}
var testGetPermissionsReply = &tabletmanagerdatapb.Permissions{
UserPermissions: []*tabletmanagerdatapb.UserPermission{
{
Host: "host1",
User: "user1",
PasswordChecksum: 666,
Privileges: map[string]string{
"create": "yes",
"delete": "no",
},
},
},
DbPermissions: []*tabletmanagerdatapb.DbPermission{
{
Host: "host2",
Db: "db1",
User: "user2",
Privileges: map[string]string{
"create": "no",
"delete": "yes",
},
},
},
}
func (fra *fakeRPCTM) GetPermissions(ctx context.Context) (*tabletmanagerdatapb.Permissions, error) {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
return testGetPermissionsReply, nil
}
func tmRPCTestGetPermissions(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
result, err := client.GetPermissions(ctx, tablet)
compareError(t, "GetPermissions", err, result, testGetPermissionsReply)
}
func tmRPCTestGetPermissionsPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
_, err := client.GetPermissions(ctx, tablet)
expectHandleRPCPanic(t, "GetPermissions", false /*verbose*/, err)
}
//
// Various read-write methods
//
var testSetReadOnlyExpectedValue bool
func (fra *fakeRPCTM) SetReadOnly(ctx context.Context, rdonly bool) error {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
if rdonly != testSetReadOnlyExpectedValue {
fra.t.Errorf("Wrong SetReadOnly value: got %v expected %v", rdonly, testSetReadOnlyExpectedValue)
}
return nil
}
func tmRPCTestSetReadOnly(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
testSetReadOnlyExpectedValue = true
err := client.SetReadOnly(ctx, tablet)
if err != nil {
t.Errorf("SetReadOnly failed: %v", err)
}
testSetReadOnlyExpectedValue = false
err = client.SetReadWrite(ctx, tablet)
if err != nil {
t.Errorf("SetReadWrite failed: %v", err)
}
}
func tmRPCTestSetReadOnlyPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.SetReadOnly(ctx, tablet)
expectHandleRPCPanic(t, "SetReadOnly", true /*verbose*/, err)
err = client.SetReadWrite(ctx, tablet)
expectHandleRPCPanic(t, "SetReadWrite", true /*verbose*/, err)
}
var testChangeTypeValue = topodatapb.TabletType_REPLICA
func (fra *fakeRPCTM) ChangeType(ctx context.Context, tabletType topodatapb.TabletType) error {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
compare(fra.t, "ChangeType tabletType", tabletType, testChangeTypeValue)
return nil
}
func tmRPCTestChangeType(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.ChangeType(ctx, tablet, testChangeTypeValue)
if err != nil {
t.Errorf("ChangeType failed: %v", err)
}
}
func tmRPCTestChangeTypePanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.ChangeType(ctx, tablet, testChangeTypeValue)
expectHandleRPCPanic(t, "ChangeType", true /*verbose*/, err)
}
var testSleepDuration = time.Minute
func (fra *fakeRPCTM) Sleep(ctx context.Context, duration time.Duration) {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
compare(fra.t, "Sleep duration", duration, testSleepDuration)
}
func tmRPCTestSleep(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.Sleep(ctx, tablet, testSleepDuration)
if err != nil {
t.Errorf("Sleep failed: %v", err)
}
}
func tmRPCTestSleepPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.Sleep(ctx, tablet, testSleepDuration)
expectHandleRPCPanic(t, "Sleep", true /*verbose*/, err)
}
var testExecuteHookHook = &hook.Hook{
Name: "captain hook",
Parameters: []string{"param1", "param2"},
ExtraEnv: map[string]string{
"boat": "blue",
"sea": "red",
},
}
var testExecuteHookHookResult = &hook.HookResult{
ExitStatus: hook.HOOK_STAT_FAILED,
Stdout: "out",
Stderr: "err",
}
func (fra *fakeRPCTM) ExecuteHook(ctx context.Context, hk *hook.Hook) *hook.HookResult {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
compare(fra.t, "ExecuteHook hook", hk, testExecuteHookHook)
return testExecuteHookHookResult
}
func tmRPCTestExecuteHook(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
hr, err := client.ExecuteHook(ctx, tablet, testExecuteHookHook)
compareError(t, "ExecuteHook", err, hr, testExecuteHookHookResult)
}
func tmRPCTestExecuteHookPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
_, err := client.ExecuteHook(ctx, tablet, testExecuteHookHook)
expectHandleRPCPanic(t, "ExecuteHook", true /*verbose*/, err)
}
var testRefreshStateCalled = false
func (fra *fakeRPCTM) RefreshState(ctx context.Context) error {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
if testRefreshStateCalled {
fra.t.Errorf("RefreshState called multiple times?")
}
testRefreshStateCalled = true
return nil
}
func tmRPCTestRefreshState(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.RefreshState(ctx, tablet)
if err != nil {
t.Errorf("RefreshState failed: %v", err)
}
if !testRefreshStateCalled {
t.Errorf("RefreshState didn't call the server side")
}
}
func tmRPCTestRefreshStatePanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.RefreshState(ctx, tablet)
expectHandleRPCPanic(t, "RefreshState", true /*verbose*/, err)
}
func (fra *fakeRPCTM) RunHealthCheck(ctx context.Context) {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
}
var testIgnoreHealthErrorValue = ".*"
func (fra *fakeRPCTM) IgnoreHealthError(ctx context.Context, pattern string) error {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
compare(fra.t, "IgnoreHealthError pattern", pattern, testIgnoreHealthErrorValue)
return nil
}
func tmRPCTestRunHealthCheck(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.RunHealthCheck(ctx, tablet)
if err != nil {
t.Errorf("RunHealthCheck failed: %v", err)
}
}
func tmRPCTestRunHealthCheckPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.RunHealthCheck(ctx, tablet)
expectHandleRPCPanic(t, "RunHealthCheck", false /*verbose*/, err)
}
func tmRPCTestIgnoreHealthError(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.IgnoreHealthError(ctx, tablet, testIgnoreHealthErrorValue)
if err != nil {
t.Errorf("IgnoreHealthError failed: %v", err)
}
}
func tmRPCTestIgnoreHealthErrorPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.IgnoreHealthError(ctx, tablet, testIgnoreHealthErrorValue)
expectHandleRPCPanic(t, "IgnoreHealthError", false /*verbose*/, err)
}
var testReloadSchemaCalled = false
func (fra *fakeRPCTM) ReloadSchema(ctx context.Context, waitPosition string) error {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
if testReloadSchemaCalled {
fra.t.Errorf("ReloadSchema called multiple times?")
}
testReloadSchemaCalled = true
return nil
}
func tmRPCTestReloadSchema(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.ReloadSchema(ctx, tablet, "")
if err != nil {
t.Errorf("ReloadSchema failed: %v", err)
}
if !testReloadSchemaCalled {
t.Errorf("ReloadSchema didn't call the server side")
}
}
func tmRPCTestReloadSchemaPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.ReloadSchema(ctx, tablet, "")
expectHandleRPCPanic(t, "ReloadSchema", false /*verbose*/, err)
}
var testPreflightSchema = []string{"change table add table cloth"}
var testSchemaChangeResult = []*tabletmanagerdatapb.SchemaChangeResult{
{
BeforeSchema: testGetSchemaReply,
AfterSchema: testGetSchemaReply,
},
}
func (fra *fakeRPCTM) PreflightSchema(ctx context.Context, changes []string) ([]*tabletmanagerdatapb.SchemaChangeResult, error) {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
compare(fra.t, "PreflightSchema result", changes, testPreflightSchema)
return testSchemaChangeResult, nil
}
func tmRPCTestPreflightSchema(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
scr, err := client.PreflightSchema(ctx, tablet, testPreflightSchema)
compareError(t, "PreflightSchema", err, scr, testSchemaChangeResult)
}
func tmRPCTestPreflightSchemaPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
_, err := client.PreflightSchema(ctx, tablet, testPreflightSchema)
expectHandleRPCPanic(t, "PreflightSchema", true /*verbose*/, err)
}
var testSchemaChange = &tmutils.SchemaChange{
SQL: "alter table add fruit basket",
Force: true,
AllowReplication: true,
BeforeSchema: testGetSchemaReply,
AfterSchema: testGetSchemaReply,
}
func (fra *fakeRPCTM) ApplySchema(ctx context.Context, change *tmutils.SchemaChange) (*tabletmanagerdatapb.SchemaChangeResult, error) {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
if !change.Equal(testSchemaChange) {
fra.t.Errorf("Unexpected ApplySchema change:\ngot %#v\nwant %#v", change, testSchemaChange)
}
return testSchemaChangeResult[0], nil
}
func tmRPCTestApplySchema(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
scr, err := client.ApplySchema(ctx, tablet, testSchemaChange)
compareError(t, "ApplySchema", err, scr, testSchemaChangeResult[0])
}
func tmRPCTestApplySchemaPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
_, err := client.ApplySchema(ctx, tablet, testSchemaChange)
expectHandleRPCPanic(t, "ApplySchema", true /*verbose*/, err)
}
var testExecuteFetchQuery = []byte("fetch this invalid utf8 character \x80")
var testExecuteFetchMaxRows = 100
var testExecuteFetchResult = &querypb.QueryResult{
Fields: []*querypb.Field{
{
Name: "column1",
Type: sqltypes.Blob,
},
{
Name: "column2",
Type: sqltypes.Datetime,
},
},
RowsAffected: 10,
InsertId: 32,
Rows: []*querypb.Row{
{
Lengths: []int64{
3,
-1,
},
Values: []byte{
'A', 'B', 'C',
},
},
},
}
func (fra *fakeRPCTM) ExecuteFetchAsDba(ctx context.Context, query []byte, dbName string, maxrows int, disableBinlogs bool, reloadSchema bool) (*querypb.QueryResult, error) {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
compare(fra.t, "ExecuteFetchAsDba query", query, testExecuteFetchQuery)
compare(fra.t, "ExecuteFetchAsDba maxrows", maxrows, testExecuteFetchMaxRows)
compareBool(fra.t, "ExecuteFetchAsDba disableBinlogs", disableBinlogs)
compareBool(fra.t, "ExecuteFetchAsDba reloadSchema", reloadSchema)
return testExecuteFetchResult, nil
}
func (fra *fakeRPCTM) ExecuteFetchAsAllPrivs(ctx context.Context, query []byte, dbName string, maxrows int, reloadSchema bool) (*querypb.QueryResult, error) {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
compare(fra.t, "ExecuteFetchAsAllPrivs query", query, testExecuteFetchQuery)
compare(fra.t, "ExecuteFetchAsAllPrivs maxrows", maxrows, testExecuteFetchMaxRows)
compareBool(fra.t, "ExecuteFetchAsAllPrivs reloadSchema", reloadSchema)
return testExecuteFetchResult, nil
}
func (fra *fakeRPCTM) ExecuteFetchAsApp(ctx context.Context, query []byte, maxrows int) (*querypb.QueryResult, error) {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
compare(fra.t, "ExecuteFetchAsApp query", query, testExecuteFetchQuery)
compare(fra.t, "ExecuteFetchAsApp maxrows", maxrows, testExecuteFetchMaxRows)
return testExecuteFetchResult, nil
}
func tmRPCTestExecuteFetch(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
// using pool
qr, err := client.ExecuteFetchAsDba(ctx, tablet, true, testExecuteFetchQuery, testExecuteFetchMaxRows, true, true)
compareError(t, "ExecuteFetchAsDba", err, qr, testExecuteFetchResult)
qr, err = client.ExecuteFetchAsApp(ctx, tablet, true, testExecuteFetchQuery, testExecuteFetchMaxRows)
compareError(t, "ExecuteFetchAsApp", err, qr, testExecuteFetchResult)
// not using pool
qr, err = client.ExecuteFetchAsDba(ctx, tablet, false, testExecuteFetchQuery, testExecuteFetchMaxRows, true, true)
compareError(t, "ExecuteFetchAsDba", err, qr, testExecuteFetchResult)
qr, err = client.ExecuteFetchAsApp(ctx, tablet, false, testExecuteFetchQuery, testExecuteFetchMaxRows)
compareError(t, "ExecuteFetchAsApp", err, qr, testExecuteFetchResult)
qr, err = client.ExecuteFetchAsAllPrivs(ctx, tablet, testExecuteFetchQuery, testExecuteFetchMaxRows, true)
compareError(t, "ExecuteFetchAsAllPrivs", err, qr, testExecuteFetchResult)
}
func tmRPCTestExecuteFetchPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
// using pool
_, err := client.ExecuteFetchAsDba(ctx, tablet, true, testExecuteFetchQuery, testExecuteFetchMaxRows, true, false)
expectHandleRPCPanic(t, "ExecuteFetchAsDba", false /*verbose*/, err)
_, err = client.ExecuteFetchAsApp(ctx, tablet, true, testExecuteFetchQuery, testExecuteFetchMaxRows)
expectHandleRPCPanic(t, "ExecuteFetchAsApp", false /*verbose*/, err)
// not using pool
_, err = client.ExecuteFetchAsDba(ctx, tablet, false, testExecuteFetchQuery, testExecuteFetchMaxRows, true, false)
expectHandleRPCPanic(t, "ExecuteFetchAsDba", false /*verbose*/, err)
_, err = client.ExecuteFetchAsApp(ctx, tablet, false, testExecuteFetchQuery, testExecuteFetchMaxRows)
expectHandleRPCPanic(t, "ExecuteFetchAsApp", false /*verbose*/, err)
_, err = client.ExecuteFetchAsAllPrivs(ctx, tablet, testExecuteFetchQuery, testExecuteFetchMaxRows, false)
expectHandleRPCPanic(t, "ExecuteFetchAsAllPrivs", false /*verbose*/, err)
}
//
// Replication related methods
//
var testReplicationStatus = &replicationdatapb.Status{
Position: "MariaDB/1-345-789",
IoThreadRunning: true,
SqlThreadRunning: true,
SecondsBehindMaster: 654,
MasterHost: "master.host",
MasterPort: 3366,
MasterConnectRetry: 12,
}
var testMasterStatus = &replicationdatapb.MasterStatus{Position: "MariaDB/1-345-789"}
func (fra *fakeRPCTM) MasterStatus(ctx context.Context) (*replicationdatapb.MasterStatus, error) {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
return testMasterStatus, nil
}
func (fra *fakeRPCTM) ReplicationStatus(ctx context.Context) (*replicationdatapb.Status, error) {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
return testReplicationStatus, nil
}
func tmRPCTestReplicationStatus(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
rs, err := client.ReplicationStatus(ctx, tablet)
compareError(t, "ReplicationStatus", err, rs, testReplicationStatus)
}
func tmRPCTestReplicationStatusPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
_, err := client.ReplicationStatus(ctx, tablet)
expectHandleRPCPanic(t, "ReplicationStatus", false /*verbose*/, err)
}
var testReplicationPosition = "MariaDB/5-456-890"
func (fra *fakeRPCTM) MasterPosition(ctx context.Context) (string, error) {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
return testReplicationPosition, nil
}
func (fra *fakeRPCTM) WaitForPosition(ctx context.Context, pos string) error {
panic("unimplemented")
}
func tmRPCTestMasterPosition(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
	rp, err := client.MasterPosition(ctx, tablet)
	compareError(t, "MasterPosition", err, rp, testReplicationPosition)
}
func tmRPCTestMasterPositionPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
_, err := client.MasterPosition(ctx, tablet)
expectHandleRPCPanic(t, "MasterPosition", false /*verbose*/, err)
}
var testStopReplicationCalled = false
func (fra *fakeRPCTM) StopReplication(ctx context.Context) error {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
testStopReplicationCalled = true
return nil
}
func tmRPCTestStopReplication(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.StopReplication(ctx, tablet)
compareError(t, "StopReplication", err, true, testStopReplicationCalled)
}
func tmRPCTestStopReplicationPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.StopReplication(ctx, tablet)
expectHandleRPCPanic(t, "StopReplication", true /*verbose*/, err)
}
var testStopReplicationMinimumWaitTime = time.Hour
func (fra *fakeRPCTM) StopReplicationMinimum(ctx context.Context, position string, waitTime time.Duration) (string, error) {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
compare(fra.t, "StopReplicationMinimum position", position, testReplicationPosition)
compare(fra.t, "StopReplicationMinimum waitTime", waitTime, testStopReplicationMinimumWaitTime)
return testReplicationPositionReturned, nil
}
func tmRPCTestStopReplicationMinimum(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
pos, err := client.StopReplicationMinimum(ctx, tablet, testReplicationPosition, testStopReplicationMinimumWaitTime)
compareError(t, "StopReplicationMinimum", err, pos, testReplicationPositionReturned)
}
func tmRPCTestStopReplicationMinimumPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
_, err := client.StopReplicationMinimum(ctx, tablet, testReplicationPosition, testStopReplicationMinimumWaitTime)
expectHandleRPCPanic(t, "StopReplicationMinimum", true /*verbose*/, err)
}
var testStartReplicationCalled = false
func (fra *fakeRPCTM) StartReplication(ctx context.Context) error {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
testStartReplicationCalled = true
return nil
}
func tmRPCTestStartReplication(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.StartReplication(ctx, tablet)
compareError(t, "StartReplication", err, true, testStartReplicationCalled)
}
func tmRPCTestStartReplicationPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.StartReplication(ctx, tablet)
expectHandleRPCPanic(t, "StartReplication", true /*verbose*/, err)
}
var testStartReplicationUntilAfterCalledWith = ""
func (fra *fakeRPCTM) StartReplicationUntilAfter(ctx context.Context, position string, waitTime time.Duration) error {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
testStartReplicationUntilAfterCalledWith = position
return nil
}
func tmRPCTestStartReplicationUntilAfter(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.StartReplicationUntilAfter(ctx, tablet, "test-position", time.Minute)
compareError(t, "StartReplicationUntilAfter", err, "test-position", testStartReplicationUntilAfterCalledWith)
}
var testGetReplicasResult = []string{"replica1", "replica22"}
func (fra *fakeRPCTM) GetReplicas(ctx context.Context) ([]string, error) {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
return testGetReplicasResult, nil
}
func tmRPCTestGetReplicas(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
s, err := client.GetReplicas(ctx, tablet)
compareError(t, "GetReplicas", err, s, testGetReplicasResult)
}
func tmRPCTestGetReplicasPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
_, err := client.GetReplicas(ctx, tablet)
expectHandleRPCPanic(t, "GetReplicas", false /*verbose*/, err)
}
func (fra *fakeRPCTM) VExec(ctx context.Context, query, workflow, keyspace string) (*querypb.QueryResult, error) {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
compare(fra.t, "VExec query", query, "query")
return testExecuteFetchResult, nil
}
var testVRQuery = "query"
func (fra *fakeRPCTM) VReplicationExec(ctx context.Context, query string) (*querypb.QueryResult, error) {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
compare(fra.t, "VReplicationExec query", query, testVRQuery)
return testExecuteFetchResult, nil
}
func tmRPCTestVReplicationExec(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
rp, err := client.VReplicationExec(ctx, tablet, testVRQuery)
compareError(t, "VReplicationExec", err, rp, testExecuteFetchResult)
}
func tmRPCTestVReplicationExecPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
_, err := client.VReplicationExec(ctx, tablet, testVRQuery)
expectHandleRPCPanic(t, "VReplicationExec", true /*verbose*/, err)
}
var (
wfpid = 3
wfppos = ""
)
func (fra *fakeRPCTM) VReplicationWaitForPos(ctx context.Context, id int, pos string) error {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
compare(fra.t, "VReplicationWaitForPos id", id, wfpid)
compare(fra.t, "VReplicationWaitForPos pos", pos, wfppos)
return nil
}
func tmRPCTestVReplicationWaitForPos(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.VReplicationWaitForPos(ctx, tablet, wfpid, wfppos)
compareError(t, "VReplicationWaitForPos", err, true, true)
}
func tmRPCTestVReplicationWaitForPosPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.VReplicationWaitForPos(ctx, tablet, wfpid, wfppos)
expectHandleRPCPanic(t, "VReplicationWaitForPos", true /*verbose*/, err)
}
//
// Reparenting related functions
//
var testResetReplicationCalled = false
func (fra *fakeRPCTM) ResetReplication(ctx context.Context) error {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
testResetReplicationCalled = true
return nil
}
func tmRPCTestResetReplication(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.ResetReplication(ctx, tablet)
compareError(t, "ResetReplication", err, true, testResetReplicationCalled)
}
func tmRPCTestResetReplicationPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.ResetReplication(ctx, tablet)
expectHandleRPCPanic(t, "ResetReplication", true /*verbose*/, err)
}
func (fra *fakeRPCTM) InitMaster(ctx context.Context) (string, error) {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
return testReplicationPosition, nil
}
func tmRPCTestInitMaster(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
rp, err := client.InitMaster(ctx, tablet)
compareError(t, "InitMaster", err, rp, testReplicationPosition)
}
func tmRPCTestInitMasterPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
_, err := client.InitMaster(ctx, tablet)
expectHandleRPCPanic(t, "InitMaster", true /*verbose*/, err)
}
var testPopulateReparentJournalCalled = false
var testTimeCreatedNS int64 = 4569900
var testWaitPosition = "test wait position"
var testActionName = "TestActionName"
var testMasterAlias = &topodatapb.TabletAlias{
Cell: "ce",
Uid: 372,
}
func (fra *fakeRPCTM) PopulateReparentJournal(ctx context.Context, timeCreatedNS int64, actionName string, masterAlias *topodatapb.TabletAlias, position string) error {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
compare(fra.t, "PopulateReparentJournal timeCreatedNS", timeCreatedNS, testTimeCreatedNS)
compare(fra.t, "PopulateReparentJournal actionName", actionName, testActionName)
compare(fra.t, "PopulateReparentJournal masterAlias", masterAlias, testMasterAlias)
compare(fra.t, "PopulateReparentJournal pos", position, testReplicationPosition)
testPopulateReparentJournalCalled = true
return nil
}
func tmRPCTestPopulateReparentJournal(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.PopulateReparentJournal(ctx, tablet, testTimeCreatedNS, testActionName, testMasterAlias, testReplicationPosition)
compareError(t, "PopulateReparentJournal", err, true, testPopulateReparentJournalCalled)
}
func tmRPCTestPopulateReparentJournalPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.PopulateReparentJournal(ctx, tablet, testTimeCreatedNS, testActionName, testMasterAlias, testReplicationPosition)
expectHandleRPCPanic(t, "PopulateReparentJournal", false /*verbose*/, err)
}
var testInitReplicaCalled = false
func (fra *fakeRPCTM) InitReplica(ctx context.Context, parent *topodatapb.TabletAlias, position string, timeCreatedNS int64) error {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
compare(fra.t, "InitReplica parent", parent, testMasterAlias)
compare(fra.t, "InitReplica pos", position, testReplicationPosition)
compare(fra.t, "InitReplica timeCreatedNS", timeCreatedNS, testTimeCreatedNS)
testInitReplicaCalled = true
return nil
}
func tmRPCTestInitReplica(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.InitReplica(ctx, tablet, testMasterAlias, testReplicationPosition, testTimeCreatedNS)
compareError(t, "InitReplica", err, true, testInitReplicaCalled)
}
func tmRPCTestInitReplicaPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.InitReplica(ctx, tablet, testMasterAlias, testReplicationPosition, testTimeCreatedNS)
expectHandleRPCPanic(t, "InitReplica", true /*verbose*/, err)
}
func (fra *fakeRPCTM) DemoteMaster(ctx context.Context) (*replicationdatapb.MasterStatus, error) {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
return testMasterStatus, nil
}
func tmRPCTestDemoteMaster(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
masterStatus, err := client.DemoteMaster(ctx, tablet)
compareError(t, "DemoteMaster", err, masterStatus.Position, testMasterStatus.Position)
}
func tmRPCTestDemoteMasterPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
_, err := client.DemoteMaster(ctx, tablet)
expectHandleRPCPanic(t, "DemoteMaster", true /*verbose*/, err)
}
var testUndoDemoteMasterCalled = false
func (fra *fakeRPCTM) UndoDemoteMaster(ctx context.Context) error {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
return nil
}
func tmRPCTestUndoDemoteMaster(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.UndoDemoteMaster(ctx, tablet)
testUndoDemoteMasterCalled = true
compareError(t, "UndoDemoteMaster", err, true, testUndoDemoteMasterCalled)
}
func tmRPCTestUndoDemoteMasterPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.UndoDemoteMaster(ctx, tablet)
expectHandleRPCPanic(t, "UndoDemoteMaster", true /*verbose*/, err)
}
var testReplicationPositionReturned = "MariaDB/5-567-3456"
var testReplicaWasPromotedCalled = false
func (fra *fakeRPCTM) ReplicaWasPromoted(ctx context.Context) error {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
testReplicaWasPromotedCalled = true
return nil
}
func tmRPCTestReplicaWasPromoted(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.ReplicaWasPromoted(ctx, tablet)
compareError(t, "ReplicaWasPromoted", err, true, testReplicaWasPromotedCalled)
}
func tmRPCTestReplicaWasPromotedPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.ReplicaWasPromoted(ctx, tablet)
expectHandleRPCPanic(t, "ReplicaWasPromoted", true /*verbose*/, err)
}
var testSetMasterCalled = false
var testForceStartReplica = true
func (fra *fakeRPCTM) SetMaster(ctx context.Context, parent *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplica bool) error {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
compare(fra.t, "SetMaster parent", parent, testMasterAlias)
compare(fra.t, "SetMaster timeCreatedNS", timeCreatedNS, testTimeCreatedNS)
compare(fra.t, "SetMaster waitPosition", waitPosition, testWaitPosition)
compare(fra.t, "SetMaster forceStartReplica", forceStartReplica, testForceStartReplica)
testSetMasterCalled = true
return nil
}
func tmRPCTestSetMaster(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.SetMaster(ctx, tablet, testMasterAlias, testTimeCreatedNS, testWaitPosition, testForceStartReplica)
compareError(t, "SetMaster", err, true, testSetMasterCalled)
}
func tmRPCTestSetMasterPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.SetMaster(ctx, tablet, testMasterAlias, testTimeCreatedNS, testWaitPosition, testForceStartReplica)
expectHandleRPCPanic(t, "SetMaster", true /*verbose*/, err)
}
func (fra *fakeRPCTM) StopReplicationAndGetStatus(ctx context.Context, stopReplicationMode replicationdatapb.StopReplicationMode) (tabletmanager.StopReplicationAndGetStatusResponse, error) {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
return tabletmanager.StopReplicationAndGetStatusResponse{
HybridStatus: testReplicationStatus,
Status: &replicationdatapb.StopReplicationStatus{
Before: testReplicationStatus,
After: testReplicationStatus,
},
}, nil
}
var testReplicaWasRestartedParent = &topodatapb.TabletAlias{
Cell: "prison",
Uid: 42,
}
var testReplicaWasRestartedCalled = false
func (fra *fakeRPCTM) ReplicaWasRestarted(ctx context.Context, parent *topodatapb.TabletAlias) error {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
compare(fra.t, "ReplicaWasRestarted parent", parent, testReplicaWasRestartedParent)
testReplicaWasRestartedCalled = true
return nil
}
func tmRPCTestReplicaWasRestarted(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.ReplicaWasRestarted(ctx, tablet, testReplicaWasRestartedParent)
compareError(t, "ReplicaWasRestarted", err, true, testReplicaWasRestartedCalled)
}
func tmRPCTestReplicaWasRestartedPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
err := client.ReplicaWasRestarted(ctx, tablet, testReplicaWasRestartedParent)
expectHandleRPCPanic(t, "ReplicaWasRestarted", true /*verbose*/, err)
}
func tmRPCTestStopReplicationAndGetStatus(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
rp, _, err := client.StopReplicationAndGetStatus(ctx, tablet, replicationdatapb.StopReplicationMode_IOANDSQLTHREAD)
compareError(t, "StopReplicationAndGetStatus", err, rp, testReplicationStatus)
rp, _, err = client.StopReplicationAndGetStatus(ctx, tablet, replicationdatapb.StopReplicationMode_IOTHREADONLY)
compareError(t, "StopReplicationAndGetStatus", err, rp, testReplicationStatus)
}
func tmRPCTestStopReplicationAndGetStatusPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
_, _, err := client.StopReplicationAndGetStatus(ctx, tablet, replicationdatapb.StopReplicationMode_IOANDSQLTHREAD)
expectHandleRPCPanic(t, "StopReplicationAndGetStatus", true /*verbose*/, err)
}
func (fra *fakeRPCTM) PromoteReplica(ctx context.Context) (string, error) {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
return testReplicationPosition, nil
}
func tmRPCTestPromoteReplica(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
rp, err := client.PromoteReplica(ctx, tablet)
compareError(t, "PromoteReplica", err, rp, testReplicationPosition)
}
func tmRPCTestPromoteReplicaPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
_, err := client.PromoteReplica(ctx, tablet)
expectHandleRPCPanic(t, "PromoteReplica", true /*verbose*/, err)
}
//
// Backup / restore related methods
//
var testBackupConcurrency = 24
var testBackupAllowMaster = false
var testBackupCalled = false
var testRestoreFromBackupCalled = false
func (fra *fakeRPCTM) Backup(ctx context.Context, concurrency int, logger logutil.Logger, allowMaster bool) error {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
compare(fra.t, "Backup args", concurrency, testBackupConcurrency)
compare(fra.t, "Backup args", allowMaster, testBackupAllowMaster)
logStuff(logger, 10)
testBackupCalled = true
return nil
}
func tmRPCTestBackup(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
stream, err := client.Backup(ctx, tablet, testBackupConcurrency, testBackupAllowMaster)
if err != nil {
t.Fatalf("Backup failed: %v", err)
}
err = compareLoggedStuff(t, "Backup", stream, 10)
compareError(t, "Backup", err, true, testBackupCalled)
}
func tmRPCTestBackupPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
stream, err := client.Backup(ctx, tablet, testBackupConcurrency, testBackupAllowMaster)
if err != nil {
t.Fatalf("Backup failed: %v", err)
}
e, err := stream.Recv()
if err == nil {
t.Fatalf("Unexpected Backup logs: %v", e)
}
expectHandleRPCPanic(t, "Backup", true /*verbose*/, err)
}
func (fra *fakeRPCTM) RestoreFromBackup(ctx context.Context, logger logutil.Logger) error {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
logStuff(logger, 10)
testRestoreFromBackupCalled = true
return nil
}
func tmRPCTestRestoreFromBackup(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
stream, err := client.RestoreFromBackup(ctx, tablet)
if err != nil {
t.Fatalf("RestoreFromBackup failed: %v", err)
}
err = compareLoggedStuff(t, "RestoreFromBackup", stream, 10)
compareError(t, "RestoreFromBackup", err, true, testRestoreFromBackupCalled)
}
func tmRPCTestRestoreFromBackupPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
stream, err := client.RestoreFromBackup(ctx, tablet)
if err != nil {
t.Fatalf("RestoreFromBackup failed: %v", err)
}
e, err := stream.Recv()
if err == nil {
t.Fatalf("Unexpected RestoreFromBackup logs: %v", e)
}
expectHandleRPCPanic(t, "RestoreFromBackup", true /*verbose*/, err)
}
//
// RPC helpers
//
// HandleRPCPanic is part of the RPCTM interface
func (fra *fakeRPCTM) HandleRPCPanic(ctx context.Context, name string, args, reply interface{}, verbose bool, err *error) {
if x := recover(); x != nil {
// Use the panic case to make sure 'name' and 'verbose' are right.
*err = fmt.Errorf("HandleRPCPanic caught panic during %v with verbose %v", name, verbose)
}
}
// methods to test individual API calls
// Run will run the test suite using the provided client and
// the provided tablet. Tablet's vt address needs to be configured so
// the client will connect to a server backed by our RPCTM (returned
// by NewFakeRPCTM)
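//
// A sketch of the expected wiring (startTestServer and tabletWithAddr are
// hypothetical helpers, not part of this package):
//
//	fakeTM := NewFakeRPCTM(t)
//	addr := startTestServer(t, fakeTM)  // assumed to serve fakeTM over gRPC
//	tablet := tabletWithAddr(addr)      // *topodatapb.Tablet pointing at that address
//	Run(t, tmclient.NewTabletManagerClient(), tablet, fakeTM)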
func Run(t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet, fakeTM tabletmanager.RPCTM) {
ctx := context.Background()
// Test RPC specific methods of the interface.
tmRPCTestDialExpiredContext(ctx, t, client, tablet)
tmRPCTestRPCTimeout(ctx, t, client, tablet, fakeTM.(*fakeRPCTM))
// Various read-only methods
tmRPCTestPing(ctx, t, client, tablet)
tmRPCTestGetSchema(ctx, t, client, tablet)
tmRPCTestGetPermissions(ctx, t, client, tablet)
// Various read-write methods
tmRPCTestSetReadOnly(ctx, t, client, tablet)
tmRPCTestChangeType(ctx, t, client, tablet)
tmRPCTestSleep(ctx, t, client, tablet)
tmRPCTestExecuteHook(ctx, t, client, tablet)
tmRPCTestRefreshState(ctx, t, client, tablet)
tmRPCTestRunHealthCheck(ctx, t, client, tablet)
tmRPCTestIgnoreHealthError(ctx, t, client, tablet)
tmRPCTestReloadSchema(ctx, t, client, tablet)
tmRPCTestPreflightSchema(ctx, t, client, tablet)
tmRPCTestApplySchema(ctx, t, client, tablet)
tmRPCTestExecuteFetch(ctx, t, client, tablet)
// Replication related methods
tmRPCTestMasterPosition(ctx, t, client, tablet)
tmRPCTestReplicationStatus(ctx, t, client, tablet)
tmRPCTestStopReplication(ctx, t, client, tablet)
tmRPCTestStopReplicationMinimum(ctx, t, client, tablet)
tmRPCTestStartReplication(ctx, t, client, tablet)
tmRPCTestStartReplicationUntilAfter(ctx, t, client, tablet)
tmRPCTestGetReplicas(ctx, t, client, tablet)
// VReplication methods
tmRPCTestVReplicationExec(ctx, t, client, tablet)
tmRPCTestVReplicationWaitForPos(ctx, t, client, tablet)
// Reparenting related functions
tmRPCTestResetReplication(ctx, t, client, tablet)
tmRPCTestInitMaster(ctx, t, client, tablet)
tmRPCTestPopulateReparentJournal(ctx, t, client, tablet)
tmRPCTestDemoteMaster(ctx, t, client, tablet)
tmRPCTestUndoDemoteMaster(ctx, t, client, tablet)
tmRPCTestSetMaster(ctx, t, client, tablet)
tmRPCTestStopReplicationAndGetStatus(ctx, t, client, tablet)
tmRPCTestPromoteReplica(ctx, t, client, tablet)
tmRPCTestInitReplica(ctx, t, client, tablet)
tmRPCTestReplicaWasPromoted(ctx, t, client, tablet)
tmRPCTestReplicaWasRestarted(ctx, t, client, tablet)
// Backup / restore related methods
tmRPCTestBackup(ctx, t, client, tablet)
tmRPCTestRestoreFromBackup(ctx, t, client, tablet)
//
// Tests panic handling everywhere now
//
fakeTM.(*fakeRPCTM).panics = true
// Various read-only methods
tmRPCTestPingPanic(ctx, t, client, tablet)
tmRPCTestGetSchemaPanic(ctx, t, client, tablet)
tmRPCTestGetPermissionsPanic(ctx, t, client, tablet)
// Various read-write methods
tmRPCTestSetReadOnlyPanic(ctx, t, client, tablet)
tmRPCTestChangeTypePanic(ctx, t, client, tablet)
tmRPCTestSleepPanic(ctx, t, client, tablet)
tmRPCTestExecuteHookPanic(ctx, t, client, tablet)
tmRPCTestRefreshStatePanic(ctx, t, client, tablet)
tmRPCTestRunHealthCheckPanic(ctx, t, client, tablet)
tmRPCTestIgnoreHealthErrorPanic(ctx, t, client, tablet)
tmRPCTestReloadSchemaPanic(ctx, t, client, tablet)
tmRPCTestPreflightSchemaPanic(ctx, t, client, tablet)
tmRPCTestApplySchemaPanic(ctx, t, client, tablet)
tmRPCTestExecuteFetchPanic(ctx, t, client, tablet)
// Replication related methods
tmRPCTestMasterPositionPanic(ctx, t, client, tablet)
tmRPCTestReplicationStatusPanic(ctx, t, client, tablet)
tmRPCTestStopReplicationPanic(ctx, t, client, tablet)
tmRPCTestStopReplicationMinimumPanic(ctx, t, client, tablet)
tmRPCTestStartReplicationPanic(ctx, t, client, tablet)
tmRPCTestGetReplicasPanic(ctx, t, client, tablet)
// VReplication methods
tmRPCTestVReplicationExecPanic(ctx, t, client, tablet)
tmRPCTestVReplicationWaitForPosPanic(ctx, t, client, tablet)
// Reparenting related functions
tmRPCTestResetReplicationPanic(ctx, t, client, tablet)
tmRPCTestInitMasterPanic(ctx, t, client, tablet)
tmRPCTestPopulateReparentJournalPanic(ctx, t, client, tablet)
tmRPCTestDemoteMasterPanic(ctx, t, client, tablet)
tmRPCTestUndoDemoteMasterPanic(ctx, t, client, tablet)
tmRPCTestSetMasterPanic(ctx, t, client, tablet)
tmRPCTestStopReplicationAndGetStatusPanic(ctx, t, client, tablet)
tmRPCTestPromoteReplicaPanic(ctx, t, client, tablet)
tmRPCTestInitReplicaPanic(ctx, t, client, tablet)
tmRPCTestReplicaWasPromotedPanic(ctx, t, client, tablet)
tmRPCTestReplicaWasRestartedPanic(ctx, t, client, tablet)
// Backup / restore related methods
tmRPCTestBackupPanic(ctx, t, client, tablet)
tmRPCTestRestoreFromBackupPanic(ctx, t, client, tablet)
client.Close()
}
| {
rs, err := client.MasterPosition(ctx, tablet)
compareError(t, "MasterPosition", err, rs, testReplicationPosition)
} |
types.rs | use std::collections::{BTreeMap, BTreeSet};
use std::collections::{HashMap, HashSet};
use std::convert::From;
use std::default::Default;
use std::error;
use std::fmt;
use std::hash::{BuildHasher, Hash};
use std::io;
use std::str::{from_utf8, Utf8Error};
use futures::Future;
/// Helper enum that describes how an argument behaves in a numeric
/// context: not numeric at all, integer-like, or float-like.
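///
/// A quick sketch of how argument types report this via `ToRedisArgs`
/// (assuming the usual crate-root re-exports):
///
/// ```rust
/// use redis::{NumericBehavior, ToRedisArgs};
/// assert_eq!(1i32.describe_numeric_behavior(), NumericBehavior::NumberIsInteger);
/// assert_eq!(1.5f64.describe_numeric_behavior(), NumericBehavior::NumberIsFloat);
/// assert_eq!("x".describe_numeric_behavior(), NumericBehavior::NonNumeric);
/// ```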
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
pub enum NumericBehavior {
NonNumeric,
NumberIsInteger,
NumberIsFloat,
}
/// An enum of all error kinds.
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub enum ErrorKind {
/// The server generated an invalid response.
ResponseError,
/// The authentication with the server failed.
AuthenticationFailed,
/// Operation failed because of a type mismatch.
TypeError,
/// A script execution was aborted.
ExecAbortError,
/// The server cannot respond because it is loading a dump.
BusyLoadingError,
/// A script that was requested does not actually exist.
NoScriptError,
/// An error that was caused because the parameters to the
/// client were wrong.
InvalidClientConfig,
/// This kind is returned if the redis error is one that is
/// not native to the system. This is usually the case if
/// the cause is another error.
IoError,
/// An extension error. This is an error created by the server
/// that is not directly understood by the library.
ExtensionError,
}
/// Internal low-level redis value enum.
#[derive(PartialEq, Eq, Clone)]
pub enum Value {
/// A nil response from the server.
Nil,
/// An integer response. Note that there are a few situations
/// in which redis actually returns a string for an integer which
/// is why this library generally treats integers and strings
/// the same for all numeric responses.
Int(i64),
/// Arbitrary binary data.
Data(Vec<u8>),
/// A bulk response of more data. This is generally used by redis
/// to express nested structures.
Bulk(Vec<Value>),
/// A status response.
Status(String),
/// A status response which represents the string "OK".
Okay,
}
/// Values are generally not used directly unless you are using the
/// lower-level functionality in the library. For the most part
/// this is hidden with the help of the `FromRedisValue` trait.
///
/// While the redis protocol does have an error type, errors are
/// separated out at an early point, so this value only holds the
/// remaining types.
impl Value {
/// Checks if the return value looks like it fulfils the cursor
/// protocol. That means the result is a bulk item of length
/// two with the first one being a cursor and the second a
/// bulk response.
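///
/// A hand-constructed value in the cursor shape (illustrative only):
///
/// ```rust
/// use redis::Value;
/// let v = Value::Bulk(vec![
///     Value::Data(b"0".to_vec()),
///     Value::Bulk(vec![]),
/// ]);
/// assert!(v.looks_like_cursor());
/// ```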
pub fn looks_like_cursor(&self) -> bool {
match *self {
Value::Bulk(ref items) => {
if items.len() != 2 {
return false;
}
match items[0] {
Value::Data(_) => {}
_ => {
return false;
}
};
match items[1] {
Value::Bulk(_) => {}
_ => {
return false;
}
}
return true;
}
_ => {
return false;
}
}
}
}
impl fmt::Debug for Value {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match *self {
Value::Nil => write!(fmt, "nil"),
Value::Int(val) => write!(fmt, "int({:?})", val),
Value::Data(ref val) => match from_utf8(val) {
Ok(x) => write!(fmt, "string-data('{:?}')", x),
Err(_) => write!(fmt, "binary-data({:?})", val),
},
Value::Bulk(ref values) => {
write!(fmt, "bulk(")?;
let mut is_first = true;
for val in values.iter() {
if !is_first {
write!(fmt, ", ")?;
}
write!(fmt, "{:?}", val)?;
is_first = false;
}
write!(fmt, ")")
}
Value::Okay => write!(fmt, "ok"),
Value::Status(ref s) => write!(fmt, "status({:?})", s),
}
}
}
/// Represents a redis error. For the most part you should be using
/// the Error trait to interact with this rather than the actual
/// struct.
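///
/// A small inspection sketch (the error is hand-made rather than coming
/// from a server):
///
/// ```rust
/// use redis::{ErrorKind, RedisError};
/// let err = RedisError::from((ErrorKind::TypeError, "wrong type"));
/// assert_eq!(err.kind(), ErrorKind::TypeError);
/// assert_eq!(err.category(), "type error");
/// assert!(!err.is_io_error());
/// ```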
pub struct RedisError {
repr: ErrorRepr,
}
#[derive(Debug)]
enum ErrorRepr {
WithDescription(ErrorKind, &'static str),
WithDescriptionAndDetail(ErrorKind, &'static str, String),
ExtensionError(String, String),
IoError(io::Error),
}
impl PartialEq for RedisError {
fn eq(&self, other: &RedisError) -> bool {
match (&self.repr, &other.repr) {
(&ErrorRepr::WithDescription(kind_a, _), &ErrorRepr::WithDescription(kind_b, _)) => {
kind_a == kind_b
}
(
&ErrorRepr::WithDescriptionAndDetail(kind_a, _, _),
&ErrorRepr::WithDescriptionAndDetail(kind_b, _, _),
) => kind_a == kind_b,
(&ErrorRepr::ExtensionError(ref a, _), &ErrorRepr::ExtensionError(ref b, _)) => {
*a == *b
}
_ => false,
}
}
}
impl From<io::Error> for RedisError {
fn from(err: io::Error) -> RedisError {
RedisError {
repr: ErrorRepr::IoError(err),
}
}
}
impl From<Utf8Error> for RedisError {
fn from(_: Utf8Error) -> RedisError {
RedisError {
repr: ErrorRepr::WithDescription(ErrorKind::TypeError, "Invalid UTF-8"),
}
}
}
impl From<(ErrorKind, &'static str)> for RedisError {
fn from((kind, desc): (ErrorKind, &'static str)) -> RedisError {
RedisError {
repr: ErrorRepr::WithDescription(kind, desc),
}
}
}
impl From<(ErrorKind, &'static str, String)> for RedisError {
fn from((kind, desc, detail): (ErrorKind, &'static str, String)) -> RedisError {
RedisError {
repr: ErrorRepr::WithDescriptionAndDetail(kind, desc, detail),
}
}
}
impl error::Error for RedisError {
fn description(&self) -> &str {
match self.repr {
ErrorRepr::WithDescription(_, desc) => desc,
ErrorRepr::WithDescriptionAndDetail(_, desc, _) => desc,
ErrorRepr::ExtensionError(_, _) => "extension error",
ErrorRepr::IoError(ref err) => err.description(),
}
}
fn cause(&self) -> Option<&error::Error> {
match self.repr {
ErrorRepr::IoError(ref err) => Some(err as &error::Error),
_ => None,
}
}
}
impl fmt::Display for RedisError {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match self.repr {
ErrorRepr::WithDescription(_, desc) => desc.fmt(f),
ErrorRepr::WithDescriptionAndDetail(_, desc, ref detail) => {
desc.fmt(f)?;
f.write_str(": ")?;
detail.fmt(f)
}
ErrorRepr::ExtensionError(ref code, ref detail) => {
code.fmt(f)?;
f.write_str(": ")?;
detail.fmt(f)
}
ErrorRepr::IoError(ref err) => err.fmt(f),
}
}
}
impl fmt::Debug for RedisError {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
fmt::Display::fmt(self, f)
}
}
/// Indicates a general failure in the library.
impl RedisError {
/// Returns the kind of the error.
pub fn kind(&self) -> ErrorKind {
match self.repr {
ErrorRepr::WithDescription(kind, _) => kind,
ErrorRepr::WithDescriptionAndDetail(kind, _, _) => kind,
ErrorRepr::ExtensionError(_, _) => ErrorKind::ExtensionError,
ErrorRepr::IoError(_) => ErrorKind::IoError,
}
}
/// Returns the name of the error category for display purposes.
pub fn category(&self) -> &str {
match self.kind() {
ErrorKind::ResponseError => "response error",
ErrorKind::AuthenticationFailed => "authentication failed",
ErrorKind::TypeError => "type error",
ErrorKind::ExecAbortError => "script execution aborted",
ErrorKind::BusyLoadingError => "busy loading",
ErrorKind::NoScriptError => "no script",
ErrorKind::InvalidClientConfig => "invalid client config",
ErrorKind::IoError => "I/O error",
ErrorKind::ExtensionError => "extension error",
}
}
/// Indicates that this failure is an IO failure.
pub fn is_io_error(&self) -> bool {
match self.kind() {
ErrorKind::IoError => true,
_ => false,
}
}
/// Returns true if this error indicates that the connection was
/// refused. You should generally not rely much on this function
/// unless you are writing unit tests that want to detect if a
/// local server is available.
pub fn is_connection_refusal(&self) -> bool {
match self.repr {
ErrorRepr::IoError(ref err) => {
match err.kind() {
io::ErrorKind::ConnectionRefused => true,
// if we connect to a unix socket and the file does not
// exist yet, then we want to treat this as if it was a
// connection refusal.
io::ErrorKind::NotFound => cfg!(unix),
_ => false,
}
}
_ => false,
}
}
/// Returns true if error was caused by I/O time out.
/// Note that this may not be accurate depending on platform.
pub fn is_timeout(&self) -> bool {
match self.repr {
ErrorRepr::IoError(ref err) => match err.kind() {
io::ErrorKind::TimedOut => true,
io::ErrorKind::WouldBlock => true,
_ => false,
},
_ => false,
}
}
/// Returns true if error was caused by a dropped connection.
pub fn is_connection_dropped(&self) -> bool {
match self.repr {
ErrorRepr::IoError(ref err) => match err.kind() {
io::ErrorKind::BrokenPipe | io::ErrorKind::ConnectionReset => true,
_ => false,
},
_ => false,
}
}
/// Returns the extension error code
pub fn extension_error_code(&self) -> Option<&str> {
match self.repr {
ErrorRepr::ExtensionError(ref code, _) => Some(&code),
_ => None,
}
}
}
pub fn make_extension_error(code: &str, detail: Option<&str>) -> RedisError {
RedisError {
repr: ErrorRepr::ExtensionError(
code.to_string(),
match detail {
Some(x) => x.to_string(),
None => "Unknown extension error encountered".to_string(),
},
),
}
}
/// Library generic result type.
pub type RedisResult<T> = Result<T, RedisError>;
pub type RedisFuture<T> = Box<Future<Item = T, Error = RedisError> + Send>;
/// An info dictionary type.
#[derive(Debug)]
pub struct InfoDict {
map: HashMap<String, Value>,
}
/// This type provides convenient access to key/value data returned by
/// the "INFO" command. It acts like a regular mapping but also has
/// a convenience method `get` which can return data in the appropriate
/// type.
///
/// For instance this can be used to query the server for the role it's
/// in (master, slave) etc:
///
/// ```rust,no_run
/// # fn do_something() -> redis::RedisResult<()> {
/// # let client = redis::Client::open("redis://127.0.0.1/").unwrap();
/// # let mut con = client.get_connection().unwrap();
/// let info : redis::InfoDict = redis::cmd("INFO").query(&mut con)?;
/// let role : Option<String> = info.get("role");
/// # Ok(()) }
/// ```
impl InfoDict {
/// Creates a new info dictionary from a string in the response of
/// the INFO command. Each line is a key, value pair with the
/// key and value separated by a colon (`:`). Lines starting with a
/// hash (`#`) are ignored.
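///
/// A parsing sketch with a hand-written INFO payload:
///
/// ```rust
/// let info = redis::InfoDict::new("# Replication\r\nrole:master\r\n");
/// let role: Option<String> = info.get("role");
/// assert_eq!(role, Some("master".to_string()));
/// ```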
pub fn new(kvpairs: &str) -> InfoDict {
let mut map = HashMap::new();
for line in kvpairs.lines() {
if line.len() == 0 || line.starts_with("#") {
continue;
}
let mut p = line.splitn(2, ':');
let k = unwrap_or!(p.next(), continue).to_string();
let v = unwrap_or!(p.next(), continue).to_string();
map.insert(k, Value::Status(v));
}
InfoDict { map }
}
/// Fetches a value by key and converts it into the given type.
/// Typical types are `String`, `bool` and integer types.
pub fn get<T: FromRedisValue>(&self, key: &str) -> Option<T> {
match self.find(&key) {
Some(ref x) => from_redis_value(*x).ok(),
None => None,
}
}
pub fn find(&self, key: &&str) -> Option<&Value> {
self.map.get(*key)
}
pub fn contains_key(&self, key: &&str) -> bool {
self.find(key).is_some()
}
pub fn len(&self) -> usize {
self.map.len()
}
}
pub trait RedisWrite {
fn write_arg(&mut self, arg: &[u8]);
}
impl RedisWrite for Vec<Vec<u8>> {
fn write_arg(&mut self, arg: &[u8]) {
self.push(arg.to_owned());
}
}
/// Used to convert a value into one or multiple redis argument
/// strings. Most values will produce exactly one item but in
/// some cases it might make sense to produce more than one.
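///
/// A sketch of the produced wire arguments (values chosen for
/// illustration):
///
/// ```rust
/// use redis::ToRedisArgs;
/// assert_eq!("hello".to_redis_args(), vec![b"hello".to_vec()]);
/// assert_eq!(42i64.to_redis_args(), vec![b"42".to_vec()]);
/// // A vector flattens into one argument per element.
/// assert_eq!(vec![1u32, 2].to_redis_args(),
///            vec![b"1".to_vec(), b"2".to_vec()]);
/// ```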
pub trait ToRedisArgs: Sized {
/// This converts the value into a vector of bytes. Each item
/// is a single argument. Most items generate a vector of a
/// single item.
///
/// The current exception to this rule is vectors of items.
fn to_redis_args(&self) -> Vec<Vec<u8>> {
let mut out = Vec::new();
self.write_redis_args(&mut out);
out
}
/// This writes the value into a vector of bytes. Each item
/// is a single argument. Most items generate a single item.
///
/// The current exception to this rule is vectors of items.
fn write_redis_args<W>(&self, out: &mut W)
where
W: ?Sized + RedisWrite;
/// Returns information about the contained value with regard
/// to its numeric behavior in a redis context. This is used in
/// some high level concepts to switch between different implementations
/// of redis functions (for instance `INCR` vs `INCRBYFLOAT`).
fn describe_numeric_behavior(&self) -> NumericBehavior {
NumericBehavior::NonNumeric
}
/// Returns an indication of whether the contained value is exactly one
/// argument. It returns false if it is zero or more than one. This
/// is used in some high level functions to intelligently switch
/// between `GET` and `MGET` variants.
fn is_single_arg(&self) -> bool {
true
}
/// This only exists internally as a workaround for the lack of
/// specialization.
#[doc(hidden)]
fn make_arg_vec<W>(items: &[Self], out: &mut W)
where
W: ?Sized + RedisWrite,
{
for item in items.iter() {
item.write_redis_args(out);
}
}
/// This only exists internally as a workaround for the lack of
/// specialization.
#[doc(hidden)]
fn make_arg_iter_ref<'a, I, W>(items: I, out: &mut W)
where
W: ?Sized + RedisWrite,
I: Iterator<Item = &'a Self>,
Self: 'a,
{
for item in items {
item.write_redis_args(out);
}
}
#[doc(hidden)]
fn is_single_vec_arg(items: &[Self]) -> bool {
items.len() == 1 && items[0].is_single_arg()
}
}
macro_rules! invalid_type_error {
($v:expr, $det:expr) => {{
fail!((
ErrorKind::TypeError,
"Response was of incompatible type",
format!("{:?} (response was {:?})", $det, $v)
));
}};
}
macro_rules! itoa_based_to_redis_impl {
($t:ty, $numeric:expr) => {
impl ToRedisArgs for $t {
fn write_redis_args<W>(&self, out: &mut W)
where
W: ?Sized + RedisWrite,
{
let mut buf = ::itoa::Buffer::new();
let s = buf.format(*self);
out.write_arg(s.as_bytes())
}
fn describe_numeric_behavior(&self) -> NumericBehavior {
$numeric
}
}
};
}
macro_rules! dtoa_based_to_redis_impl {
($t:ty, $numeric:expr) => {
impl ToRedisArgs for $t {
fn write_redis_args<W>(&self, out: &mut W)
where
W: ?Sized + RedisWrite,
{
let mut buf = Vec::new();
::dtoa::write(&mut buf, *self).unwrap();
out.write_arg(&buf)
}
fn describe_numeric_behavior(&self) -> NumericBehavior {
$numeric
}
}
};
}
impl ToRedisArgs for u8 {
fn write_redis_args<W>(&self, out: &mut W)
where
W: ?Sized + RedisWrite,
{
let mut buf = [0u8; 3];
let n = ::itoa::write(&mut buf[..], *self).unwrap();
out.write_arg(&buf[..n])
}
fn make_arg_vec<W>(items: &[u8], out: &mut W)
where
W: ?Sized + RedisWrite,
{
out.write_arg(items);
}
fn is_single_vec_arg(_items: &[u8]) -> bool {
true
}
}
itoa_based_to_redis_impl!(i8, NumericBehavior::NumberIsInteger);
itoa_based_to_redis_impl!(i16, NumericBehavior::NumberIsInteger);
itoa_based_to_redis_impl!(u16, NumericBehavior::NumberIsInteger);
itoa_based_to_redis_impl!(i32, NumericBehavior::NumberIsInteger);
itoa_based_to_redis_impl!(u32, NumericBehavior::NumberIsInteger);
itoa_based_to_redis_impl!(i64, NumericBehavior::NumberIsInteger);
itoa_based_to_redis_impl!(u64, NumericBehavior::NumberIsInteger);
itoa_based_to_redis_impl!(isize, NumericBehavior::NumberIsInteger);
itoa_based_to_redis_impl!(usize, NumericBehavior::NumberIsInteger);
dtoa_based_to_redis_impl!(f32, NumericBehavior::NumberIsFloat);
dtoa_based_to_redis_impl!(f64, NumericBehavior::NumberIsFloat);
impl ToRedisArgs for bool {
fn write_redis_args<W>(&self, out: &mut W)
where
W: ?Sized + RedisWrite,
{
out.write_arg(if *self { b"true" } else { b"false" })
}
}
impl ToRedisArgs for String {
fn write_redis_args<W>(&self, out: &mut W)
where
W: ?Sized + RedisWrite,
{
out.write_arg(self.as_bytes())
}
}
impl<'a> ToRedisArgs for &'a String {
fn write_redis_args<W>(&self, out: &mut W)
where
W: ?Sized + RedisWrite,
{
out.write_arg(self.as_bytes())
}
}
impl<'a> ToRedisArgs for &'a str {
fn write_redis_args<W>(&self, out: &mut W)
where
W: ?Sized + RedisWrite,
{
out.write_arg(self.as_bytes())
}
}
impl<T: ToRedisArgs> ToRedisArgs for Vec<T> {
fn write_redis_args<W>(&self, out: &mut W)
where
W: ?Sized + RedisWrite,
{
ToRedisArgs::make_arg_vec(self, out)
}
fn is_single_arg(&self) -> bool {
ToRedisArgs::is_single_vec_arg(&self[..])
}
}
impl<'a, T: ToRedisArgs> ToRedisArgs for &'a [T] {
fn write_redis_args<W>(&self, out: &mut W)
where
W: ?Sized + RedisWrite,
{
ToRedisArgs::make_arg_vec(*self, out)
}
fn is_single_arg(&self) -> bool {
ToRedisArgs::is_single_vec_arg(*self)
}
}
impl<T: ToRedisArgs> ToRedisArgs for Option<T> {
fn write_redis_args<W>(&self, out: &mut W)
where
W: ?Sized + RedisWrite,
{
match *self {
Some(ref x) => x.write_redis_args(out),
None => (),
}
}
fn describe_numeric_behavior(&self) -> NumericBehavior {
match *self {
Some(ref x) => x.describe_numeric_behavior(),
None => NumericBehavior::NonNumeric,
}
}
fn is_single_arg(&self) -> bool {
match *self {
Some(ref x) => x.is_single_arg(),
None => false,
}
}
}
/// @note: Redis cannot store empty sets, so the application has to
/// check whether the set is empty and, if so, not attempt to send it.
impl<T: ToRedisArgs + Hash + Eq, S: BuildHasher + Default> ToRedisArgs for HashSet<T, S> {
fn write_redis_args<W>(&self, out: &mut W)
where
W: ?Sized + RedisWrite,
{
ToRedisArgs::make_arg_iter_ref(self.iter(), out)
}
fn is_single_arg(&self) -> bool {
self.len() <= 1
}
}
/// @note: Redis cannot store empty sets, so the application has to
/// check whether the set is empty and, if so, not attempt to send it.
impl<T: ToRedisArgs + Hash + Eq + Ord> ToRedisArgs for BTreeSet<T> {
fn write_redis_args<W>(&self, out: &mut W)
where
W: ?Sized + RedisWrite,
{
ToRedisArgs::make_arg_iter_ref(self.iter(), out)
}
fn is_single_arg(&self) -> bool {
self.len() <= 1
}
}
/// This flattens a BTreeMap into alternating field/value arguments, the
/// shape that HMSET expects.
/// @note: Redis cannot store empty maps, so the application has to check
/// whether the map is empty and, if so, not attempt to send it.
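///
/// A flattening sketch (illustrative values):
///
/// ```rust
/// use std::collections::BTreeMap;
/// use redis::ToRedisArgs;
/// let mut m = BTreeMap::new();
/// m.insert("f1", 1);
/// m.insert("f2", 2);
/// assert_eq!(m.to_redis_args(),
///            vec![b"f1".to_vec(), b"1".to_vec(), b"f2".to_vec(), b"2".to_vec()]);
/// ```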
impl<T: ToRedisArgs + Hash + Eq + Ord, V: ToRedisArgs> ToRedisArgs for BTreeMap<T, V> {
fn write_redis_args<W>(&self, out: &mut W)
where
W: ?Sized + RedisWrite,
{
for (key, value) in self {
// otherwise things like HMSET will simply NOT work
assert!(key.is_single_arg() && value.is_single_arg());
key.write_redis_args(out);
value.write_redis_args(out);
}
}
fn is_single_arg(&self) -> bool {
self.len() <= 1
}
}
macro_rules! to_redis_args_for_tuple {
() => ();
($($name:ident,)+) => (
#[doc(hidden)]
impl<$($name: ToRedisArgs),*> ToRedisArgs for ($($name,)*) {
// we have local variables named T1 as dummies and those
// variables are unused.
#[allow(non_snake_case, unused_variables)]
fn write_redis_args<W>(&self, out: &mut W) where W: ?Sized + RedisWrite {
let ($(ref $name,)*) = *self;
$($name.write_redis_args(out);)*
}
#[allow(non_snake_case, unused_variables)]
fn is_single_arg(&self) -> bool {
let mut n = 0u32;
$(let $name = (); n += 1;)*
n == 1
}
}
to_redis_args_for_tuple_peel!($($name,)*);
)
}
/// This chips off the leading type parameter and recurses with the rest,
/// so an invocation with T1, T2, T3 recurses to T2, T3. It stops at tuples
/// of size 1 (it does not implement down to the unit tuple).
macro_rules! to_redis_args_for_tuple_peel {
($name:ident, $($other:ident,)*) => (to_redis_args_for_tuple!($($other,)*);)
}
to_redis_args_for_tuple! { T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, }
macro_rules! to_redis_args_for_array {
($($N:expr)+) => {
$(
impl<'a, T: ToRedisArgs> ToRedisArgs for &'a [T; $N] {
fn write_redis_args<W>(&self, out: &mut W) where W: ?Sized + RedisWrite {
ToRedisArgs::make_arg_vec(*self, out)
}
fn is_single_arg(&self) -> bool {
ToRedisArgs::is_single_vec_arg(*self)
}
}
)+
}
}
to_redis_args_for_array! {
0 1 2 3 4 5 6 7 8 9
10 11 12 13 14 15 16 17 18 19
20 21 22 23 24 25 26 27 28 29
30 31 32
}
/// This trait is used to convert a redis value into a more appropriate
/// type. While a redis `Value` can represent any response that comes
/// back from the redis server, usually you want to map this into something | /// that works better in rust. For instance you might want to convert the
/// return value into a `String` or an integer.
///
/// This trait is well supported throughout the library and you can
/// implement it for your own types if you want.
///
/// In addition to what you can see from the docs, this is also implemented
/// for tuples up to size 12 and for Vec<u8>.
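///
/// A conversion sketch over hand-constructed values; note that the numeric
/// conversions accept both integer and string replies, matching the note
/// on `Value::Int` above:
///
/// ```rust
/// use redis::{from_redis_value, Value};
/// let n: i64 = from_redis_value(&Value::Int(7)).unwrap();
/// assert_eq!(n, 7);
/// let m: i64 = from_redis_value(&Value::Data(b"42".to_vec())).unwrap();
/// assert_eq!(m, 42);
/// let s: String = from_redis_value(&Value::Status("OK".to_string())).unwrap();
/// assert_eq!(s, "OK");
/// ```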
pub trait FromRedisValue: Sized {
/// Given a redis `Value` this attempts to convert it into the given
/// destination type. If that fails because it's not compatible an
/// appropriate error is generated.
fn from_redis_value(v: &Value) -> RedisResult<Self>;
/// Similar to `from_redis_value` but constructs a vector of objects
/// from another vector of values. This primarily exists internally
/// to customize the behavior for vectors of tuples.
fn from_redis_values(items: &[Value]) -> RedisResult<Vec<Self>> {
let mut rv = vec![];
for item in items.iter() {
match FromRedisValue::from_redis_value(item) {
Ok(val) => rv.push(val),
Err(_) => {}
}
}
Ok(rv)
}
/// This only exists internally as a workaround for the lack of
/// specialization.
#[doc(hidden)]
fn from_byte_vec(_vec: &[u8]) -> Option<Vec<Self>> {
None
}
}
macro_rules! from_redis_value_for_num_internal {
($t:ty, $v:expr) => {{
let v = $v;
match *v {
Value::Int(val) => Ok(val as $t),
Value::Status(ref s) => match s.parse::<$t>() {
Ok(rv) => Ok(rv),
Err(_) => invalid_type_error!(v, "Could not convert from string."),
},
Value::Data(ref bytes) => match from_utf8(bytes)?.parse::<$t>() {
Ok(rv) => Ok(rv),
Err(_) => invalid_type_error!(v, "Could not convert from string."),
},
_ => invalid_type_error!(v, "Response type not convertible to numeric."),
}
}};
}
macro_rules! from_redis_value_for_num {
($t:ty) => {
impl FromRedisValue for $t {
fn from_redis_value(v: &Value) -> RedisResult<$t> {
from_redis_value_for_num_internal!($t, v)
}
}
};
}
impl FromRedisValue for u8 {
fn from_redis_value(v: &Value) -> RedisResult<u8> {
from_redis_value_for_num_internal!(u8, v)
}
fn from_byte_vec(vec: &[u8]) -> Option<Vec<u8>> {
Some(vec.to_vec())
}
}
from_redis_value_for_num!(i8);
from_redis_value_for_num!(i16);
from_redis_value_for_num!(u16);
from_redis_value_for_num!(i32);
from_redis_value_for_num!(u32);
from_redis_value_for_num!(i64);
from_redis_value_for_num!(u64);
from_redis_value_for_num!(f32);
from_redis_value_for_num!(f64);
from_redis_value_for_num!(isize);
from_redis_value_for_num!(usize);
impl FromRedisValue for bool {
fn from_redis_value(v: &Value) -> RedisResult<bool> {
match *v {
Value::Nil => Ok(false),
Value::Int(val) => Ok(val != 0),
Value::Status(ref s) => {
if &s[..] == "1" {
Ok(true)
} else if &s[..] == "0" {
Ok(false)
} else {
invalid_type_error!(v, "Response status not valid boolean");
}
}
Value::Okay => Ok(true),
_ => invalid_type_error!(v, "Response type not bool compatible."),
}
}
}
impl FromRedisValue for String {
fn from_redis_value(v: &Value) -> RedisResult<String> {
match *v {
Value::Data(ref bytes) => Ok(from_utf8(bytes)?.to_string()),
Value::Okay => Ok("OK".to_string()),
Value::Status(ref val) => Ok(val.to_string()),
_ => invalid_type_error!(v, "Response type not string compatible."),
}
}
}
impl<T: FromRedisValue> FromRedisValue for Vec<T> {
fn from_redis_value(v: &Value) -> RedisResult<Vec<T>> {
match *v {
// this hack allows us to specialize Vec<u8> to work with
// binary data whereas all others will fail with an error.
Value::Data(ref bytes) => match FromRedisValue::from_byte_vec(bytes) {
Some(x) => Ok(x),
None => invalid_type_error!(v, "Response type not vector compatible."),
},
Value::Bulk(ref items) => FromRedisValue::from_redis_values(items),
Value::Nil => Ok(vec![]),
_ => invalid_type_error!(v, "Response type not vector compatible."),
}
}
}
impl<K: FromRedisValue + Eq + Hash, V: FromRedisValue, S: BuildHasher + Default> FromRedisValue
for HashMap<K, V, S>
{
fn from_redis_value(v: &Value) -> RedisResult<HashMap<K, V, S>> {
match *v {
Value::Bulk(ref items) => {
let mut rv = HashMap::default();
let mut iter = items.iter();
loop {
let k = unwrap_or!(iter.next(), break);
let v = unwrap_or!(iter.next(), break);
rv.insert(from_redis_value(k)?, from_redis_value(v)?);
}
Ok(rv)
}
_ => invalid_type_error!(v, "Response type not hashmap compatible"),
}
}
}
impl<K: FromRedisValue + Eq + Hash, V: FromRedisValue> FromRedisValue for BTreeMap<K, V>
where
K: Ord,
{
fn from_redis_value(v: &Value) -> RedisResult<BTreeMap<K, V>> {
match *v {
Value::Bulk(ref items) => {
let mut rv = BTreeMap::new();
let mut iter = items.iter();
loop {
let k = unwrap_or!(iter.next(), break);
let v = unwrap_or!(iter.next(), break);
rv.insert(from_redis_value(k)?, from_redis_value(v)?);
}
Ok(rv)
}
_ => invalid_type_error!(v, "Response type not btreemap compatible"),
}
}
}
impl<T: FromRedisValue + Eq + Hash, S: BuildHasher + Default> FromRedisValue for HashSet<T, S> {
fn from_redis_value(v: &Value) -> RedisResult<HashSet<T, S>> {
match *v {
Value::Bulk(ref items) => {
let mut rv = HashSet::default();
for item in items.iter() {
rv.insert(from_redis_value(item)?);
}
Ok(rv)
}
_ => invalid_type_error!(v, "Response type not hashset compatible"),
}
}
}
impl<T: FromRedisValue + Eq + Hash> FromRedisValue for BTreeSet<T>
where
T: Ord,
{
fn from_redis_value(v: &Value) -> RedisResult<BTreeSet<T>> {
match *v {
Value::Bulk(ref items) => {
let mut rv = BTreeSet::new();
for item in items.iter() {
rv.insert(from_redis_value(item)?);
}
Ok(rv)
}
_ => invalid_type_error!(v, "Response type not btreeset compatible"),
}
}
}
impl FromRedisValue for Value {
fn from_redis_value(v: &Value) -> RedisResult<Value> {
Ok(v.clone())
}
}
impl FromRedisValue for () {
fn from_redis_value(_v: &Value) -> RedisResult<()> {
Ok(())
}
}
macro_rules! from_redis_value_for_tuple {
() => ();
($($name:ident,)+) => (
#[doc(hidden)]
impl<$($name: FromRedisValue),*> FromRedisValue for ($($name,)*) {
// we have local variables named T1 as dummies and those
// variables are unused.
#[allow(non_snake_case, unused_variables)]
fn from_redis_value(v: &Value) -> RedisResult<($($name,)*)> {
match *v {
Value::Bulk(ref items) => {
// hacky way to count the tuple size
let mut n = 0;
$(let $name = (); n += 1;)*
if items.len() != n {
invalid_type_error!(v, "Bulk response of wrong dimension")
}
// this is pretty ugly too. The { i += 1; i - 1} is rust's
// postfix increment :)
let mut i = 0;
Ok(($({let $name = (); from_redis_value(
&items[{ i += 1; i - 1 }])?},)*))
}
_ => invalid_type_error!(v, "Not a bulk response")
}
}
#[allow(non_snake_case, unused_variables)]
fn from_redis_values(items: &[Value]) -> RedisResult<Vec<($($name,)*)>> {
// hacky way to count the tuple size
let mut n = 0;
$(let $name = (); n += 1;)*
if items.len() % n != 0 {
invalid_type_error!(items, "Bulk response of wrong dimension")
}
// this is pretty ugly too. The { i += 1; i - 1} is rust's
// postfix increment :)
let mut rv = vec![];
if items.len() == 0 {
return Ok(rv)
}
let mut offset = 0;
while offset < items.len() {
rv.push(($({let $name = (); from_redis_value(
&items[{ offset += 1; offset - 1 }])?},)*));
}
Ok(rv)
}
}
from_redis_value_for_tuple_peel!($($name,)*);
)
}
/// This chips off the leading type parameter and recurses with the rest,
/// so an invocation with T1, T2, T3 recurses to T2, T3. It stops at tuples
/// of size 1 (it does not implement down to the unit tuple).
macro_rules! from_redis_value_for_tuple_peel {
($name:ident, $($other:ident,)*) => (from_redis_value_for_tuple!($($other,)*);)
}
from_redis_value_for_tuple! { T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, }
impl FromRedisValue for InfoDict {
fn from_redis_value(v: &Value) -> RedisResult<InfoDict> {
let s: String = from_redis_value(v)?;
Ok(InfoDict::new(&s))
}
}
impl<T: FromRedisValue> FromRedisValue for Option<T> {
fn from_redis_value(v: &Value) -> RedisResult<Option<T>> {
match *v {
Value::Nil => {
return Ok(None);
}
_ => {}
}
Ok(Some(from_redis_value(v)?))
}
}
/// A shortcut function to invoke `FromRedisValue::from_redis_value`
/// to make the API slightly nicer.
pub fn from_redis_value<T: FromRedisValue>(v: &Value) -> RedisResult<T> {
FromRedisValue::from_redis_value(v)
} | |
Compilation.js | /*
MIT License http://www.opensource.org/licenses/mit-license.php
Author Tobias Koppers @sokra
*/
"use strict";
const async = require("async");
const crypto = require("crypto");
| const ModuleDependencyWarning = require("./ModuleDependencyWarning");
const ModuleDependencyError = require("./ModuleDependencyError");
const Module = require("./Module");
const Chunk = require("./Chunk");
const Entrypoint = require("./Entrypoint");
const Stats = require("./Stats");
const MainTemplate = require("./MainTemplate");
const ChunkTemplate = require("./ChunkTemplate");
const HotUpdateChunkTemplate = require("./HotUpdateChunkTemplate");
const ModuleTemplate = require("./ModuleTemplate");
const Dependency = require("./Dependency");
const ChunkRenderError = require("./ChunkRenderError");
const CachedSource = require("webpack-sources").CachedSource;
function byId(a, b) {
if(a.id < b.id) return -1;
if(a.id > b.id) return 1;
return 0;
}
function iterationBlockVariable(variables, fn) {
for(let indexVariable = 0; indexVariable < variables.length; indexVariable++) {
let varDep = variables[indexVariable].dependencies;
for(let indexVDep = 0; indexVDep < varDep.length; indexVDep++) {
fn(varDep[indexVDep]);
}
}
}
function iterationOfArrayCallback(arr, fn) {
for(let index = 0; index < arr.length; index++) {
fn(arr[index]);
}
}
class Compilation extends Tapable {
constructor(compiler) {
super();
this.compiler = compiler;
this.resolvers = compiler.resolvers;
this.inputFileSystem = compiler.inputFileSystem;
const options = this.options = compiler.options;
this.outputOptions = options && options.output;
this.bail = options && options.bail;
this.profile = options && options.profile;
this.performance = options && options.performance;
this.mainTemplate = new MainTemplate(this.outputOptions);
this.chunkTemplate = new ChunkTemplate(this.outputOptions);
this.hotUpdateChunkTemplate = new HotUpdateChunkTemplate(this.outputOptions);
this.moduleTemplate = new ModuleTemplate(this.outputOptions);
this.entries = [];
this.preparedChunks = [];
this.entrypoints = {};
this.chunks = [];
this.namedChunks = {};
this.modules = [];
this._modules = {};
this.cache = null;
this.records = null;
this.nextFreeModuleIndex = undefined;
this.nextFreeModuleIndex2 = undefined;
this.additionalChunkAssets = [];
this.assets = {};
this.errors = [];
this.warnings = [];
this.children = [];
this.dependencyFactories = new Map();
this.dependencyTemplates = new Map();
}
templatesPlugin(name, fn) {
this.mainTemplate.plugin(name, fn);
this.chunkTemplate.plugin(name, fn);
}
addModule(module, cacheGroup) {
const identifier = module.identifier();
if(this._modules[identifier]) {
return false;
}
const cacheName = (cacheGroup || "m") + identifier;
if(this.cache && this.cache[cacheName]) {
const cacheModule = this.cache[cacheName];
let rebuild = true;
if(!cacheModule.error && cacheModule.cacheable && this.fileTimestamps && this.contextTimestamps) {
rebuild = cacheModule.needRebuild(this.fileTimestamps, this.contextTimestamps);
}
if(!rebuild) {
cacheModule.disconnect();
this._modules[identifier] = cacheModule;
this.modules.push(cacheModule);
cacheModule.errors.forEach(err => this.errors.push(err), this);
cacheModule.warnings.forEach(err => this.warnings.push(err), this);
return cacheModule;
} else {
module.lastId = cacheModule.id;
}
}
module.unbuild();
this._modules[identifier] = module;
if(this.cache) {
this.cache[cacheName] = module;
}
this.modules.push(module);
return true;
}
getModule(module) {
const identifier = module.identifier();
return this._modules[identifier];
}
findModule(identifier) {
return this._modules[identifier];
}
buildModule(module, optional, origin, dependencies, thisCallback) {
this.applyPlugins1("build-module", module);
if(module.building) return module.building.push(thisCallback);
const building = module.building = [thisCallback];
function callback(err) {
module.building = undefined;
building.forEach(cb => cb(err));
}
module.build(this.options, this, this.resolvers.normal, this.inputFileSystem, (error) => {
const errors = module.errors;
for(let indexError = 0; indexError < errors.length; indexError++) {
const err = errors[indexError];
err.origin = origin;
err.dependencies = dependencies;
if(optional)
this.warnings.push(err);
else
this.errors.push(err);
}
const warnings = module.warnings;
for(let indexWarning = 0; indexWarning < warnings.length; indexWarning++) {
const war = warnings[indexWarning];
war.origin = origin;
war.dependencies = dependencies;
this.warnings.push(war);
}
module.dependencies.sort(Dependency.compare);
if(error) {
this.applyPlugins2("failed-module", module, error);
return callback(error);
}
this.applyPlugins1("succeed-module", module);
return callback();
});
}
processModuleDependencies(module, callback) {
const dependencies = [];
function addDependency(dep) {
for(let i = 0; i < dependencies.length; i++) {
if(dep.isEqualResource(dependencies[i][0])) {
return dependencies[i].push(dep);
}
}
dependencies.push([dep]);
}
function addDependenciesBlock(block) {
if(block.dependencies) {
iterationOfArrayCallback(block.dependencies, addDependency);
}
if(block.blocks) {
iterationOfArrayCallback(block.blocks, addDependenciesBlock);
}
if(block.variables) {
iterationBlockVariable(block.variables, addDependency);
}
}
addDependenciesBlock(module);
this.addModuleDependencies(module, dependencies, this.bail, null, true, callback);
}
addModuleDependencies(module, dependencies, bail, cacheGroup, recursive, callback) {
let _this = this;
const start = _this.profile && +new Date();
const factories = [];
for(let i = 0; i < dependencies.length; i++) {
const factory = _this.dependencyFactories.get(dependencies[i][0].constructor);
if(!factory) {
return callback(new Error(`No module factory available for dependency type: ${dependencies[i][0].constructor.name}`));
}
factories[i] = [factory, dependencies[i]];
}
async.forEach(factories, function iteratorFactory(item, callback) {
const dependencies = item[1];
const errorAndCallback = function errorAndCallback(err) {
err.origin = module;
_this.errors.push(err);
if(bail) {
callback(err);
} else {
callback();
}
};
const warningAndCallback = function warningAndCallback(err) {
err.origin = module;
_this.warnings.push(err);
callback();
};
const factory = item[0];
factory.create({
contextInfo: {
issuer: module.nameForCondition && module.nameForCondition(),
compiler: _this.compiler.name
},
context: module.context,
dependencies: dependencies
}, function factoryCallback(err, dependentModule) {
let afterFactory;
function isOptional() {
return dependencies.filter(d => !d.optional).length === 0;
}
function errorOrWarningAndCallback(err) {
if(isOptional()) {
return warningAndCallback(err);
} else {
return errorAndCallback(err);
}
}
function iterationDependencies(depend) {
for(let index = 0; index < depend.length; index++) {
const dep = depend[index];
dep.module = dependentModule;
dependentModule.addReason(module, dep);
}
}
if(err) {
return errorOrWarningAndCallback(new ModuleNotFoundError(module, err, dependencies));
}
if(!dependentModule) {
return process.nextTick(callback);
}
if(_this.profile) {
if(!dependentModule.profile) {
dependentModule.profile = {};
}
afterFactory = +new Date();
dependentModule.profile.factory = afterFactory - start;
}
dependentModule.issuer = module;
const newModule = _this.addModule(dependentModule, cacheGroup);
if(!newModule) { // from cache
dependentModule = _this.getModule(dependentModule);
if(dependentModule.optional) {
dependentModule.optional = isOptional();
}
iterationDependencies(dependencies);
if(_this.profile) {
if(!module.profile) {
module.profile = {};
}
const time = +new Date() - start;
if(!module.profile.dependencies || time > module.profile.dependencies) {
module.profile.dependencies = time;
}
}
return process.nextTick(callback);
}
if(newModule instanceof Module) {
if(_this.profile) {
newModule.profile = dependentModule.profile;
}
newModule.optional = isOptional();
newModule.issuer = dependentModule.issuer;
dependentModule = newModule;
iterationDependencies(dependencies);
if(_this.profile) {
const afterBuilding = +new Date();
module.profile.building = afterBuilding - afterFactory;
}
if(recursive) {
return process.nextTick(_this.processModuleDependencies.bind(_this, dependentModule, callback));
} else {
return process.nextTick(callback);
}
}
dependentModule.optional = isOptional();
iterationDependencies(dependencies);
_this.buildModule(dependentModule, isOptional(), module, dependencies, err => {
if(err) {
return errorOrWarningAndCallback(err);
}
if(_this.profile) {
const afterBuilding = +new Date();
dependentModule.profile.building = afterBuilding - afterFactory;
}
if(recursive) {
_this.processModuleDependencies(dependentModule, callback);
} else {
return callback();
}
});
});
}, function finalCallbackAddModuleDependencies(err) {
// In V8, the Error objects keep a reference to the functions on the stack. These warnings &
// errors are created inside closures that keep a reference to the Compilation, so errors are
// leaking the Compilation object. Setting _this to null works around the following issue in V8.
// https://bugs.chromium.org/p/chromium/issues/detail?id=612191
_this = null;
if(err) {
return callback(err);
}
return process.nextTick(callback);
});
}
_addModuleChain(context, dependency, onModule, callback) {
const start = this.profile && +new Date();
const errorAndCallback = this.bail ? function errorAndCallback(err) {
callback(err);
} : function errorAndCallback(err) {
err.dependencies = [dependency];
this.errors.push(err);
callback();
}.bind(this);
if(typeof dependency !== "object" || dependency === null || !dependency.constructor) {
throw new Error("Parameter 'dependency' must be a Dependency");
}
const moduleFactory = this.dependencyFactories.get(dependency.constructor);
if(!moduleFactory) {
throw new Error(`No dependency factory available for this dependency type: ${dependency.constructor.name}`);
}
moduleFactory.create({
contextInfo: {
issuer: "",
compiler: this.compiler.name
},
context: context,
dependencies: [dependency]
}, (err, module) => {
if(err) {
return errorAndCallback(new EntryModuleNotFoundError(err));
}
let afterFactory;
if(this.profile) {
if(!module.profile) {
module.profile = {};
}
afterFactory = +new Date();
module.profile.factory = afterFactory - start;
}
const result = this.addModule(module);
if(!result) {
module = this.getModule(module);
onModule(module);
if(this.profile) {
const afterBuilding = +new Date();
module.profile.building = afterBuilding - afterFactory;
}
return callback(null, module);
}
if(result instanceof Module) {
if(this.profile) {
result.profile = module.profile;
}
module = result;
onModule(module);
moduleReady.call(this);
return;
}
onModule(module);
this.buildModule(module, false, null, null, (err) => {
if(err) {
return errorAndCallback(err);
}
if(this.profile) {
const afterBuilding = +new Date();
module.profile.building = afterBuilding - afterFactory;
}
moduleReady.call(this);
});
function moduleReady() {
this.processModuleDependencies(module, err => {
if(err) {
return callback(err);
}
return callback(null, module);
});
}
});
}
addEntry(context, entry, name, callback) {
const slot = {
name: name,
module: null
};
this.preparedChunks.push(slot);
this._addModuleChain(context, entry, (module) => {
entry.module = module;
this.entries.push(module);
module.issuer = null;
}, (err, module) => {
if(err) {
return callback(err);
}
if(module) {
slot.module = module;
} else {
const idx = this.preparedChunks.indexOf(slot);
this.preparedChunks.splice(idx, 1);
}
return callback();
});
}
prefetch(context, dependency, callback) {
this._addModuleChain(context, dependency, module => {
module.prefetched = true;
module.issuer = null;
}, callback);
}
rebuildModule(module, thisCallback) {
if(module.variables.length || module.blocks.length)
throw new Error("Cannot rebuild a complex module with variables or blocks");
if(module.rebuilding) {
return module.rebuilding.push(thisCallback);
}
const rebuilding = module.rebuilding = [thisCallback];
function callback(err) {
module.rebuilding = undefined;
rebuilding.forEach(cb => cb(err));
}
const deps = module.dependencies.slice();
this.buildModule(module, false, module, null, (err) => {
if(err) return callback(err);
this.processModuleDependencies(module, (err) => {
if(err) return callback(err);
deps.forEach(d => {
if(d.module && d.module.removeReason(module, d)) {
module.chunks.forEach(chunk => {
if(!d.module.hasReasonForChunk(chunk)) {
if(d.module.removeChunk(chunk)) {
this.removeChunkFromDependencies(d.module, chunk);
}
}
});
}
});
callback();
});
});
}
finish() {
const modules = this.modules;
this.applyPlugins1("finish-modules", modules);
for(let index = 0; index < modules.length; index++) {
const module = modules[index];
this.reportDependencyErrorsAndWarnings(module, [module]);
}
}
unseal() {
this.applyPlugins0("unseal");
this.chunks.length = 0;
this.namedChunks = {};
this.additionalChunkAssets.length = 0;
this.assets = {};
this.modules.forEach(module => module.unseal());
}
seal(callback) {
const self = this;
self.applyPlugins0("seal");
self.nextFreeModuleIndex = 0;
self.nextFreeModuleIndex2 = 0;
self.preparedChunks.forEach(preparedChunk => {
const module = preparedChunk.module;
const chunk = self.addChunk(preparedChunk.name, module);
const entrypoint = self.entrypoints[chunk.name] = new Entrypoint(chunk.name);
entrypoint.unshiftChunk(chunk);
chunk.addModule(module);
module.addChunk(chunk);
chunk.entryModule = module;
self.assignIndex(module);
self.assignDepth(module);
self.processDependenciesBlockForChunk(module, chunk);
});
self.sortModules(self.modules);
self.applyPlugins0("optimize");
while(self.applyPluginsBailResult1("optimize-modules-basic", self.modules) ||
self.applyPluginsBailResult1("optimize-modules", self.modules) ||
self.applyPluginsBailResult1("optimize-modules-advanced", self.modules)); // eslint-disable-line no-extra-semi
self.applyPlugins1("after-optimize-modules", self.modules);
while(self.applyPluginsBailResult1("optimize-chunks-basic", self.chunks) ||
self.applyPluginsBailResult1("optimize-chunks", self.chunks) ||
self.applyPluginsBailResult1("optimize-chunks-advanced", self.chunks)); // eslint-disable-line no-extra-semi
self.applyPlugins1("after-optimize-chunks", self.chunks);
self.applyPluginsAsyncSeries("optimize-tree", self.chunks, self.modules, function sealPart2(err) {
if(err) {
return callback(err);
}
self.applyPlugins2("after-optimize-tree", self.chunks, self.modules);
const shouldRecord = self.applyPluginsBailResult("should-record") !== false;
self.applyPlugins2("revive-modules", self.modules, self.records);
self.applyPlugins1("optimize-module-order", self.modules);
self.applyPlugins1("advanced-optimize-module-order", self.modules);
self.applyPlugins1("before-module-ids", self.modules);
self.applyPlugins1("module-ids", self.modules);
self.applyModuleIds();
self.applyPlugins1("optimize-module-ids", self.modules);
self.applyPlugins1("after-optimize-module-ids", self.modules);
self.sortItemsWithModuleIds();
self.applyPlugins2("revive-chunks", self.chunks, self.records);
self.applyPlugins1("optimize-chunk-order", self.chunks);
self.applyPlugins1("before-chunk-ids", self.chunks);
self.applyChunkIds();
self.applyPlugins1("optimize-chunk-ids", self.chunks);
self.applyPlugins1("after-optimize-chunk-ids", self.chunks);
self.sortItemsWithChunkIds();
if(shouldRecord)
self.applyPlugins2("record-modules", self.modules, self.records);
if(shouldRecord)
self.applyPlugins2("record-chunks", self.chunks, self.records);
self.applyPlugins0("before-hash");
self.createHash();
self.applyPlugins0("after-hash");
if(shouldRecord)
self.applyPlugins1("record-hash", self.records);
self.applyPlugins0("before-module-assets");
self.createModuleAssets();
if(self.applyPluginsBailResult("should-generate-chunk-assets") !== false) {
self.applyPlugins0("before-chunk-assets");
self.createChunkAssets();
}
self.applyPlugins1("additional-chunk-assets", self.chunks);
self.summarizeDependencies();
if(shouldRecord)
self.applyPlugins2("record", self, self.records);
self.applyPluginsAsync("additional-assets", err => {
if(err) {
return callback(err);
}
self.applyPluginsAsync("optimize-chunk-assets", self.chunks, err => {
if(err) {
return callback(err);
}
self.applyPlugins1("after-optimize-chunk-assets", self.chunks);
self.applyPluginsAsync("optimize-assets", self.assets, err => {
if(err) {
return callback(err);
}
self.applyPlugins1("after-optimize-assets", self.assets);
if(self.applyPluginsBailResult("need-additional-seal")) {
self.unseal();
return self.seal(callback);
}
return self.applyPluginsAsync("after-seal", callback);
});
});
});
});
}
sortModules(modules) {
modules.sort((a, b) => {
if(a.index < b.index) return -1;
if(a.index > b.index) return 1;
return 0;
});
}
reportDependencyErrorsAndWarnings(module, blocks) {
for(let indexBlock = 0; indexBlock < blocks.length; indexBlock++) {
const block = blocks[indexBlock];
const dependencies = block.dependencies;
for(let indexDep = 0; indexDep < dependencies.length; indexDep++) {
const d = dependencies[indexDep];
const warnings = d.getWarnings();
if(warnings) {
for(let indexWar = 0; indexWar < warnings.length; indexWar++) {
const w = warnings[indexWar];
const warning = new ModuleDependencyWarning(module, w, d.loc);
this.warnings.push(warning);
}
}
const errors = d.getErrors();
if(errors) {
for(let indexErr = 0; indexErr < errors.length; indexErr++) {
const e = errors[indexErr];
const error = new ModuleDependencyError(module, e, d.loc);
this.errors.push(error);
}
}
}
this.reportDependencyErrorsAndWarnings(module, block.blocks);
}
}
addChunk(name, module, loc) {
if(name) {
if(Object.prototype.hasOwnProperty.call(this.namedChunks, name)) {
const chunk = this.namedChunks[name];
if(module) {
chunk.addOrigin(module, loc);
}
return chunk;
}
}
const chunk = new Chunk(name, module, loc);
this.chunks.push(chunk);
if(name) {
this.namedChunks[name] = chunk;
}
return chunk;
}
assignIndex(module) {
const _this = this;
const queue = [() => {
assignIndexToModule(module);
}];
const iteratorAllDependencies = d => {
queue.push(() => assignIndexToDependency(d));
};
function assignIndexToModule(module) {
// enter module
if(typeof module.index !== "number") {
module.index = _this.nextFreeModuleIndex++;
// leave module
queue.push(() => module.index2 = _this.nextFreeModuleIndex2++);
// enter it as block
assignIndexToDependencyBlock(module);
}
}
function assignIndexToDependency(dependency) {
if(dependency.module) {
queue.push(() => assignIndexToModule(dependency.module));
}
}
function assignIndexToDependencyBlock(block) {
let allDependencies = [];
function iteratorDependency(d) {
allDependencies.push(d);
}
function iteratorBlock(b) {
queue.push(() => assignIndexToDependencyBlock(b));
}
if(block.variables) {
iterationBlockVariable(block.variables, iteratorDependency);
}
if(block.dependencies) {
iterationOfArrayCallback(block.dependencies, iteratorDependency);
}
if(block.blocks) {
const blocks = block.blocks;
let indexBlock = blocks.length;
while(indexBlock--) {
iteratorBlock(blocks[indexBlock]);
}
}
let indexAll = allDependencies.length;
while(indexAll--) {
iteratorAllDependencies(allDependencies[indexAll]);
}
}
while(queue.length) {
queue.pop()();
}
}
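	// Note on assignIndex above: the queue is popped LIFO and the "leave
	// module" callback is pushed before the module's dependency callbacks,
	// so `index` ends up as a pre-order numbering of the module graph while
	// `index2` is a post-order one. For a chain a -> b -> c this yields
	// index a=0, b=1, c=2 and index2 c=0, b=1, a=2.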
assignDepth(module) {
function assignDepthToModule(module, depth) {
// enter module
if(typeof module.depth === "number" && module.depth <= depth) return;
module.depth = depth;
// enter it as block
assignDepthToDependencyBlock(module, depth + 1);
}
function assignDepthToDependency(dependency, depth) {
if(dependency.module) {
queue.push(() => assignDepthToModule(dependency.module, depth));
}
}
function assignDepthToDependencyBlock(block, depth) {
function iteratorDependency(d) {
assignDepthToDependency(d, depth);
}
function iteratorBlock(b) {
assignDepthToDependencyBlock(b, depth);
}
if(block.variables) {
iterationBlockVariable(block.variables, iteratorDependency);
}
if(block.dependencies) {
iterationOfArrayCallback(block.dependencies, iteratorDependency);
}
if(block.blocks) {
iterationOfArrayCallback(block.blocks, iteratorBlock);
}
}
const queue = [() => {
assignDepthToModule(module, 0);
}];
while(queue.length) {
queue.pop()();
}
}
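	// Editorial note: the guard `module.depth <= depth` above means a module
	// keeps the smallest depth it is ever reached with, i.e. its shortest
	// distance from the entry. Tiny worked example (sketch only):
	//
	//   // Given entry -> a -> b and entry -> b,
	//   // assignDepth(entry) leaves entry.depth === 0, a.depth === 1 and
	//   // b.depth === 1 (not 2), because the deeper visit is skipped or
	//   // overwritten by the shallower one.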
processDependenciesBlockForChunk(block, chunk) {
const iteratorBlock = b => {
let c;
if(!b.chunks) {
c = this.addChunk(b.chunkName, b.module, b.loc);
b.chunks = [c];
c.addBlock(b);
} else {
c = b.chunks[0];
}
chunk.addChunk(c);
c.addParent(chunk);
queue.push([b, c]);
};
const iteratorDependency = d => {
if(!d.module) {
return;
}
if(d.weak) {
return;
}
if(chunk.addModule(d.module)) {
d.module.addChunk(chunk);
queue.push([d.module, chunk]);
}
};
const queue = [
[block, chunk]
];
while(queue.length) {
const queueItem = queue.pop();
block = queueItem[0];
chunk = queueItem[1];
if(block.variables) {
iterationBlockVariable(block.variables, iteratorDependency);
}
if(block.dependencies) {
iterationOfArrayCallback(block.dependencies, iteratorDependency);
}
if(block.blocks) {
iterationOfArrayCallback(block.blocks, iteratorBlock);
}
}
}
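	// Shape of the traversal above as a generic worklist (editorial sketch,
	// not webpack code): a node is re-enqueued only when visiting it changed
	// state, which is what `chunk.addModule(...)` returning true signals.
	//
	//   function forEachReachable(start, getChildren, tryVisit) {
	//     const queue = [start];
	//     while(queue.length) {
	//       const node = queue.pop();
	//       for(const child of getChildren(node)) {
	//         if(tryVisit(child)) queue.push(child); // first effective visit only
	//       }
	//     }
	//   }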
removeChunkFromDependencies(block, chunk) {
const iteratorDependency = d => {
if(!d.module) {
return;
}
if(!d.module.hasReasonForChunk(chunk)) {
if(d.module.removeChunk(chunk)) {
this.removeChunkFromDependencies(d.module, chunk);
}
}
};
const blocks = block.blocks;
for(let indexBlock = 0; indexBlock < blocks.length; indexBlock++) {
const chunks = blocks[indexBlock].chunks;
for(let indexChunk = 0; indexChunk < chunks.length; indexChunk++) {
const blockChunk = chunks[indexChunk];
chunk.removeChunk(blockChunk);
blockChunk.removeParent(chunk);
this.removeChunkFromDependencies(chunks, blockChunk);
}
}
if(block.dependencies) {
iterationOfArrayCallback(block.dependencies, iteratorDependency);
}
if(block.variables) {
iterationBlockVariable(block.variables, iteratorDependency);
}
}
applyModuleIds() {
let unusedIds = [];
let nextFreeModuleId = 0;
let usedIds = [];
// TODO consider Map when performance has improved https://gist.github.com/sokra/234c077e1299b7369461f1708519c392
const usedIdMap = Object.create(null);
if(this.usedModuleIds) {
Object.keys(this.usedModuleIds).forEach(key => {
const id = this.usedModuleIds[key];
if(!usedIdMap[id]) {
usedIds.push(id);
usedIdMap[id] = true;
}
});
}
const modules1 = this.modules;
for(let indexModule1 = 0; indexModule1 < modules1.length; indexModule1++) {
const module1 = modules1[indexModule1];
if(module1.id && !usedIdMap[module1.id]) {
usedIds.push(module1.id);
usedIdMap[module1.id] = true;
}
}
if(usedIds.length > 0) {
let usedIdMax = -1;
for(let index = 0; index < usedIds.length; index++) {
const usedIdKey = usedIds[index];
if(typeof usedIdKey !== "number") {
continue;
}
usedIdMax = Math.max(usedIdMax, usedIdKey);
}
let lengthFreeModules = nextFreeModuleId = usedIdMax + 1;
while(lengthFreeModules--) {
if(!usedIdMap[lengthFreeModules]) {
unusedIds.push(lengthFreeModules);
}
}
}
const modules2 = this.modules;
for(let indexModule2 = 0; indexModule2 < modules2.length; indexModule2++) {
const module2 = modules2[indexModule2];
if(module2.id === null) {
if(unusedIds.length > 0)
module2.id = unusedIds.pop();
else
module2.id = nextFreeModuleId++;
}
}
}
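	// The id-reuse policy above, isolated as a sketch (illustrative only):
	// free ids below the current maximum are handed out first, in ascending
	// order, before fresh ids continue past the maximum.
	//
	//   function makeIdAllocator(usedIds) {
	//     const used = new Set(usedIds.filter(id => typeof id === "number"));
	//     const max = Math.max(-1, ...used);
	//     const unused = [];
	//     for(let id = max; id >= 0; id--) if(!used.has(id)) unused.push(id);
	//     let next = max + 1;
	//     return () => unused.length > 0 ? unused.pop() : next++;
	//   }
	//   // makeIdAllocator([0, 2, 5]) yields 1, 3, 4, 6, 7, ...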
applyChunkIds() {
const unusedIds = [];
let nextFreeChunkId = 0;
function getNextFreeChunkId(usedChunkIds) {
const keyChunks = Object.keys(usedChunkIds);
let result = -1;
for(let index = 0; index < keyChunks.length; index++) {
const usedIdKey = keyChunks[index];
const usedIdValue = usedChunkIds[usedIdKey];
if(typeof usedIdValue !== "number") {
continue;
}
result = Math.max(result, usedIdValue);
}
return result;
}
if(this.usedChunkIds) {
nextFreeChunkId = getNextFreeChunkId(this.usedChunkIds) + 1;
let index = nextFreeChunkId;
while(index--) {
if(this.usedChunkIds[index] !== index) {
unusedIds.push(index);
}
}
}
const chunks = this.chunks;
for(let indexChunk = 0; indexChunk < chunks.length; indexChunk++) {
const chunk = chunks[indexChunk];
if(chunk.id === null) {
if(unusedIds.length > 0)
chunk.id = unusedIds.pop();
else
chunk.id = nextFreeChunkId++;
}
if(!chunk.ids) {
chunk.ids = [chunk.id];
}
}
}
sortItemsWithModuleIds() {
this.modules.sort(byId);
const modules = this.modules;
for(let indexModule = 0; indexModule < modules.length; indexModule++) {
modules[indexModule].sortItems();
}
const chunks = this.chunks;
for(let indexChunk = 0; indexChunk < chunks.length; indexChunk++) {
chunks[indexChunk].sortItems();
}
}
sortItemsWithChunkIds() {
this.chunks.sort(byId);
const modules = this.modules;
for(let indexModule = 0; indexModule < modules.length; indexModule++) {
modules[indexModule].sortItems();
}
const chunks = this.chunks;
for(let indexChunk = 0; indexChunk < chunks.length; indexChunk++) {
chunks[indexChunk].sortItems();
}
}
summarizeDependencies() {
function filterDups(array) {
const newArray = [];
for(let i = 0; i < array.length; i++) {
if(i === 0 || array[i - 1] !== array[i])
newArray.push(array[i]);
}
return newArray;
}
this.fileDependencies = (this.compilationDependencies || []).slice();
this.contextDependencies = [];
this.missingDependencies = [];
const children = this.children;
for(let indexChildren = 0; indexChildren < children.length; indexChildren++) {
const child = children[indexChildren];
this.fileDependencies = this.fileDependencies.concat(child.fileDependencies);
this.contextDependencies = this.contextDependencies.concat(child.contextDependencies);
this.missingDependencies = this.missingDependencies.concat(child.missingDependencies);
}
const modules = this.modules;
for(let indexModule = 0; indexModule < modules.length; indexModule++) {
const module = modules[indexModule];
if(module.fileDependencies) {
const fileDependencies = module.fileDependencies;
for(let indexFileDep = 0; indexFileDep < fileDependencies.length; indexFileDep++) {
this.fileDependencies.push(fileDependencies[indexFileDep]);
}
}
if(module.contextDependencies) {
const contextDependencies = module.contextDependencies;
for(let indexContextDep = 0; indexContextDep < contextDependencies.length; indexContextDep++) {
this.contextDependencies.push(contextDependencies[indexContextDep]);
}
}
}
this.errors.forEach(error => {
if(Array.isArray(error.missing)) {
error.missing.forEach(item => this.missingDependencies.push(item));
}
});
this.fileDependencies.sort();
this.fileDependencies = filterDups(this.fileDependencies);
this.contextDependencies.sort();
this.contextDependencies = filterDups(this.contextDependencies);
this.missingDependencies.sort();
this.missingDependencies = filterDups(this.missingDependencies);
}
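	// Editorial note: filterDups only removes *adjacent* duplicates, which is
	// why each array is sorted immediately before the call:
	//   filterDups(["a", "a", "b"]) // => ["a", "b"]
	//   filterDups(["a", "b", "a"]) // => ["a", "b", "a"] (unchanged)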
createHash() {
const outputOptions = this.outputOptions;
const hashFunction = outputOptions.hashFunction;
const hashDigest = outputOptions.hashDigest;
const hashDigestLength = outputOptions.hashDigestLength;
const hash = crypto.createHash(hashFunction);
if(outputOptions.hashSalt)
hash.update(outputOptions.hashSalt);
this.mainTemplate.updateHash(hash);
this.chunkTemplate.updateHash(hash);
this.moduleTemplate.updateHash(hash);
this.children.forEach(function(child) {
hash.update(child.hash);
});
// clone needed as sort below is inplace mutation
const chunks = this.chunks.slice();
/**
* sort here will bring all "falsy" values to the beginning
* this is needed as the "hasRuntime()" chunks are dependent on the
* hashes of the non-runtime chunks.
*/
chunks.sort((a, b) => {
const aEntry = a.hasRuntime();
const bEntry = b.hasRuntime();
if(aEntry && !bEntry) return 1;
if(!aEntry && bEntry) return -1;
return 0;
});
for(let i = 0; i < chunks.length; i++) {
const chunk = chunks[i];
const chunkHash = crypto.createHash(hashFunction);
if(outputOptions.hashSalt)
chunkHash.update(outputOptions.hashSalt);
chunk.updateHash(chunkHash);
if(chunk.hasRuntime()) {
this.mainTemplate.updateHashForChunk(chunkHash, chunk);
} else {
this.chunkTemplate.updateHashForChunk(chunkHash, chunk);
}
this.applyPlugins2("chunk-hash", chunk, chunkHash);
chunk.hash = chunkHash.digest(hashDigest);
hash.update(chunk.hash);
chunk.renderedHash = chunk.hash.substr(0, hashDigestLength);
}
this.fullHash = hash.digest(hashDigest);
this.hash = this.fullHash.substr(0, hashDigestLength);
}
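	// Editorial sketch of the two-level hashing above (illustrative only;
	// hasRuntime is modeled here as a boolean field and the hash function is
	// fixed to md5): every chunk gets its own salted hash, the compilation
	// hash folds all chunk hashes in, and runtime chunks are hashed last
	// because their rendered content embeds the other chunks' hashes.
	//
	//   const crypto = require("crypto");
	//   function hashChunks(chunks, salt) {
	//     const full = crypto.createHash("md5");
	//     if(salt) full.update(salt);
	//     const sorted = chunks.slice().sort((a, b) => Number(a.hasRuntime) - Number(b.hasRuntime));
	//     for(const chunk of sorted) {
	//       const h = crypto.createHash("md5");
	//       if(salt) h.update(salt);
	//       h.update(chunk.content);
	//       chunk.hash = h.digest("hex");
	//       full.update(chunk.hash);
	//     }
	//     return full.digest("hex");
	//   }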
modifyHash(update) {
const outputOptions = this.outputOptions;
const hashFunction = outputOptions.hashFunction;
const hashDigest = outputOptions.hashDigest;
const hashDigestLength = outputOptions.hashDigestLength;
const hash = crypto.createHash(hashFunction);
hash.update(this.fullHash);
hash.update(update);
this.fullHash = hash.digest(hashDigest);
this.hash = this.fullHash.substr(0, hashDigestLength);
}
createModuleAssets() {
for(let i = 0; i < this.modules.length; i++) {
const module = this.modules[i];
if(module.assets) {
Object.keys(module.assets).forEach((assetName) => {
const fileName = this.getPath(assetName);
this.assets[fileName] = module.assets[assetName];
this.applyPlugins2("module-asset", module, fileName);
});
}
}
}
createChunkAssets() {
const outputOptions = this.outputOptions;
const filename = outputOptions.filename;
const chunkFilename = outputOptions.chunkFilename;
for(let i = 0; i < this.chunks.length; i++) {
const chunk = this.chunks[i];
chunk.files = [];
const chunkHash = chunk.hash;
let source;
let file;
const filenameTemplate = chunk.filenameTemplate ? chunk.filenameTemplate :
chunk.isInitial() ? filename :
chunkFilename;
try {
const useChunkHash = !chunk.hasRuntime() || (this.mainTemplate.useChunkHash && this.mainTemplate.useChunkHash(chunk));
const usedHash = useChunkHash ? chunkHash : this.fullHash;
const cacheName = "c" + chunk.id;
if(this.cache && this.cache[cacheName] && this.cache[cacheName].hash === usedHash) {
source = this.cache[cacheName].source;
} else {
if(chunk.hasRuntime()) {
source = this.mainTemplate.render(this.hash, chunk, this.moduleTemplate, this.dependencyTemplates);
} else {
source = this.chunkTemplate.render(chunk, this.moduleTemplate, this.dependencyTemplates);
}
if(this.cache) {
this.cache[cacheName] = {
hash: usedHash,
source: source = (source instanceof CachedSource ? source : new CachedSource(source))
};
}
}
file = this.getPath(filenameTemplate, {
noChunkHash: !useChunkHash,
chunk
});
if(this.assets[file])
throw new Error(`Conflict: Multiple assets emit to the same filename ${file}`);
this.assets[file] = source;
chunk.files.push(file);
this.applyPlugins2("chunk-asset", chunk, file);
} catch(err) {
this.errors.push(new ChunkRenderError(chunk, file || filenameTemplate, err));
}
}
}
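	// The per-chunk cache above is a plain hash-validated memo; a minimal
	// equivalent (editorial sketch):
	//
	//   function cachedRender(cache, key, hash, render) {
	//     if(cache[key] && cache[key].hash === hash) return cache[key].source;
	//     const source = render();
	//     cache[key] = { hash, source };
	//     return source;
	//   }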
getPath(filename, data) {
data = data || {};
data.hash = data.hash || this.hash;
return this.mainTemplate.applyPluginsWaterfall("asset-path", filename, data);
}
getStats() {
return new Stats(this);
}
createChildCompiler(name, outputOptions) {
return this.compiler.createChildCompiler(this, name, outputOptions);
}
checkConstraints() {
const usedIds = {};
const modules = this.modules;
for(let indexModule = 0; indexModule < modules.length; indexModule++) {
const moduleId = modules[indexModule].id;
if(usedIds[moduleId])
throw new Error(`checkConstraints: duplicate module id ${moduleId}`);
}
const chunks = this.chunks;
for(let indexChunk = 0; indexChunk < chunks.length; indexChunk++) {
const chunk = chunks[indexChunk];
if(chunks.indexOf(chunk) !== indexChunk)
throw new Error(`checkConstraints: duplicate chunk in compilation ${chunk.debugId}`);
chunk.checkConstraints();
}
}
}
module.exports = Compilation; | const Tapable = require("tapable");
const EntryModuleNotFoundError = require("./EntryModuleNotFoundError");
const ModuleNotFoundError = require("./ModuleNotFoundError");
|
invvect_test.go
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
import (
"bytes"
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/palcoin-project/palcd/chaincfg/chainhash"
)
// TestInvTypeStringer tests the stringized output for inventory vector types.
func TestInvTypeStringer(t *testing.T) {
tests := []struct {
in InvType
want string
}{
{InvTypeError, "ERROR"},
{InvTypeTx, "MSG_TX"},
{InvTypeBlock, "MSG_BLOCK"},
{0xffffffff, "Unknown InvType (4294967295)"},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
result := test.in.String()
if result != test.want {
t.Errorf("String #%d\n got: %s want: %s", i, result,
test.want)
continue
}
}
}
// TestInvVect tests the InvVect API.
func TestInvVect(t *testing.T) {
ivType := InvTypeBlock
hash := chainhash.Hash{}
// Ensure we get the same payload and signature back out.
iv := NewInvVect(ivType, &hash)
if iv.Type != ivType {
t.Errorf("NewInvVect: wrong type - got %v, want %v",
iv.Type, ivType)
}
if !iv.Hash.IsEqual(&hash) {
t.Errorf("NewInvVect: wrong hash - got %v, want %v",
spew.Sdump(iv.Hash), spew.Sdump(hash))
}
}
// TestInvVectWire tests the InvVect wire encode and decode for various
// protocol versions and supported inventory vector types.
func TestInvVectWire(t *testing.T) {
// Block 203707 hash.
hashStr := "3264bc2ac36a60840790ba1d475d01367e7c723da941069e9dc"
baseHash, err := chainhash.NewHashFromStr(hashStr)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
}
// errInvVect is an inventory vector with an error.
errInvVect := InvVect{
Type: InvTypeError,
Hash: chainhash.Hash{},
}
// errInvVectEncoded is the wire encoded bytes of errInvVect.
errInvVectEncoded := []byte{
0x00, 0x00, 0x00, 0x00, // InvTypeError
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // No hash
}
// txInvVect is an inventory vector representing a transaction.
txInvVect := InvVect{
Type: InvTypeTx,
Hash: *baseHash,
}
// txInvVectEncoded is the wire encoded bytes of txInvVect.
txInvVectEncoded := []byte{
0x01, 0x00, 0x00, 0x00, // InvTypeTx
0xdc, 0xe9, 0x69, 0x10, 0x94, 0xda, 0x23, 0xc7,
0xe7, 0x67, 0x13, 0xd0, 0x75, 0xd4, 0xa1, 0x0b,
0x79, 0x40, 0x08, 0xa6, 0x36, 0xac, 0xc2, 0x4b,
0x26, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Block 203707 hash
}
// blockInvVect is an inventory vector representing a block.
blockInvVect := InvVect{
Type: InvTypeBlock,
Hash: *baseHash,
}
// blockInvVectEncoded is the wire encoded bytes of blockInvVect.
blockInvVectEncoded := []byte{
0x02, 0x00, 0x00, 0x00, // InvTypeBlock
0xdc, 0xe9, 0x69, 0x10, 0x94, 0xda, 0x23, 0xc7,
0xe7, 0x67, 0x13, 0xd0, 0x75, 0xd4, 0xa1, 0x0b,
0x79, 0x40, 0x08, 0xa6, 0x36, 0xac, 0xc2, 0x4b,
0x26, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Block 203707 hash
}
tests := []struct {
		in   InvVect // InvVect to encode
		out  InvVect // Expected decoded InvVect
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
}{
// Latest protocol version error inventory vector.
{
errInvVect,
errInvVect,
errInvVectEncoded,
ProtocolVersion,
},
// Latest protocol version tx inventory vector.
{
txInvVect,
txInvVect,
txInvVectEncoded,
ProtocolVersion,
},
// Latest protocol version block inventory vector.
{
blockInvVect,
blockInvVect,
blockInvVectEncoded,
ProtocolVersion,
},
// Protocol version BIP0035Version error inventory vector.
{
errInvVect,
errInvVect,
errInvVectEncoded,
BIP0035Version,
},
// Protocol version BIP0035Version tx inventory vector.
{
txInvVect,
txInvVect,
txInvVectEncoded,
BIP0035Version,
},
// Protocol version BIP0035Version block inventory vector.
{
blockInvVect,
blockInvVect,
blockInvVectEncoded,
BIP0035Version,
},
// Protocol version BIP0031Version error inventory vector.
{
errInvVect,
errInvVect,
errInvVectEncoded,
BIP0031Version,
},
// Protocol version BIP0031Version tx inventory vector.
{
txInvVect,
txInvVect,
txInvVectEncoded,
BIP0031Version,
},
// Protocol version BIP0031Version block inventory vector.
{
blockInvVect,
blockInvVect,
blockInvVectEncoded,
BIP0031Version,
},
// Protocol version NetAddressTimeVersion error inventory vector.
{
errInvVect,
errInvVect,
errInvVectEncoded,
NetAddressTimeVersion,
},
// Protocol version NetAddressTimeVersion tx inventory vector.
{
txInvVect,
txInvVect,
txInvVectEncoded,
NetAddressTimeVersion,
},
// Protocol version NetAddressTimeVersion block inventory vector.
{
blockInvVect,
blockInvVect,
blockInvVectEncoded,
NetAddressTimeVersion,
},
// Protocol version MultipleAddressVersion error inventory vector.
{
errInvVect,
errInvVect,
errInvVectEncoded,
MultipleAddressVersion,
},
// Protocol version MultipleAddressVersion tx inventory vector.
{
txInvVect,
txInvVect,
txInvVectEncoded,
MultipleAddressVersion,
},
// Protocol version MultipleAddressVersion block inventory vector.
{
blockInvVect,
blockInvVect,
blockInvVectEncoded,
MultipleAddressVersion,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to wire format.
var buf bytes.Buffer
err := writeInvVect(&buf, test.pver, &test.in)
if err != nil {
t.Errorf("writeInvVect #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("writeInvVect #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Decode the message from wire format.
var iv InvVect
rbuf := bytes.NewReader(test.buf)
err = readInvVect(rbuf, test.pver, &iv)
if err != nil {
t.Errorf("readInvVect #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(iv, test.out) {
t.Errorf("readInvVect #%d\n got: %s want: %s", i,
spew.Sdump(iv), spew.Sdump(test.out))
continue
}
}
}
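// Editorial sketch (not part of the original test file): the encode/decode
// round trip above could be factored into a helper; "roundTripInvVect" is a
// hypothetical name, not a wire package API.
//
//	func roundTripInvVect(t *testing.T, pver uint32, in *InvVect) *InvVect {
//		t.Helper()
//		var buf bytes.Buffer
//		if err := writeInvVect(&buf, pver, in); err != nil {
//			t.Fatalf("writeInvVect: %v", err)
//		}
//		var out InvVect
//		if err := readInvVect(bytes.NewReader(buf.Bytes()), pver, &out); err != nil {
//			t.Fatalf("readInvVect: %v", err)
//		}
//		return &out
//	}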
Employee.ts
import axios from 'axios';
import { headers } from '../constants/ZenConstants';
import { AuthPayload, AuthResponse, AuthUser } from '../types/Auth';
const LOGIN_URL = `/api/auth/signin`;
const GET_CURRENT_EMPLOYEE_URL = `/api/employees/current`;
export const getAuthToken = async (
  payload: AuthPayload
): Promise<AuthResponse> => {
try {
const response = await axios.post(LOGIN_URL, payload);
return response.data;
} catch (error) {
return Promise.reject(error);
}
};
export const getCurrentEmployee = async (): Promise<AuthUser> => {
try {
const response = await axios.get(GET_CURRENT_EMPLOYEE_URL, {
headers: headers()
});
return response.data;
} catch (error) {
return Promise.reject(error);
}
};
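// Usage sketch (editorial; the payload and response field names below are
// assumptions for illustration, not taken from this codebase):
//
//   const auth = await getAuthToken({ username: "jdoe", password: "secret" });
//   // persist the token so that headers() can attach it, then:
//   const me = await getCurrentEmployee();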
execplan.go
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colexec
import (
"context"
"fmt"
"math"
"reflect"
"github.com/cockroachdb/cockroach/pkg/col/coltypes"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/execerror"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/typeconv"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/errors"
)
func checkNumIn(inputs []Operator, numIn int) error {
if len(inputs) != numIn {
return errors.Errorf("expected %d input(s), got %d", numIn, len(inputs))
}
return nil
}
// wrapRowSource, given an input Operator, integrates toWrap into a columnar
// execution flow and returns toWrap's output as an Operator.
func wrapRowSource(
ctx context.Context,
flowCtx *execinfra.FlowCtx,
input Operator,
inputTypes []types.T,
acc *mon.BoundAccount,
newToWrap func(execinfra.RowSource) (execinfra.RowSource, error),
) (*Columnarizer, error) |
// NewColOperatorResult is a helper struct that encompasses all of the return
// values of NewColOperator call.
type NewColOperatorResult struct {
Op Operator
ColumnTypes []types.T
InternalMemUsage int
MetadataSources []execinfrapb.MetadataSource
IsStreaming bool
BufferingOpMemMonitors []*mon.BytesMonitor
BufferingOpMemAccounts []*mon.BoundAccount
}
// joinerPlanningState is a helper struct used when creating a hash or merge
// joiner to track the planning state.
type joinerPlanningState struct {
// postJoinerProjection is the projection that has to be added after a
// joiner. It is needed because the joiners always output all the requested
// columns from the left side first followed by the columns from the right
// side. However, post.OutputColumns projection can have an arbitrary order
// of columns, and postJoinerProjection behaves as an "adapter" between the
// output of the joiner and the requested post.OutputColumns projection.
postJoinerProjection []uint32
// postFilterPlanning will be set by the operators that handle the
// projection themselves. This is needed to handle post.Filter correctly so
// that those operators output all the columns that are used by post.Filter
// even if some columns are not needed by post.OutputColumns. If it remains
// unset, then postFilterPlanning will act as a noop.
postFilterPlanning filterPlanningState
}
// createJoiner adds a new hash or merge join with the argument function
// createJoinOpWithOnExprPlanning distinguishing between the two.
// Note: the passed in 'result' will be modified accordingly.
func createJoiner(
ctx context.Context,
result *NewColOperatorResult,
flowCtx *execinfra.FlowCtx,
spec *execinfrapb.ProcessorSpec,
inputs []Operator,
acc *mon.BoundAccount,
planningState *joinerPlanningState,
joinType sqlbase.JoinType,
createJoinOpWithOnExprPlanning func(
result *NewColOperatorResult,
leftTypes, rightTypes []coltypes.T,
leftOutCols, rightOutCols []uint32,
) (*execinfrapb.Expression, filterPlanningState, []uint32, []uint32, error),
) error {
var err error
if err = checkNumIn(inputs, 2); err != nil {
return err
}
post := &spec.Post
var leftTypes, rightTypes []coltypes.T
leftTypes, err = typeconv.FromColumnTypes(spec.Input[0].ColumnTypes)
if err != nil {
return err
}
rightTypes, err = typeconv.FromColumnTypes(spec.Input[1].ColumnTypes)
if err != nil {
return err
}
nLeftCols := uint32(len(leftTypes))
nRightCols := uint32(len(rightTypes))
leftOutCols := make([]uint32, 0)
rightOutCols := make([]uint32, 0)
// Note that we do not need a special treatment in case of LEFT SEMI and
// LEFT ANTI joins when setting up outCols because in such cases there will
// be a projection with post.OutputColumns already projecting out the right
// side.
if post.Projection {
for _, col := range post.OutputColumns {
if col < nLeftCols {
leftOutCols = append(leftOutCols, col)
} else {
rightOutCols = append(rightOutCols, col-nLeftCols)
}
}
// Now that we know how many columns are output from the left side, we
// can populate the "post-joiner" projection. Consider an example:
// we have post.OutputColumns = {6, 2, 5, 7, 0, 3} with nLeftCols = 6.
// We've just populated output columns as follows:
// leftOutCols = {2, 5, 0, 3} and rightOutCols = {6, 7},
// and because the joiner always outputs the left columns first, the output
// will look as {2, 5, 0, 3, 6, 7}, so we need to add an extra projection.
// The code below will populate postJoinerProjection with
// {4, 0, 1, 5, 2, 3}.
// Note that we don't need to pay attention to any filter planning
// additions since those will be projected out before we will add this
// "post-joiner" projection.
var lOutIdx, rOutIdx uint32
for _, col := range post.OutputColumns {
if col < nLeftCols {
planningState.postJoinerProjection = append(planningState.postJoinerProjection, lOutIdx)
lOutIdx++
} else {
planningState.postJoinerProjection = append(planningState.postJoinerProjection, uint32(len(leftOutCols))+rOutIdx)
rOutIdx++
}
}
} else {
for i := uint32(0); i < nLeftCols; i++ {
leftOutCols = append(leftOutCols, i)
}
for i := uint32(0); i < nRightCols; i++ {
rightOutCols = append(rightOutCols, i)
}
}
if !post.Filter.Empty() {
planningState.postFilterPlanning = makeFilterPlanningState(len(leftTypes), len(rightTypes))
leftOutCols, rightOutCols, err = planningState.postFilterPlanning.renderAllNeededCols(
post.Filter, leftOutCols, rightOutCols,
)
if err != nil {
return err
}
}
var (
onExpr *execinfrapb.Expression
onExprPlanning filterPlanningState
)
onExpr, onExprPlanning, leftOutCols, rightOutCols, err = createJoinOpWithOnExprPlanning(
result, leftTypes, rightTypes, leftOutCols, rightOutCols,
)
if err != nil {
return err
}
result.setProjectedByJoinerColumnTypes(spec, leftOutCols, rightOutCols)
if onExpr != nil && joinType == sqlbase.JoinType_INNER {
// We will plan other Operators on top of the joiners, so we need to
// account for the internal memory explicitly.
if internalMemOp, ok := result.Op.(InternalMemoryOperator); ok {
result.InternalMemUsage += internalMemOp.InternalMemoryUsage()
}
err = result.planFilterExpr(ctx, flowCtx.NewEvalCtx(), *onExpr, onExprPlanning.indexVarMap, acc)
onExprPlanning.projectOutExtraCols(result)
}
return err
}
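// Editorial sketch of the "post-joiner" projection computed in createJoiner
// (hypothetical helper, not CockroachDB API); it reproduces the worked
// example from the comment above:
//
//	func postJoinerProjection(outputColumns []uint32, nLeftCols uint32) []uint32 {
//		var nLeft uint32
//		for _, col := range outputColumns {
//			if col < nLeftCols {
//				nLeft++
//			}
//		}
//		projection := make([]uint32, 0, len(outputColumns))
//		var lOutIdx, rOutIdx uint32
//		for _, col := range outputColumns {
//			if col < nLeftCols {
//				projection = append(projection, lOutIdx)
//				lOutIdx++
//			} else {
//				projection = append(projection, nLeft+rOutIdx)
//				rOutIdx++
//			}
//		}
//		return projection
//	}
//
//	// postJoinerProjection([]uint32{6, 2, 5, 7, 0, 3}, 6) == []uint32{4, 0, 1, 5, 2, 3}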
// NewColOperator creates a new columnar operator according to the given spec.
// useStreamingMemAccountForBuffering specifies whether to use
// streamingMemAccount when creating buffering operators and should only be set
// to 'true' in tests.
func NewColOperator(
ctx context.Context,
flowCtx *execinfra.FlowCtx,
spec *execinfrapb.ProcessorSpec,
inputs []Operator,
streamingMemAccount *mon.BoundAccount,
useStreamingMemAccountForBuffering bool,
) (result NewColOperatorResult, err error) {
log.VEventf(ctx, 2, "planning col operator for spec %q", spec)
core := &spec.Core
post := &spec.Post
var planningState joinerPlanningState
// By default, we safely assume that an operator is not streaming. Note that
// projections, renders, filters, limits, offsets as well as all internal
// operators (like stats collectors and cancel checkers) are streaming, so in
// order to determine whether the resulting chain of operators is streaming,
// it is sufficient to look only at the "core" operator.
result.IsStreaming = false
switch {
case core.Noop != nil:
if err := checkNumIn(inputs, 1); err != nil {
return result, err
}
result.Op, result.IsStreaming = NewNoop(inputs[0]), true
result.ColumnTypes = spec.Input[0].ColumnTypes
case core.TableReader != nil:
if err := checkNumIn(inputs, 0); err != nil {
return result, err
}
if core.TableReader.IsCheck {
return result, errors.Newf("scrub table reader is unsupported in vectorized")
}
var scanOp *colBatchScan
scanOp, err = newColBatchScan(NewAllocator(ctx, streamingMemAccount), flowCtx, core.TableReader, post)
if err != nil {
return result, err
}
result.Op, result.IsStreaming = scanOp, true
result.MetadataSources = append(result.MetadataSources, scanOp)
// colBatchScan is wrapped with a cancel checker below, so we need to
// log its creation separately.
log.VEventf(ctx, 1, "made op %T\n", result.Op)
// We want to check for cancellation once per input batch, and wrapping
// only colBatchScan with a CancelChecker allows us to do just that.
// It's sufficient for most of the operators since they are extremely fast.
// However, some of the long-running operators (for example, sorter) are
// still responsible for doing the cancellation check on their own while
// performing long operations.
result.Op = NewCancelChecker(result.Op)
returnMutations := core.TableReader.Visibility == execinfrapb.ScanVisibility_PUBLIC_AND_NOT_PUBLIC
result.ColumnTypes = core.TableReader.Table.ColumnTypesWithMutations(returnMutations)
case core.Aggregator != nil:
if err := checkNumIn(inputs, 1); err != nil {
return result, err
}
aggSpec := core.Aggregator
if len(aggSpec.Aggregations) == 0 {
// We can get an aggregator when no aggregate functions are present if
// HAVING clause is present, for example, with a query as follows:
// SELECT 1 FROM t HAVING true. In this case, we plan a special operator
// that outputs a batch of length 1 without actual columns once and then
// zero-length batches. The actual "data" will be added by projections
// below.
// TODO(solon): The distsql plan for this case includes a TableReader, so
// we end up creating an orphaned colBatchScan. We should avoid that.
// Ideally the optimizer would not plan a scan in this unusual case.
result.Op, result.IsStreaming, err = NewSingleTupleNoInputOp(NewAllocator(ctx, streamingMemAccount)), true, nil
// We make ColumnTypes non-nil so that sanity check doesn't panic.
result.ColumnTypes = make([]types.T, 0)
break
}
if len(aggSpec.GroupCols) == 0 &&
len(aggSpec.Aggregations) == 1 &&
aggSpec.Aggregations[0].FilterColIdx == nil &&
aggSpec.Aggregations[0].Func == execinfrapb.AggregatorSpec_COUNT_ROWS &&
!aggSpec.Aggregations[0].Distinct {
result.Op, result.IsStreaming, err = NewCountOp(NewAllocator(ctx, streamingMemAccount), inputs[0]), true, nil
result.ColumnTypes = []types.T{*types.Int}
break
}
var groupCols, orderedCols util.FastIntSet
for _, col := range aggSpec.OrderedGroupCols {
orderedCols.Add(int(col))
}
needHash := false
for _, col := range aggSpec.GroupCols {
if !orderedCols.Contains(int(col)) {
needHash = true
}
groupCols.Add(int(col))
}
if !orderedCols.SubsetOf(groupCols) {
return result, errors.AssertionFailedf("ordered cols must be a subset of grouping cols")
}
aggTyps := make([][]types.T, len(aggSpec.Aggregations))
aggCols := make([][]uint32, len(aggSpec.Aggregations))
aggFns := make([]execinfrapb.AggregatorSpec_Func, len(aggSpec.Aggregations))
result.ColumnTypes = make([]types.T, len(aggSpec.Aggregations))
for i, agg := range aggSpec.Aggregations {
if agg.Distinct {
return result, errors.Newf("distinct aggregation not supported")
}
if agg.FilterColIdx != nil {
return result, errors.Newf("filtering aggregation not supported")
}
if len(agg.Arguments) > 0 {
return result, errors.Newf("aggregates with arguments not supported")
}
aggTyps[i] = make([]types.T, len(agg.ColIdx))
for j, colIdx := range agg.ColIdx {
aggTyps[i][j] = spec.Input[0].ColumnTypes[colIdx]
}
aggCols[i] = agg.ColIdx
aggFns[i] = agg.Func
switch agg.Func {
case execinfrapb.AggregatorSpec_SUM:
switch aggTyps[i][0].Family() {
case types.IntFamily:
// TODO(alfonso): plan ordinary SUM on integer types by casting to DECIMAL
// at the end, mod issues with overflow. Perhaps to avoid the overflow
// issues, at first, we could plan SUM for all types besides Int64.
return result, errors.Newf("sum on int cols not supported (use sum_int)")
}
case execinfrapb.AggregatorSpec_SUM_INT:
// TODO(yuzefovich): support this case through vectorize.
if aggTyps[i][0].Width() != 64 {
return result, errors.Newf("sum_int is only supported on Int64 through vectorized")
}
}
_, retType, err := execinfrapb.GetAggregateInfo(agg.Func, aggTyps[i]...)
if err != nil {
return result, err
}
result.ColumnTypes[i] = *retType
}
var typs []coltypes.T
typs, err = typeconv.FromColumnTypes(spec.Input[0].ColumnTypes)
if err != nil {
return result, err
}
if needHash {
hashAggregatorMemAccount := streamingMemAccount
if !useStreamingMemAccountForBuffering {
hashAggregatorMemAccount = result.createBufferingMemAccount(
ctx, flowCtx, "hash-aggregator-limited",
)
}
result.Op, err = NewHashAggregator(
NewAllocator(ctx, hashAggregatorMemAccount), inputs[0], typs, aggFns,
aggSpec.GroupCols, aggCols, execinfrapb.IsScalarAggregate(aggSpec),
)
} else {
result.Op, err = NewOrderedAggregator(
NewAllocator(ctx, streamingMemAccount), inputs[0], typs, aggFns,
aggSpec.GroupCols, aggCols, execinfrapb.IsScalarAggregate(aggSpec),
)
result.IsStreaming = true
}
case core.Distinct != nil:
if err := checkNumIn(inputs, 1); err != nil {
return result, err
}
var distinctCols, orderedCols util.FastIntSet
for _, col := range core.Distinct.OrderedColumns {
orderedCols.Add(int(col))
}
for _, col := range core.Distinct.DistinctColumns {
if !orderedCols.Contains(int(col)) {
return result, errors.Newf("unsorted distinct not supported")
}
distinctCols.Add(int(col))
}
if !orderedCols.SubsetOf(distinctCols) {
return result, errors.AssertionFailedf("ordered cols must be a subset of distinct cols")
}
result.ColumnTypes = spec.Input[0].ColumnTypes
var typs []coltypes.T
typs, err = typeconv.FromColumnTypes(result.ColumnTypes)
if err != nil {
return result, err
}
result.Op, err = NewOrderedDistinct(inputs[0], core.Distinct.OrderedColumns, typs)
result.IsStreaming = true
case core.Ordinality != nil:
if err := checkNumIn(inputs, 1); err != nil {
return result, err
}
result.ColumnTypes = append(spec.Input[0].ColumnTypes, *types.Int)
result.Op, result.IsStreaming = NewOrdinalityOp(NewAllocator(ctx, streamingMemAccount), inputs[0]), true
case core.HashJoiner != nil:
createHashJoinerWithOnExprPlanning := func(
result *NewColOperatorResult,
leftTypes, rightTypes []coltypes.T,
leftOutCols, rightOutCols []uint32,
) (*execinfrapb.Expression, filterPlanningState, []uint32, []uint32, error) {
var (
onExpr *execinfrapb.Expression
onExprPlanning filterPlanningState
)
if !core.HashJoiner.OnExpr.Empty() {
if core.HashJoiner.Type != sqlbase.JoinType_INNER {
return onExpr, onExprPlanning, leftOutCols, rightOutCols, errors.Newf("can't plan non-inner hash join with on expressions")
}
onExpr = &core.HashJoiner.OnExpr
onExprPlanning = makeFilterPlanningState(len(leftTypes), len(rightTypes))
leftOutCols, rightOutCols, err = onExprPlanning.renderAllNeededCols(
*onExpr, leftOutCols, rightOutCols,
)
if err != nil {
return onExpr, onExprPlanning, leftOutCols, rightOutCols, err
}
}
hashJoinerMemAccount := streamingMemAccount
if !useStreamingMemAccountForBuffering {
hashJoinerMemAccount = result.createBufferingMemAccount(
ctx, flowCtx, "hash-joiner-limited",
)
}
result.Op, err = NewEqHashJoinerOp(
NewAllocator(ctx, hashJoinerMemAccount),
inputs[0],
inputs[1],
core.HashJoiner.LeftEqColumns,
core.HashJoiner.RightEqColumns,
leftOutCols,
rightOutCols,
leftTypes,
rightTypes,
core.HashJoiner.RightEqColumnsAreKey,
core.HashJoiner.LeftEqColumnsAreKey || core.HashJoiner.RightEqColumnsAreKey,
core.HashJoiner.Type,
)
return onExpr, onExprPlanning, leftOutCols, rightOutCols, err
}
err = createJoiner(
ctx, &result, flowCtx, spec, inputs, streamingMemAccount, &planningState,
core.HashJoiner.Type, createHashJoinerWithOnExprPlanning,
)
case core.MergeJoiner != nil:
if core.MergeJoiner.Type.IsSetOpJoin() {
return result, errors.AssertionFailedf("unexpectedly %s merge join was planned", core.MergeJoiner.Type.String())
}
// Merge joiner is a streaming operator when equality columns form a key
// for both of the inputs.
result.IsStreaming = core.MergeJoiner.LeftEqColumnsAreKey && core.MergeJoiner.RightEqColumnsAreKey
createMergeJoinerWithOnExprPlanning := func(
result *NewColOperatorResult,
leftTypes, rightTypes []coltypes.T,
leftOutCols, rightOutCols []uint32,
) (*execinfrapb.Expression, filterPlanningState, []uint32, []uint32, error) {
var (
onExpr *execinfrapb.Expression
onExprPlanning filterPlanningState
filterOnlyOnLeft bool
filterConstructor func(Operator) (Operator, error)
)
if !core.MergeJoiner.OnExpr.Empty() {
// At the moment, we want to be on the conservative side and not run
// queries with ON expressions when vectorize=auto, so we say that the
// merge join is not streaming which will reject running such a query
// through vectorized engine with 'auto' setting.
// TODO(yuzefovich): remove this when we're confident in ON expression
// support.
result.IsStreaming = false
onExpr = &core.MergeJoiner.OnExpr
onExprPlanning = makeFilterPlanningState(len(leftTypes), len(rightTypes))
switch core.MergeJoiner.Type {
case sqlbase.JoinType_INNER:
leftOutCols, rightOutCols, err = onExprPlanning.renderAllNeededCols(
*onExpr, leftOutCols, rightOutCols,
)
case sqlbase.JoinType_LEFT_SEMI, sqlbase.JoinType_LEFT_ANTI:
filterOnlyOnLeft, err = onExprPlanning.isFilterOnlyOnLeft(*onExpr)
filterConstructor = func(op Operator) (Operator, error) {
r := NewColOperatorResult{
Op: op,
ColumnTypes: append(spec.Input[0].ColumnTypes, spec.Input[1].ColumnTypes...),
}
// We don't need to specify indexVarMap because the filter will be
// run alongside the merge joiner, and it will have access to all
// of the columns from both sides.
err := r.planFilterExpr(ctx, flowCtx.NewEvalCtx(), *onExpr, nil /* indexVarMap */, streamingMemAccount)
return r.Op, err
}
default:
return onExpr, onExprPlanning, leftOutCols, rightOutCols, errors.Errorf("can only plan INNER, LEFT SEMI, and LEFT ANTI merge joins with ON expressions")
}
}
if err != nil {
return onExpr, onExprPlanning, leftOutCols, rightOutCols, err
}
mergeJoinerMemAccount := streamingMemAccount
if !result.IsStreaming && !useStreamingMemAccountForBuffering {
// Whether the merge joiner is streaming is already set above.
mergeJoinerMemAccount = result.createBufferingMemAccount(
ctx, flowCtx, "merge-joiner-limited",
)
}
result.Op, err = NewMergeJoinOp(
NewAllocator(ctx, mergeJoinerMemAccount),
core.MergeJoiner.Type,
inputs[0],
inputs[1],
leftOutCols,
rightOutCols,
leftTypes,
rightTypes,
core.MergeJoiner.LeftOrdering.Columns,
core.MergeJoiner.RightOrdering.Columns,
filterConstructor,
filterOnlyOnLeft,
)
return onExpr, onExprPlanning, leftOutCols, rightOutCols, err
}
err = createJoiner(
ctx, &result, flowCtx, spec, inputs, streamingMemAccount, &planningState,
core.MergeJoiner.Type, createMergeJoinerWithOnExprPlanning,
)
case core.JoinReader != nil:
if err := checkNumIn(inputs, 1); err != nil {
return result, err
}
var c *Columnarizer
c, err = wrapRowSource(
ctx,
flowCtx,
inputs[0],
spec.Input[0].ColumnTypes,
streamingMemAccount,
func(input execinfra.RowSource) (execinfra.RowSource, error) {
var (
jr execinfra.RowSource
err error
)
// The lookup and index joiners need to be passed the post-process specs,
// since they inspect them to figure out information about needed columns.
// This means that we'll let those processors do any renders or filters,
// which isn't ideal. We could improve this.
if len(core.JoinReader.LookupColumns) == 0 {
jr, err = execinfra.NewIndexJoiner(
flowCtx, spec.ProcessorID, core.JoinReader, input, post, nil, /* output */
)
} else {
jr, err = execinfra.NewJoinReader(
flowCtx, spec.ProcessorID, core.JoinReader, input, post, nil, /* output */
)
}
post = &execinfrapb.PostProcessSpec{}
if err != nil {
return nil, err
}
result.ColumnTypes = jr.OutputTypes()
return jr, nil
},
)
result.Op, result.IsStreaming = c, true
result.MetadataSources = append(result.MetadataSources, c)
case core.Sorter != nil:
if err := checkNumIn(inputs, 1); err != nil {
return result, err
}
input := inputs[0]
var inputTypes []coltypes.T
inputTypes, err = typeconv.FromColumnTypes(spec.Input[0].ColumnTypes)
if err != nil {
return result, err
}
orderingCols := core.Sorter.OutputOrdering.Columns
matchLen := core.Sorter.OrderingMatchLen
if matchLen > 0 {
// The input is already partially ordered. Use a chunks sorter to avoid
// loading all the rows into memory.
var sortChunksMemAccount *mon.BoundAccount
if useStreamingMemAccountForBuffering {
sortChunksMemAccount = streamingMemAccount
} else {
sortChunksMemAccount = result.createBufferingMemAccount(
ctx, flowCtx, "sort-chunks-limited",
)
}
result.Op, err = NewSortChunks(
NewAllocator(ctx, sortChunksMemAccount), input, inputTypes,
orderingCols, int(matchLen),
)
} else if post.Limit != 0 && post.Filter.Empty() && post.Limit+post.Offset < math.MaxUint16 {
// There is a limit specified with no post-process filter, so we know
// exactly how many rows the sorter should output. Choose a top K sorter,
// which uses a heap to avoid storing more rows than necessary.
k := uint16(post.Limit + post.Offset)
result.Op = NewTopKSorter(
NewAllocator(ctx, streamingMemAccount), input, inputTypes,
orderingCols, k,
)
result.IsStreaming = true
} else {
// No optimizations possible. Default to the standard sort operator.
var sorterMemAccount *mon.BoundAccount
if useStreamingMemAccountForBuffering {
sorterMemAccount = streamingMemAccount
} else {
sorterMemAccount = result.createBufferingMemAccount(
ctx, flowCtx, "sort-all-limited",
)
}
inMemorySorter, err := NewSorter(
NewAllocator(ctx, sorterMemAccount), input, inputTypes, orderingCols,
)
if err != nil {
return result, err
}
var diskSpillerMemAccount *mon.BoundAccount
if useStreamingMemAccountForBuffering {
diskSpillerMemAccount = streamingMemAccount
} else {
diskSpillerMemAccount = result.createBufferingMemAccount(
ctx, flowCtx, "disk-spiller-sort-all-limited",
)
}
diskSpillerAllocator := NewAllocator(ctx, diskSpillerMemAccount)
result.Op = newOneInputDiskSpiller(
diskSpillerAllocator,
input, inMemorySorter.(bufferingInMemoryOperator),
func(input Operator) Operator {
return newExternalSorter(diskSpillerAllocator, input, inputTypes, orderingCols)
})
}
result.ColumnTypes = spec.Input[0].ColumnTypes
case core.Windower != nil:
if err := checkNumIn(inputs, 1); err != nil {
return result, err
}
if len(core.Windower.WindowFns) != 1 {
return result, errors.Newf("only a single window function is currently supported")
}
wf := core.Windower.WindowFns[0]
if wf.Frame != nil &&
(wf.Frame.Mode != execinfrapb.WindowerSpec_Frame_RANGE ||
wf.Frame.Bounds.Start.BoundType != execinfrapb.WindowerSpec_Frame_UNBOUNDED_PRECEDING ||
(wf.Frame.Bounds.End != nil && wf.Frame.Bounds.End.BoundType != execinfrapb.WindowerSpec_Frame_CURRENT_ROW)) {
return result, errors.Newf("window functions with non-default window frames are not supported")
}
if wf.Func.AggregateFunc != nil {
return result, errors.Newf("aggregate functions used as window functions are not supported")
}
input := inputs[0]
var typs []coltypes.T
typs, err = typeconv.FromColumnTypes(spec.Input[0].ColumnTypes)
if err != nil {
return result, err
}
tempPartitionColOffset, partitionColIdx := 0, -1
if len(core.Windower.PartitionBy) > 0 {
// TODO(yuzefovich): add support for hashing partitioner (probably by
// leveraging hash routers once we can distribute). The decision about
// which kind of partitioner to use should come from the optimizer.
windowSortingPartitionerMemAccount := streamingMemAccount
if !useStreamingMemAccountForBuffering {
windowSortingPartitionerMemAccount = result.createBufferingMemAccount(
ctx, flowCtx, "window-sorting-partitioner-limited",
)
}
input, err = NewWindowSortingPartitioner(
NewAllocator(ctx, windowSortingPartitionerMemAccount), input, typs,
core.Windower.PartitionBy, wf.Ordering.Columns, int(wf.OutputColIdx),
)
tempPartitionColOffset, partitionColIdx = 1, int(wf.OutputColIdx)
} else {
if len(wf.Ordering.Columns) > 0 {
windowSorterMemAccount := streamingMemAccount
if !useStreamingMemAccountForBuffering {
windowSorterMemAccount = result.createBufferingMemAccount(
ctx, flowCtx, "window-sorter-limited",
)
}
input, err = NewSorter(
NewAllocator(ctx, windowSorterMemAccount), input, typs,
wf.Ordering.Columns,
)
}
// TODO(yuzefovich): when both PARTITION BY and ORDER BY clauses are
// omitted, the window function operator is actually streaming.
}
if err != nil {
return result, err
}
orderingCols := make([]uint32, len(wf.Ordering.Columns))
for i, col := range wf.Ordering.Columns {
orderingCols[i] = col.ColIdx
}
switch *wf.Func.WindowFunc {
case execinfrapb.WindowerSpec_ROW_NUMBER:
result.Op = NewRowNumberOperator(NewAllocator(ctx, streamingMemAccount), input, int(wf.OutputColIdx)+tempPartitionColOffset, partitionColIdx)
case execinfrapb.WindowerSpec_RANK:
result.Op, err = NewRankOperator(NewAllocator(ctx, streamingMemAccount), input, typs, false /* dense */, orderingCols, int(wf.OutputColIdx)+tempPartitionColOffset, partitionColIdx)
case execinfrapb.WindowerSpec_DENSE_RANK:
result.Op, err = NewRankOperator(NewAllocator(ctx, streamingMemAccount), input, typs, true /* dense */, orderingCols, int(wf.OutputColIdx)+tempPartitionColOffset, partitionColIdx)
default:
return result, errors.Newf("window function %s is not supported", wf.String())
}
if partitionColIdx != -1 {
// Window partitioner will append a temporary column to the batch which
// we want to project out.
projection := make([]uint32, 0, wf.OutputColIdx+1)
for i := uint32(0); i < wf.OutputColIdx; i++ {
projection = append(projection, i)
}
projection = append(projection, wf.OutputColIdx+1)
result.Op = NewSimpleProjectOp(result.Op, int(wf.OutputColIdx+1), projection)
}
result.ColumnTypes = append(spec.Input[0].ColumnTypes, *types.Int)
default:
return result, errors.Newf("unsupported processor core %q", core)
}
if err != nil {
return result, err
}
// After constructing the base operator, calculate its internal memory usage.
if sMem, ok := result.Op.(InternalMemoryOperator); ok {
result.InternalMemUsage += sMem.InternalMemoryUsage()
}
log.VEventf(ctx, 1, "made op %T\n", result.Op)
// Note: at this point, it is legal for ColumnTypes to be empty (it is
// legal for empty rows to be passed between processors).
if !post.Filter.Empty() {
if err = result.planFilterExpr(
ctx, flowCtx.NewEvalCtx(), post.Filter,
planningState.postFilterPlanning.indexVarMap, streamingMemAccount,
); err != nil {
return result, err
}
planningState.postFilterPlanning.projectOutExtraCols(&result)
}
if post.Projection {
if len(planningState.postJoinerProjection) > 0 {
result.addProjection(planningState.postJoinerProjection)
} else {
result.addProjection(post.OutputColumns)
}
} else if post.RenderExprs != nil {
log.VEventf(ctx, 2, "planning render expressions %+v", post.RenderExprs)
var renderedCols []uint32
for _, expr := range post.RenderExprs {
var (
helper execinfra.ExprHelper
renderInternalMem int
)
err := helper.Init(expr, result.ColumnTypes, flowCtx.EvalCtx)
if err != nil {
return result, err
}
var outputIdx int
result.Op, outputIdx, result.ColumnTypes, renderInternalMem, err = planProjectionOperators(
ctx, flowCtx.NewEvalCtx(), helper.Expr, result.ColumnTypes, result.Op, streamingMemAccount,
)
if err != nil {
return result, errors.Wrapf(err, "unable to columnarize render expression %q", expr)
}
if outputIdx < 0 {
return result, errors.AssertionFailedf("missing outputIdx")
}
result.InternalMemUsage += renderInternalMem
renderedCols = append(renderedCols, uint32(outputIdx))
}
result.Op = NewSimpleProjectOp(result.Op, len(result.ColumnTypes), renderedCols)
newTypes := make([]types.T, 0, len(renderedCols))
for _, j := range renderedCols {
newTypes = append(newTypes, result.ColumnTypes[j])
}
result.ColumnTypes = newTypes
}
if post.Offset != 0 {
result.Op = NewOffsetOp(result.Op, post.Offset)
}
if post.Limit != 0 {
result.Op = NewLimitOp(result.Op, post.Limit)
}
return result, err
}
type filterPlanningState struct {
numLeftInputCols int
numRightInputCols int
// indexVarMap will be populated when rendering all needed columns in case
// when at least one column from either side is used by the filter.
indexVarMap []int
// originalLeftOutCols and originalRightOutCols are stored so that we can
// remove all the extra columns that were added to handle the filter.
originalLeftOutCols []uint32
originalRightOutCols []uint32
}
func makeFilterPlanningState(numLeftInputCols, numRightInputCols int) filterPlanningState {
return filterPlanningState{
numLeftInputCols: numLeftInputCols,
numRightInputCols: numRightInputCols,
}
}
// renderAllNeededCols makes sure that all columns used by filter expression
// will be output. It does so by extracting the indices of all indexed vars
// used in the expression and appending those that are missing from *OutCols
// slices to the slices. Additionally, it populates p.indexVarMap to be used
// later to correctly remap the indexed vars and stores the original *OutCols
// to be projected after the filter has been run.
// It returns updated leftOutCols and rightOutCols.
// NOTE: projectOutExtraCols must be called after the filter has been run.
func (p *filterPlanningState) renderAllNeededCols(
filter execinfrapb.Expression, leftOutCols []uint32, rightOutCols []uint32,
) ([]uint32, []uint32, error) {
neededColumnsForFilter, err := findIVarsInRange(
filter,
0, /* start */
p.numLeftInputCols+p.numRightInputCols,
)
if err != nil {
return nil, nil, errors.Errorf("error parsing filter expression %q: %s", filter, err)
}
if len(neededColumnsForFilter) > 0 {
// Store the original out columns to be restored later.
p.originalLeftOutCols = leftOutCols
p.originalRightOutCols = rightOutCols
// At least one column is referenced by the filter expression.
p.indexVarMap = make([]int, p.numLeftInputCols+p.numRightInputCols)
for i := range p.indexVarMap {
p.indexVarMap[i] = -1
}
// First, we process only the left side.
for i, lCol := range leftOutCols {
p.indexVarMap[lCol] = i
}
for _, neededCol := range neededColumnsForFilter {
if int(neededCol) < p.numLeftInputCols {
if p.indexVarMap[neededCol] == -1 {
p.indexVarMap[neededCol] = len(leftOutCols)
leftOutCols = append(leftOutCols, neededCol)
}
}
}
// Now that we know how many columns from the left will be output, we can
// process the right side.
//
// Here is the explanation of all the indices' dance below:
// suppose we have two inputs with three columns in each, the filter
// expression as @1 = @4 AND @3 = @5, and leftOutCols = {0} and
// rightOutCols = {0} when this method was called. Note that only
// ordinals in the expression are counting from 1, everything else is
// zero-based.
// - After we processed the left side above, we have the following state:
// neededColumnsForFilter = {0, 2, 3, 4}
// leftOutCols = {0, 2}
// p.indexVarMap = {0, -1, 1, -1, -1, -1}
// - We calculate rColOffset = 3 to know which columns for filter are from
// the right side as well as to remap those for rightOutCols (the
// remapping step is needed because rightOutCols "thinks" only in the
// context of the right side).
// - Next, we add already present rightOutCols to the indexed var map:
// rightOutCols = {0}
// p.indexVarMap = {0, -1, 1, 2, -1, -1}
// Note that we needed to remap the column index, and we could do so only
// after the left side has been processed because we need to know how
// many columns will be output from the left.
// - Then, we go through the needed columns for filter slice again, and add
// any that are still missing to rightOutCols:
// rightOutCols = {0, 1}
// p.indexVarMap = {0, -1, 1, 2, 3, -1}
// - We also stored the fact that we appended 1 extra column for both
// inputs, and we will project those out.
rColOffset := uint32(p.numLeftInputCols)
for i, rCol := range rightOutCols {
p.indexVarMap[rCol+rColOffset] = len(leftOutCols) + i
}
for _, neededCol := range neededColumnsForFilter {
if neededCol >= rColOffset {
if p.indexVarMap[neededCol] == -1 {
p.indexVarMap[neededCol] = len(rightOutCols) + len(leftOutCols)
rightOutCols = append(rightOutCols, neededCol-rColOffset)
}
}
}
}
return leftOutCols, rightOutCols, nil
}
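// Editorial illustration (hypothetical helper, not part of this file): the
// indexVarMap built above is consumed by remapping each indexed var to its
// position in the joiner's physical output.
//
//	func remapIVar(indexVarMap []int, ivar int) int {
//		remapped := indexVarMap[ivar]
//		if remapped == -1 {
//			panic("filter references a column that was not rendered")
//		}
//		return remapped
//	}
//
//	// With the final p.indexVarMap = {0, -1, 1, 2, 3, -1} from the worked
//	// example above, zero-based var 3 (ordinal @4) maps to physical column 2.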
// isFilterOnlyOnLeft returns whether the filter expression doesn't use columns
// from the right side.
func (p *filterPlanningState) isFilterOnlyOnLeft(filter execinfrapb.Expression) (bool, error) {
// Find all needed columns for filter only from the right side.
neededColumnsForFilter, err := findIVarsInRange(
filter, p.numLeftInputCols, p.numLeftInputCols+p.numRightInputCols,
)
if err != nil {
return false, errors.Errorf("error parsing filter expression %q: %s", filter, err)
}
return len(neededColumnsForFilter) == 0, nil
}
// projectOutExtraCols, possibly, adds a projection to remove all the extra
// columns that were needed by the filter expression.
// NOTE: result.ColumnTypes is updated if the projection is added.
func (p *filterPlanningState) projectOutExtraCols(result *NewColOperatorResult) {
if p.indexVarMap == nil {
// If p.indexVarMap is nil, then this filter planning didn't add any extra
// columns, so there is nothing to project out.
return
}
projection := make([]uint32, 0, len(p.originalLeftOutCols)+len(p.originalRightOutCols))
for _, i := range p.originalLeftOutCols {
projection = append(projection, uint32(p.indexVarMap[i]))
}
rColOffset := uint32(p.numLeftInputCols)
for _, i := range p.originalRightOutCols {
projection = append(projection, uint32(p.indexVarMap[rColOffset+i]))
}
result.Op = NewSimpleProjectOp(result.Op, len(result.ColumnTypes), projection)
// Update output column types according to the projection.
newTypes := make([]types.T, 0, len(projection))
for _, j := range projection {
newTypes = append(newTypes, result.ColumnTypes[j])
}
result.ColumnTypes = newTypes
}
// createBufferingMemAccount instantiates a memory monitor and a memory account
// to be used with a buffering Operator. The receiver is updated to have
// references to both objects.
func (r *NewColOperatorResult) createBufferingMemAccount(
ctx context.Context, flowCtx *execinfra.FlowCtx, name string,
) *mon.BoundAccount {
bufferingOpMemMonitor := execinfra.NewLimitedMonitor(
ctx, flowCtx.EvalCtx.Mon, flowCtx.Cfg, name,
)
r.BufferingOpMemMonitors = append(r.BufferingOpMemMonitors, bufferingOpMemMonitor)
bufferingMemAccount := bufferingOpMemMonitor.MakeBoundAccount()
r.BufferingOpMemAccounts = append(r.BufferingOpMemAccounts, &bufferingMemAccount)
return &bufferingMemAccount
}
// setProjectedByJoinerColumnTypes sets column types on r according to a
// joiner handled projection.
// NOTE: r.ColumnTypes is updated.
func (r *NewColOperatorResult) setProjectedByJoinerColumnTypes(
spec *execinfrapb.ProcessorSpec, leftOutCols, rightOutCols []uint32,
) {
r.ColumnTypes = make([]types.T, 0, len(leftOutCols)+len(rightOutCols))
for _, leftOutCol := range leftOutCols {
r.ColumnTypes = append(r.ColumnTypes, spec.Input[0].ColumnTypes[leftOutCol])
}
for _, rightOutCol := range rightOutCols {
r.ColumnTypes = append(r.ColumnTypes, spec.Input[1].ColumnTypes[rightOutCol])
}
}
func (r *NewColOperatorResult) planFilterExpr(
ctx context.Context,
evalCtx *tree.EvalContext,
filter execinfrapb.Expression,
indexVarMap []int,
acc *mon.BoundAccount,
) error {
var (
helper execinfra.ExprHelper
selectionInternalMem int
)
err := helper.InitWithRemapping(filter, r.ColumnTypes, evalCtx, indexVarMap)
if err != nil {
return err
}
if helper.Expr == tree.DNull {
// The filter expression is tree.DNull meaning that it is always false, so
// we put a zero operator.
r.Op = NewZeroOp(r.Op)
return nil
}
var filterColumnTypes []types.T
r.Op, _, filterColumnTypes, selectionInternalMem, err = planSelectionOperators(
ctx, evalCtx, helper.Expr, r.ColumnTypes, r.Op, acc,
)
if err != nil {
return errors.Wrapf(err, "unable to columnarize filter expression %q", filter.Expr)
}
r.InternalMemUsage += selectionInternalMem
if len(filterColumnTypes) > len(r.ColumnTypes) {
// Additional columns were appended to store projections while evaluating
// the filter. Project them away.
var outputColumns []uint32
for i := range r.ColumnTypes {
outputColumns = append(outputColumns, uint32(i))
}
r.Op = NewSimpleProjectOp(r.Op, len(filterColumnTypes), outputColumns)
}
return nil
}
// addProjection adds a simple projection to r (Op and ColumnTypes are updated
// accordingly).
func (r *NewColOperatorResult) addProjection(projection []uint32) {
r.Op = NewSimpleProjectOp(r.Op, len(r.ColumnTypes), projection)
// Update output ColumnTypes.
newTypes := make([]types.T, 0, len(projection))
for _, j := range projection {
newTypes = append(newTypes, r.ColumnTypes[j])
}
r.ColumnTypes = newTypes
}
func planSelectionOperators(
ctx context.Context,
evalCtx *tree.EvalContext,
expr tree.TypedExpr,
columnTypes []types.T,
input Operator,
acc *mon.BoundAccount,
) (op Operator, resultIdx int, ct []types.T, internalMemUsed int, err error) {
switch t := expr.(type) {
case *tree.IndexedVar:
return NewBoolVecToSelOp(input, t.Idx), -1, columnTypes, internalMemUsed, nil
case *tree.AndExpr:
// AND expressions are handled by an implicit AND'ing of selection vectors.
// First we select out the tuples that are true on the left side, and then,
// only among the matched tuples, we select out the tuples that are true on
// the right side.
var leftOp, rightOp Operator
var internalMemUsedLeft, internalMemUsedRight int
leftOp, _, ct, internalMemUsedLeft, err = planSelectionOperators(
ctx, evalCtx, t.TypedLeft(), columnTypes, input, acc,
)
if err != nil {
return nil, resultIdx, ct, internalMemUsed, err
}
rightOp, resultIdx, ct, internalMemUsedRight, err = planSelectionOperators(
ctx, evalCtx, t.TypedRight(), ct, leftOp, acc,
)
return rightOp, resultIdx, ct, internalMemUsedLeft + internalMemUsedRight, err
case *tree.OrExpr:
// OR expressions are handled by converting them to an equivalent CASE
// statement. Since CASE statements don't have a selection form, plan a
// projection and then convert the resulting boolean to a selection vector.
//
// Rewrite the OR expression as an equivalent CASE expression.
// "a OR b" becomes "CASE WHEN a THEN true WHEN b THEN true ELSE false END".
// This way we can take advantage of the short-circuiting logic built into
// the CASE operator. (b should not be evaluated if a is true.)
caseExpr, err := tree.NewTypedCaseExpr(
nil, /* expr */
[]*tree.When{
{Cond: t.Left, Val: tree.DBoolTrue},
{Cond: t.Right, Val: tree.DBoolTrue},
},
tree.DBoolFalse,
types.Bool)
if err != nil {
return nil, resultIdx, ct, internalMemUsed, err
}
op, resultIdx, ct, internalMemUsed, err = planProjectionOperators(
ctx, evalCtx, caseExpr, columnTypes, input, acc,
)
op = NewBoolVecToSelOp(op, resultIdx)
return op, resultIdx, ct, internalMemUsed, err
case *tree.CaseExpr:
op, resultIdx, ct, internalMemUsed, err = planProjectionOperators(
ctx, evalCtx, expr, columnTypes, input, acc,
)
op = NewBoolVecToSelOp(op, resultIdx)
return op, resultIdx, ct, internalMemUsed, err
case *tree.ComparisonExpr:
cmpOp := t.Operator
leftOp, leftIdx, ct, internalMemUsedLeft, err := planProjectionOperators(
ctx, evalCtx, t.TypedLeft(), columnTypes, input, acc,
)
if err != nil {
return nil, resultIdx, ct, internalMemUsed, err
}
lTyp := &ct[leftIdx]
if constArg, ok := t.Right.(tree.Datum); ok {
if t.Operator == tree.Like || t.Operator == tree.NotLike {
negate := t.Operator == tree.NotLike
op, err = GetLikeOperator(
evalCtx, leftOp, leftIdx, string(tree.MustBeDString(constArg)), negate)
return op, resultIdx, ct, internalMemUsedLeft, err
}
if t.Operator == tree.In || t.Operator == tree.NotIn {
negate := t.Operator == tree.NotIn
datumTuple, ok := tree.AsDTuple(constArg)
if !ok {
err = errors.Errorf("IN is only supported for constant expressions")
return nil, resultIdx, ct, internalMemUsed, err
}
op, err = GetInOperator(lTyp, leftOp, leftIdx, datumTuple, negate)
return op, resultIdx, ct, internalMemUsedLeft, err
}
if t.Operator == tree.IsDistinctFrom || t.Operator == tree.IsNotDistinctFrom {
if t.Right != tree.DNull {
err = errors.Errorf("IS DISTINCT FROM and IS NOT DISTINCT FROM are supported only with NULL argument")
return nil, resultIdx, ct, internalMemUsed, err
}
// IS NULL is replaced with IS NOT DISTINCT FROM NULL, so we want to
// negate when IS DISTINCT FROM is used.
negate := t.Operator == tree.IsDistinctFrom
op = newIsNullSelOp(leftOp, leftIdx, negate)
return op, resultIdx, ct, internalMemUsedLeft, err
}
op, err := GetSelectionConstOperator(lTyp, t.TypedRight().ResolvedType(), cmpOp, leftOp, leftIdx, constArg)
return op, resultIdx, ct, internalMemUsedLeft, err
}
rightOp, rightIdx, ct, internalMemUsedRight, err := planProjectionOperators(
ctx, evalCtx, t.TypedRight(), ct, leftOp, acc,
)
if err != nil {
return nil, resultIdx, ct, internalMemUsed, err
}
op, err := GetSelectionOperator(lTyp, &ct[rightIdx], cmpOp, rightOp, leftIdx, rightIdx)
return op, resultIdx, ct, internalMemUsedLeft + internalMemUsedRight, err
default:
return nil, resultIdx, nil, internalMemUsed, errors.Errorf("unhandled selection expression type: %s", reflect.TypeOf(t))
}
}
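// The AND case above chains two selection operators: the right-hand
// predicate only ever sees tuples that already passed the left-hand one.
// The hypothetical helper below is a minimal sketch of those semantics on
// plain Go slices; it is illustrative only and not used by the planner.
func exampleAndSelectionSketch(leftPred, rightPred func(row int) bool, numRows int) []int {
	// sel collects the indices of surviving tuples, mirroring a selection
	// vector on a batch.
	var sel []int
	for row := 0; row < numRows; row++ {
		if leftPred(row) && rightPred(row) {
			sel = append(sel, row)
		}
	}
	return sel
}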
// planTypedMaybeNullProjectionOperators is used to plan projection operators, but is able to
// plan constNullOperators in the case that we know the "type" of the null. It is currently
// unsafe to plan a constNullOperator when we don't know the type of the null.
func planTypedMaybeNullProjectionOperators(
ctx context.Context,
evalCtx *tree.EvalContext,
expr tree.TypedExpr,
exprTyp *types.T,
columnTypes []types.T,
input Operator,
acc *mon.BoundAccount,
) (op Operator, resultIdx int, ct []types.T, internalMemUsed int, err error) {
if expr == tree.DNull {
resultIdx = len(columnTypes)
op = NewConstNullOp(NewAllocator(ctx, acc), input, resultIdx, typeconv.FromColumnType(exprTyp))
ct = append(columnTypes, *exprTyp)
return op, resultIdx, ct, internalMemUsed, nil
}
return planProjectionOperators(ctx, evalCtx, expr, columnTypes, input, acc)
}
// planProjectionOperators plans a chain of operators to execute the provided
// expression. It returns the tail of the chain, as well as the column index
// of the expression's result (if any, otherwise -1) and the column types of the
// resulting batches.
func planProjectionOperators(
ctx context.Context,
evalCtx *tree.EvalContext,
expr tree.TypedExpr,
columnTypes []types.T,
input Operator,
acc *mon.BoundAccount,
) (op Operator, resultIdx int, ct []types.T, internalMemUsed int, err error) {
resultIdx = -1
switch t := expr.(type) {
case *tree.IndexedVar:
return input, t.Idx, columnTypes, internalMemUsed, nil
case *tree.ComparisonExpr:
return planProjectionExpr(ctx, evalCtx, t.Operator, t.ResolvedType(), t.TypedLeft(), t.TypedRight(), columnTypes, input, acc)
case *tree.BinaryExpr:
return planProjectionExpr(ctx, evalCtx, t.Operator, t.ResolvedType(), t.TypedLeft(), t.TypedRight(), columnTypes, input, acc)
case *tree.CastExpr:
expr := t.Expr.(tree.TypedExpr)
// If the expression is NULL, we use planTypedMaybeNullProjectionOperators instead of planProjectionOperators
// because we can say that the type of the NULL is the type that we are casting to, rather than unknown.
// We can't use planProjectionOperators because it will reject planning a constNullOp without knowing
// the post typechecking "type" of the NULL.
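		// For example, in "CAST(NULL AS INT)" the inner NULL resolves to
		// types.Unknown, so we plan a typed constNullOp of the target type
		// (INT here) instead of failing.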
if expr.ResolvedType() == types.Unknown {
op, resultIdx, ct, internalMemUsed, err = planTypedMaybeNullProjectionOperators(ctx, evalCtx, expr, t.Type, columnTypes, input, acc)
} else {
op, resultIdx, ct, internalMemUsed, err = planProjectionOperators(ctx, evalCtx, expr, columnTypes, input, acc)
}
if err != nil {
return nil, 0, nil, internalMemUsed, err
}
outputIdx := len(ct)
op, err = GetCastOperator(NewAllocator(ctx, acc), op, resultIdx, outputIdx, expr.ResolvedType(), t.Type)
ct = append(ct, *t.Type)
return op, outputIdx, ct, internalMemUsed, err
case *tree.FuncExpr:
var (
inputCols []int
projectionInternalMem int
)
ct = columnTypes
op = input
for _, e := range t.Exprs {
var err error
// TODO(rohany): This could be done better, especially in the case of
// constant arguments, because the vectorized engine right now
// creates a new column full of the constant value.
op, resultIdx, ct, projectionInternalMem, err = planProjectionOperators(
ctx, evalCtx, e.(tree.TypedExpr), ct, op, acc,
)
if err != nil {
return nil, resultIdx, nil, internalMemUsed, err
}
inputCols = append(inputCols, resultIdx)
internalMemUsed += projectionInternalMem
}
funcOutputType := t.ResolvedType()
resultIdx = len(ct)
ct = append(ct, *funcOutputType)
op, err = NewBuiltinFunctionOperator(NewAllocator(ctx, acc), evalCtx, t, ct, inputCols, resultIdx, op)
return op, resultIdx, ct, internalMemUsed, err
case tree.Datum:
datumType := t.ResolvedType()
ct = columnTypes
resultIdx = len(ct)
ct = append(ct, *datumType)
if datumType.Family() == types.UnknownFamily {
return nil, resultIdx, ct, internalMemUsed, errors.New("cannot plan null type unknown")
}
typ := typeconv.FromColumnType(datumType)
constVal, err := typeconv.GetDatumToPhysicalFn(datumType)(t)
if err != nil {
return nil, resultIdx, ct, internalMemUsed, err
}
op, err := NewConstOp(NewAllocator(ctx, acc), input, typ, constVal, resultIdx)
if err != nil {
return nil, resultIdx, ct, internalMemUsed, err
}
return op, resultIdx, ct, internalMemUsed, nil
case *tree.CaseExpr:
if t.Expr != nil {
return nil, resultIdx, ct, internalMemUsed, errors.New("CASE <expr> WHEN expressions unsupported")
}
buffer := NewBufferOp(input)
internalMemUsed += buffer.(InternalMemoryOperator).InternalMemoryUsage()
caseOps := make([]Operator, len(t.Whens))
caseOutputType := typeconv.FromColumnType(t.ResolvedType())
caseOutputIdx := len(columnTypes)
ct = append(columnTypes, *t.ResolvedType())
thenIdxs := make([]int, len(t.Whens)+1)
for i, when := range t.Whens {
// The case operator is assembled from n WHEN arms, n THEN arms, and an
// ELSE arm. Each WHEN arm is a boolean projection. Each THEN arm (and the
// ELSE arm) is a projection of the type of the CASE expression. We set up
// each WHEN arm to write its output to a fresh column, and likewise for
// the THEN arms and the ELSE arm. Each WHEN arm individually acts on the
// single input batch from the CaseExpr's input and is then transformed
// into a selection vector, after which the THEN arm runs to create the
// output just for the tuples that matched the WHEN arm. Each subsequent
// WHEN arm will use the inverse of the selection vector to avoid running
// the WHEN projection on tuples that have already been matched by a
// previous WHEN arm. Finally, after each WHEN arm runs, we copy the
// results of the WHEN into a single output vector, assembling the final
// result of the case projection.
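			// For example, "CASE WHEN a THEN x WHEN b THEN y ELSE z END"
			// plans: project(a) -> selection -> project(x); then project(b)
			// on the remaining tuples -> selection -> project(y); and
			// finally project(z) on whatever is left, with each THEN/ELSE
			// result copied into the single output column at caseOutputIdx.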
var whenInternalMemUsed, thenInternalMemUsed int
caseOps[i], resultIdx, ct, whenInternalMemUsed, err = planTypedMaybeNullProjectionOperators(
ctx, evalCtx, when.Cond.(tree.TypedExpr), t.ResolvedType(), ct, buffer, acc,
)
if err != nil {
return nil, resultIdx, ct, internalMemUsed, err
}
// Transform the booleans to a selection vector.
caseOps[i] = NewBoolVecToSelOp(caseOps[i], resultIdx)
// Run the "then" clause on those tuples that were selected.
caseOps[i], thenIdxs[i], ct, thenInternalMemUsed, err = planTypedMaybeNullProjectionOperators(
ctx, evalCtx, when.Val.(tree.TypedExpr), t.ResolvedType(), ct, caseOps[i], acc,
)
if err != nil {
return nil, resultIdx, ct, internalMemUsed, err
}
internalMemUsed += whenInternalMemUsed + thenInternalMemUsed
}
var elseInternalMemUsed int
var elseOp Operator
elseExpr := t.Else
if elseExpr == nil {
// If there's no ELSE arm, we write NULLs.
elseExpr = tree.DNull
}
elseOp, thenIdxs[len(t.Whens)], ct, elseInternalMemUsed, err = planTypedMaybeNullProjectionOperators(
ctx, evalCtx, elseExpr.(tree.TypedExpr), t.ResolvedType(), ct, buffer, acc,
)
if err != nil {
return nil, resultIdx, ct, internalMemUsed, err
}
internalMemUsed += elseInternalMemUsed
op := NewCaseOp(NewAllocator(ctx, acc), buffer, caseOps, elseOp, thenIdxs, caseOutputIdx, caseOutputType)
internalMemUsed += op.(InternalMemoryOperator).InternalMemoryUsage()
return op, caseOutputIdx, ct, internalMemUsed, nil
case *tree.AndExpr, *tree.OrExpr:
return planLogicalProjectionOp(ctx, evalCtx, expr, columnTypes, input, acc)
default:
return nil, resultIdx, nil, internalMemUsed, errors.Errorf("unhandled projection expression type: %s", reflect.TypeOf(t))
}
}
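// Every projection planned above writes its result into a fresh column
// appended to the batch, so the indices of existing columns never shift.
// The hypothetical helper below is a minimal sketch of that convention and
// is not used by the planner itself.
func exampleAppendResultColumn(columnTypes []types.T, outputType types.T) (resultIdx int, ct []types.T) {
	resultIdx = len(columnTypes)         // the result lands in a new trailing column
	ct = append(columnTypes, outputType) // the type schema grows by exactly one entry
	return resultIdx, ct
}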
func planProjectionExpr(
ctx context.Context,
evalCtx *tree.EvalContext,
binOp tree.Operator,
outputType *types.T,
left, right tree.TypedExpr,
columnTypes []types.T,
input Operator,
acc *mon.BoundAccount,
) (op Operator, resultIdx int, ct []types.T, internalMemUsed int, err error) {
resultIdx = -1
	// There are three cases: either the left side is constant, the right side
	// is constant, or neither is constant.
lConstArg, lConst := left.(tree.Datum)
if lConst {
		// Case 1: the left side is constant.
// Normally, the optimizer normalizes binary exprs so that the constant
// argument is on the right side. This doesn't happen for non-commutative
// operators such as - and /, though, so we still need this case.
var rightOp Operator
var rightIdx int
rightOp, rightIdx, ct, internalMemUsed, err = planProjectionOperators(
ctx, evalCtx, right, columnTypes, input, acc,
)
if err != nil {
return nil, resultIdx, ct, internalMemUsed, err
}
resultIdx = len(ct)
		// The projection result will be written to a new column appended to
		// the input batch.
op, err = GetProjectionLConstOperator(
NewAllocator(ctx, acc), left.ResolvedType(), &ct[rightIdx], binOp,
rightOp, rightIdx, lConstArg, resultIdx,
)
ct = append(ct, *outputType)
if sMem, ok := op.(InternalMemoryOperator); ok {
internalMemUsed += sMem.InternalMemoryUsage()
}
return op, resultIdx, ct, internalMemUsed, err
}
leftOp, leftIdx, ct, internalMemUsedLeft, err := planProjectionOperators(
ctx, evalCtx, left, columnTypes, input, acc,
)
if err != nil {
return nil, resultIdx, ct, internalMemUsed, err
}
internalMemUsed += internalMemUsedLeft
if rConstArg, rConst := right.(tree.Datum); rConst {
		// Case 2: the right side is constant.
		// The projection result will be written to a new column appended to
		// the input batch.
resultIdx = len(ct)
if binOp == tree.Like || binOp == tree.NotLike {
negate := binOp == tree.NotLike
op, err = GetLikeProjectionOperator(
NewAllocator(ctx, acc), evalCtx, leftOp, leftIdx, resultIdx,
string(tree.MustBeDString(rConstArg)), negate,
)
} else if binOp == tree.In || binOp == tree.NotIn {
negate := binOp == tree.NotIn
datumTuple, ok := tree.AsDTuple(rConstArg)
if !ok {
err = errors.Errorf("IN operator supported only on constant expressions")
return nil, resultIdx, ct, internalMemUsed, err
}
op, err = GetInProjectionOperator(
NewAllocator(ctx, acc), &ct[leftIdx], leftOp, leftIdx,
resultIdx, datumTuple, negate,
)
} else if binOp == tree.IsDistinctFrom || binOp == tree.IsNotDistinctFrom {
if right != tree.DNull {
err = errors.Errorf("IS DISTINCT FROM and IS NOT DISTINCT FROM are supported only with NULL argument")
return nil, resultIdx, ct, internalMemUsed, err
}
// IS NULL is replaced with IS NOT DISTINCT FROM NULL, so we want to
// negate when IS DISTINCT FROM is used.
negate := binOp == tree.IsDistinctFrom
op = newIsNullProjOp(NewAllocator(ctx, acc), leftOp, leftIdx, resultIdx, negate)
} else {
op, err = GetProjectionRConstOperator(
NewAllocator(ctx, acc), &ct[leftIdx], right.ResolvedType(), binOp,
leftOp, leftIdx, rConstArg, resultIdx,
)
}
ct = append(ct, *outputType)
if sMem, ok := op.(InternalMemoryOperator); ok {
internalMemUsed += sMem.InternalMemoryUsage()
}
return op, resultIdx, ct, internalMemUsed, err
}
	// Case 3: neither side is constant.
rightOp, rightIdx, ct, internalMemUsedRight, err := planProjectionOperators(
ctx, evalCtx, right, ct, leftOp, acc,
)
if err != nil {
return nil, resultIdx, nil, internalMemUsed, err
}
internalMemUsed += internalMemUsedRight
resultIdx = len(ct)
op, err = GetProjectionOperator(
NewAllocator(ctx, acc), &ct[leftIdx], &ct[rightIdx], binOp, rightOp,
leftIdx, rightIdx, resultIdx,
)
ct = append(ct, *outputType)
if sMem, ok := op.(InternalMemoryOperator); ok {
internalMemUsed += sMem.InternalMemoryUsage()
}
return op, resultIdx, ct, internalMemUsed, err
}
// planLogicalProjectionOp plans all the needed operators for a projection of
// a logical operation (either AND or OR).
func planLogicalProjectionOp(
ctx context.Context,
evalCtx *tree.EvalContext,
expr tree.TypedExpr,
columnTypes []types.T,
input Operator,
acc *mon.BoundAccount,
) (op Operator, resultIdx int, ct []types.T, internalMemUsed int, err error) {
// Add a new boolean column that will store the result of the projection.
resultIdx = len(columnTypes)
ct = append(columnTypes, *types.Bool)
var (
typedLeft, typedRight tree.TypedExpr
leftProjOpChain, rightProjOpChain, outputOp Operator
leftIdx, rightIdx int
internalMemUsedLeft, internalMemUsedRight int
leftFeedOp, rightFeedOp feedOperator
)
switch t := expr.(type) {
case *tree.AndExpr:
typedLeft = t.TypedLeft()
typedRight = t.TypedRight()
case *tree.OrExpr:
typedLeft = t.TypedLeft()
typedRight = t.TypedRight()
default:
execerror.VectorizedInternalPanic(fmt.Sprintf("unexpected logical expression type %s", t.String()))
}
leftProjOpChain, leftIdx, ct, internalMemUsedLeft, err = planTypedMaybeNullProjectionOperators(
ctx, evalCtx, typedLeft, types.Bool, ct, &leftFeedOp, acc,
)
if err != nil {
return nil, resultIdx, ct, internalMemUsed, err
}
rightProjOpChain, rightIdx, ct, internalMemUsedRight, err = planTypedMaybeNullProjectionOperators(
ctx, evalCtx, typedRight, types.Bool, ct, &rightFeedOp, acc,
)
if err != nil {
return nil, resultIdx, ct, internalMemUsed, err
}
switch expr.(type) {
case *tree.AndExpr:
outputOp = NewAndProjOp(
NewAllocator(ctx, acc),
input, leftProjOpChain, rightProjOpChain,
&leftFeedOp, &rightFeedOp,
leftIdx, rightIdx, resultIdx,
)
case *tree.OrExpr:
outputOp = NewOrProjOp(
NewAllocator(ctx, acc),
input, leftProjOpChain, rightProjOpChain,
&leftFeedOp, &rightFeedOp,
leftIdx, rightIdx, resultIdx,
)
}
return outputOp, resultIdx, ct, internalMemUsedLeft + internalMemUsedRight, nil
}
| {
var (
toWrapInput execinfra.RowSource
// TODO(asubiotto): Plumb proper processorIDs once we have stats.
processorID int32
)
// Optimization: if the input is a Columnarizer, its input is necessarily a
// distsql.RowSource, so remove the unnecessary conversion.
if c, ok := input.(*Columnarizer); ok {
// TODO(asubiotto): We might need to do some extra work to remove references
// to this operator (e.g. streamIDToOp).
toWrapInput = c.input
} else {
var err error
toWrapInput, err = NewMaterializer(
flowCtx,
processorID,
input,
inputTypes,
&execinfrapb.PostProcessSpec{},
nil, /* output */
nil, /* metadataSourcesQueue */
nil, /* outputStatsToTrace */
nil, /* cancelFlow */
)
if err != nil {
return nil, err
}
}
toWrap, err := newToWrap(toWrapInput)
if err != nil {
return nil, err
}
return NewColumnarizer(ctx, NewAllocator(ctx, acc), flowCtx, processorID, toWrap)
} |
mod.rs | pub mod key_events; | pub mod tablet_tool_events;
pub mod touch_events;
pub mod xdg_shell_events;
pub mod xdg_shell_v6_events;
pub mod xwayland_events;
pub use self::key_events::Key; | pub mod pointer_events;
pub mod seat_events;
pub mod switch_events;
pub mod tablet_pad_events; |
robotgo.go | // Copyright 2016 The go-vgo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// https://github.com/go-vgo/robotgo/blob/master/LICENSE
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*
Package robotgo Go native cross-platform system automation.
Please make sure Go and GCC are installed correctly before installing RobotGo;
See Requirements:
https://github.com/go-vgo/robotgo#requirements
Installation:
With Go module support (Go 1.11+), just import:
import "github.com/go-vgo/robotgo"
Otherwise, to install the robotgo package, run the command:
go get -u github.com/go-vgo/robotgo
*/
package robotgo
/*
#cgo darwin CFLAGS: -x objective-c -Wno-deprecated-declarations
#cgo darwin LDFLAGS: -framework Cocoa -framework OpenGL -framework IOKit
#cgo darwin LDFLAGS: -framework Carbon -framework CoreFoundation
#cgo linux CFLAGS: -I/usr/src
#cgo linux LDFLAGS: -L/usr/src -lm -lX11 -lXtst
#cgo windows LDFLAGS: -lgdi32 -luser32
//
#include "screen/goScreen.h"
#include "mouse/mouse_c.h"
#include "window/goWindow.h"
*/
import "C"
import (
"image"
"runtime"
"time"
"unsafe"
"github.com/vcaesar/tt"
)
const (
// Version get the robotgo version
Version = "v1.00.0.1189, MT. Baker!"
)
// GetVersion get the robotgo version
func GetVersion() string {
return Version
}
var (
// MouseSleep set the mouse default millisecond sleep time
MouseSleep = 0
// KeySleep set the key default millisecond sleep time
KeySleep = 0
// DisplayID set the screen display id
DisplayID = -1
)
type (
// Map a map[string]interface{}
Map map[string]interface{}
// CHex define CHex as c rgb Hex type (C.MMRGBHex)
CHex C.MMRGBHex
// CBitmap define CBitmap as C.MMBitmapRef type
CBitmap C.MMBitmapRef
)
// Bitmap is Bitmap struct
type Bitmap struct {
ImgBuf *uint8
Width, Height int
Bytewidth int
BitsPixel uint8
BytesPerPixel uint8
}
// Point is point struct
type Point struct {
X int
Y int
}
// Size is size structure
type Size struct {
W, H int
}
// Rect is rect structure
type Rect struct {
Point
Size
}
// Try runs fun, recovering from any panic by calling handler(err)
func Try(fun func(), handler func(interface{})) {
defer func() {
if err := recover(); err != nil {
handler(err)
} | fun()
}
// MilliSleep sleeps for tm milliseconds
func MilliSleep(tm int) {
time.Sleep(time.Duration(tm) * time.Millisecond)
}
// Sleep sleeps for tm seconds using time.Sleep
func Sleep(tm int) {
time.Sleep(time.Duration(tm) * time.Second)
}
// Deprecated: use the MilliSleep(),
//
// MicroSleep time C.microsleep(tm)
func MicroSleep(tm float64) {
C.microsleep(C.double(tm))
}
// GoString trans C.char to string
func GoString(char *C.char) string {
return C.GoString(char)
}
/*
_______. ______ .______ _______ _______ .__ __.
/ | / || _ \ | ____|| ____|| \ | |
| (----`| ,----'| |_) | | |__ | |__ | \| |
\ \ | | | / | __| | __| | . ` |
.----) | | `----.| |\ \----.| |____ | |____ | |\ |
|_______/ \______|| _| `._____||_______||_______||__| \__|
*/
// ToMMRGBHex trans CHex to C.MMRGBHex
func ToMMRGBHex(hex CHex) C.MMRGBHex {
return C.MMRGBHex(hex)
}
// UintToHex trans uint32 to robotgo.CHex
func UintToHex(u uint32) CHex {
hex := U32ToHex(C.uint32_t(u))
return CHex(hex)
}
// U32ToHex trans C.uint32_t to C.MMRGBHex
func U32ToHex(hex C.uint32_t) C.MMRGBHex {
return C.MMRGBHex(hex)
}
// U8ToHex trans *C.uint8_t to C.MMRGBHex
func U8ToHex(hex *C.uint8_t) C.MMRGBHex {
return C.MMRGBHex(*hex)
}
// PadHex trans C.MMRGBHex to string
func PadHex(hex C.MMRGBHex) string {
color := C.pad_hex(hex)
gcolor := C.GoString(color)
C.free(unsafe.Pointer(color))
return gcolor
}
// PadHexs trans CHex to string
func PadHexs(hex CHex) string {
return PadHex(C.MMRGBHex(hex))
}
// HexToRgb trans hex to rgb
func HexToRgb(hex uint32) *C.uint8_t {
return C.color_hex_to_rgb(C.uint32_t(hex))
}
// RgbToHex trans rgb to hex
func RgbToHex(r, g, b uint8) C.uint32_t {
return C.color_rgb_to_hex(C.uint8_t(r), C.uint8_t(g), C.uint8_t(b))
}
// GetPxColor get the pixel color return C.MMRGBHex
func GetPxColor(x, y int, displayId ...int) C.MMRGBHex {
cx := C.int32_t(x)
cy := C.int32_t(y)
display := displayIdx(displayId...)
color := C.get_px_color(cx, cy, C.int32_t(display))
return color
}
// GetPixelColor get the pixel color return string
func GetPixelColor(x, y int, displayId ...int) string {
return PadHex(GetPxColor(x, y, displayId...))
}
// GetMouseColor get the mouse pos's color
func GetMouseColor(displayId ...int) string {
x, y := GetMousePos()
return GetPixelColor(x, y, displayId...)
}
// IsMain is main display
func IsMain(displayId int) bool {
return displayId == GetMainId()
}
func displayIdx(id ...int) int {
display := -1
if DisplayID != -1 {
display = DisplayID
}
if len(id) > 0 {
display = id[0]
}
return display
}
func getNumDisplays() int {
return int(C.get_num_displays())
}
// SysScale get the sys scale
func SysScale(displayId ...int) float64 {
display := displayIdx(displayId...)
s := C.sys_scale(C.int32_t(display))
return float64(s)
}
// Scaled get the screen scaled size
func Scaled(x int, displayId ...int) int {
f := ScaleF(displayId...)
return Scaled0(x, f)
}
// Scaled0 return int(x * f)
func Scaled0(x int, f float64) int {
return int(float64(x) * f)
}
// GetScreenSize get the screen size
func GetScreenSize() (int, int) {
size := C.getMainDisplaySize()
return int(size.w), int(size.h)
}
// GetScreenRect get the screen rect (x, y, w, h)
func GetScreenRect(displayId ...int) Rect {
display := -1
if len(displayId) > 0 {
display = displayId[0]
}
rect := C.getScreenRect(C.int32_t(display))
x, y, w, h := int(rect.origin.x), int(rect.origin.y),
int(rect.size.w), int(rect.size.h)
if runtime.GOOS == "windows" {
f := ScaleF(displayId...)
x, y, w, h = Scaled0(x, f), Scaled0(y, f), Scaled0(w, f), Scaled0(h, f)
}
return Rect{
Point{X: x, Y: y},
Size{W: w, H: h},
}
}
// GetScaleSize get the screen scale size
func GetScaleSize(displayId ...int) (int, int) {
x, y := GetScreenSize()
f := ScaleF(displayId...)
return int(float64(x) * f), int(float64(y) * f)
}
// CaptureScreen capture the screen return bitmap(c struct),
// use `defer robotgo.FreeBitmap(bitmap)` to free the bitmap
//
// robotgo.CaptureScreen(x, y, w, h int)
func CaptureScreen(args ...int) CBitmap {
var x, y, w, h C.int32_t
displayId := -1
if DisplayID != -1 {
displayId = DisplayID
}
if len(args) > 4 {
displayId = args[4]
}
if len(args) > 3 {
x = C.int32_t(args[0])
y = C.int32_t(args[1])
w = C.int32_t(args[2])
h = C.int32_t(args[3])
} else {
// Get the main screen rect.
rect := GetScreenRect(displayId)
// x = C.int32_t(rect.X)
// y = C.int32_t(rect.Y)
w = C.int32_t(rect.W)
h = C.int32_t(rect.H)
}
bit := C.capture_screen(x, y, w, h, C.int32_t(displayId))
return CBitmap(bit)
}
// CaptureGo capture the screen and return bitmap(go struct)
func CaptureGo(args ...int) Bitmap {
bit := CaptureScreen(args...)
defer FreeBitmap(bit)
return ToBitmap(bit)
}
// CaptureImg capture the screen and return image.Image
func CaptureImg(args ...int) image.Image {
bit := CaptureScreen(args...)
defer FreeBitmap(bit)
return ToImage(bit)
}
// FreeBitmap free and dealloc the C bitmap
func FreeBitmap(bitmap CBitmap) {
// C.destroyMMBitmap(bitmap)
C.bitmap_dealloc(C.MMBitmapRef(bitmap))
}
// FreeBitmapArr free and dealloc the C bitmap array
func FreeBitmapArr(bit ...CBitmap) {
for i := 0; i < len(bit); i++ {
FreeBitmap(bit[i])
}
}
// ToMMBitmapRef trans CBitmap to C.MMBitmapRef
func ToMMBitmapRef(bit CBitmap) C.MMBitmapRef {
return C.MMBitmapRef(bit)
}
// ToBitmap trans C.MMBitmapRef to Bitmap
func ToBitmap(bit CBitmap) Bitmap {
bitmap := Bitmap{
ImgBuf: (*uint8)(bit.imageBuffer),
Width: int(bit.width),
Height: int(bit.height),
Bytewidth: int(bit.bytewidth),
BitsPixel: uint8(bit.bitsPerPixel),
BytesPerPixel: uint8(bit.bytesPerPixel),
}
return bitmap
}
// ToCBitmap trans Bitmap to C.MMBitmapRef
func ToCBitmap(bit Bitmap) CBitmap {
cbitmap := C.createMMBitmap_c(
(*C.uint8_t)(bit.ImgBuf),
C.int32_t(bit.Width),
C.int32_t(bit.Height),
C.int32_t(bit.Bytewidth),
C.uint8_t(bit.BitsPixel),
C.uint8_t(bit.BytesPerPixel),
)
return CBitmap(cbitmap)
}
// ToImage convert C.MMBitmapRef to standard image.Image
func ToImage(bit CBitmap) image.Image {
return ToRGBA(bit)
}
// ToRGBA convert C.MMBitmapRef to standard image.RGBA
func ToRGBA(bit CBitmap) *image.RGBA {
bmp1 := ToBitmap(bit)
return ToRGBAGo(bmp1)
}
// ImgToCBitmap trans image.Image to CBitmap
func ImgToCBitmap(img image.Image) CBitmap {
return ToCBitmap(ImgToBitmap(img))
}
// ByteToCBitmap trans []byte to CBitmap
func ByteToCBitmap(by []byte) CBitmap {
img, _ := ByteToImg(by)
return ImgToCBitmap(img)
}
// SetXDisplayName set XDisplay name (Linux)
func SetXDisplayName(name string) error {
cname := C.CString(name)
str := C.set_XDisplay_name(cname)
C.free(unsafe.Pointer(cname))
return toErr(str)
}
// GetXDisplayName get XDisplay name (Linux)
func GetXDisplayName() string {
name := C.get_XDisplay_name()
gname := C.GoString(name)
C.free(unsafe.Pointer(name))
return gname
}
// Deprecated: use the ScaledF(),
//
// ScaleX get the primary display horizontal DPI scale factor, drop
func ScaleX() int {
return int(C.scaleX())
}
// Deprecated: use the ScaledF(),
//
// Scale get the screen scale (only windows old), drop
func Scale() int {
dpi := map[int]int{
0: 100,
// DPI Scaling Level
96: 100,
120: 125,
144: 150,
168: 175,
192: 200,
216: 225,
// Custom DPI
240: 250,
288: 300,
384: 400,
480: 500,
}
x := ScaleX()
return dpi[x]
}
// Deprecated: use the ScaledF(),
//
// Scale0 return ScaleX() / 0.96, drop
func Scale0() int {
return int(float64(ScaleX()) / 0.96)
}
// Deprecated: use the ScaledF(),
//
// Mul mul the scale, drop
func Mul(x int) int {
s := Scale()
return x * s / 100
}
/*
.___ ___. ______ __ __ _______. _______
| \/ | / __ \ | | | | / || ____|
| \ / | | | | | | | | | | (----`| |__
| |\/| | | | | | | | | | \ \ | __|
| | | | | `--' | | `--' | .----) | | |____
|__| |__| \______/ \______/ |_______/ |_______|
*/
// CheckMouse check the mouse button
func CheckMouse(btn string) C.MMMouseButton {
// button = args[0].(C.MMMouseButton)
m1 := map[string]C.MMMouseButton{
"left": C.LEFT_BUTTON,
"center": C.CENTER_BUTTON,
"right": C.RIGHT_BUTTON,
"wheelDown": C.WheelDown,
"wheelUp": C.WheelUp,
"wheelLeft": C.WheelLeft,
"wheelRight": C.WheelRight,
}
if v, ok := m1[btn]; ok {
return v
}
return C.LEFT_BUTTON
}
// Deprecated: use the Move(),
//
// MoveMouse move the mouse
func MoveMouse(x, y int) {
Move(x, y)
}
// Move move the mouse to (x, y)
//
// Examples:
// robotgo.MouseSleep = 100 // 100 millisecond
// robotgo.Move(10, 10)
func Move(x, y int) {
// if runtime.GOOS == "windows" {
// f := ScaleF()
// x, y = Scaled0(x, f), Scaled0(y, f)
// }
cx := C.int32_t(x)
cy := C.int32_t(y)
C.moveMouse(C.MMPointInt32Make(cx, cy))
MilliSleep(MouseSleep)
}
// Deprecated: use the DragSmooth(),
//
// DragMouse drags the mouse to (x, y);
// it's the same as DragSmooth() now
func DragMouse(x, y int, args ...interface{}) {
Toggle("left")
MilliSleep(50)
// Drag(x, y, args...)
MoveSmooth(x, y, args...)
Toggle("left", "up")
}
// Deprecated: use the DragSmooth(),
//
// Drag drags the mouse to (x, y);
// it's not valid now, use DragSmooth() instead
func Drag(x, y int, args ...string) {
var button C.MMMouseButton = C.LEFT_BUTTON
cx := C.int32_t(x)
cy := C.int32_t(y)
if len(args) > 0 {
button = CheckMouse(args[0])
}
C.dragMouse(C.MMPointInt32Make(cx, cy), button)
MilliSleep(MouseSleep)
}
// DragSmooth drag the mouse like smooth to (x, y)
//
// Examples:
// robotgo.DragSmooth(10, 10)
func DragSmooth(x, y int, args ...interface{}) {
Toggle("left")
MilliSleep(50)
MoveSmooth(x, y, args...)
Toggle("left", "up")
}
// Deprecated: use the MoveSmooth(),
//
// MoveMouseSmooth moves the mouse smoothly,
// moving it to (x, y) in a human-like way with the mouse button up.
func MoveMouseSmooth(x, y int, args ...interface{}) bool {
return MoveSmooth(x, y, args...)
}
// MoveSmooth moves the mouse smoothly,
// moving it to (x, y) in a human-like way with the mouse button up.
//
// robotgo.MoveSmooth(x, y int, low, high float64, mouseDelay int)
//
// Examples:
// robotgo.MoveSmooth(10, 10)
// robotgo.MoveSmooth(10, 10, 1.0, 2.0)
func MoveSmooth(x, y int, args ...interface{}) bool {
// if runtime.GOOS == "windows" {
// f := ScaleF()
// x, y = Scaled0(x, f), Scaled0(y, f)
// }
cx := C.int32_t(x)
cy := C.int32_t(y)
var (
mouseDelay = 1
low C.double
high C.double
)
if len(args) > 2 {
mouseDelay = args[2].(int)
}
if len(args) > 1 {
low = C.double(args[0].(float64))
high = C.double(args[1].(float64))
} else {
low = 1.0
high = 3.0
}
cbool := C.smoothlyMoveMouse(C.MMPointInt32Make(cx, cy), low, high)
MilliSleep(MouseSleep + mouseDelay)
return bool(cbool)
}
// MoveArgs returns the current mouse position offset by (x, y)
func MoveArgs(x, y int) (int, int) {
mx, my := GetMousePos()
mx = mx + x
my = my + y
return mx, my
}
// MoveRelative moves the mouse relative to its current position
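//
// Examples:
//	robotgo.MoveRelative(10, -20)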
func MoveRelative(x, y int) {
Move(MoveArgs(x, y))
}
// MoveSmoothRelative moves the mouse smoothly relative to its current position
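//
// Examples:
//	robotgo.MoveSmoothRelative(10, -20)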
func MoveSmoothRelative(x, y int, args ...interface{}) {
mx, my := MoveArgs(x, y)
MoveSmooth(mx, my, args...)
}
// GetMousePos get the mouse's position return x, y
func GetMousePos() (int, int) {
pos := C.getMousePos()
x := int(pos.x)
y := int(pos.y)
return x, y
}
// Deprecated: use the Click(),
//
// MouseClick click the mouse
//
// robotgo.MouseClick(button string, double bool)
func MouseClick(args ...interface{}) {
Click(args...)
}
// Click click the mouse button
//
// robotgo.Click(button string, double bool)
//
// Examples:
// robotgo.Click() // default is left button
// robotgo.Click("right")
// robotgo.Click("wheelLeft")
func Click(args ...interface{}) {
var (
button C.MMMouseButton = C.LEFT_BUTTON
double bool
)
if len(args) > 0 {
button = CheckMouse(args[0].(string))
}
if len(args) > 1 {
double = args[1].(bool)
}
if !double {
C.clickMouse(button)
} else {
C.doubleClick(button)
}
MilliSleep(MouseSleep)
}
// MoveClick move and click the mouse
//
// robotgo.MoveClick(x, y int, button string, double bool)
//
// Examples:
// robotgo.MouseSleep = 100
// robotgo.MoveClick(10, 10)
func MoveClick(x, y int, args ...interface{}) {
Move(x, y)
MilliSleep(50)
Click(args...)
}
// MovesClick move smooth and click the mouse
//
// use the `robotgo.MouseSleep = 100`
func MovesClick(x, y int, args ...interface{}) {
MoveSmooth(x, y)
MilliSleep(50)
Click(args...)
}
// Toggle toggle the mouse, support button:
// "left", "center", "right",
// "wheelDown", "wheelUp", "wheelLeft", "wheelRight"
//
// Examples:
// robotgo.Toggle("left") // default is down
// robotgo.Toggle("left", "up")
func Toggle(key ...string) error {
var button C.MMMouseButton = C.LEFT_BUTTON
if len(key) > 0 {
button = CheckMouse(key[0])
}
down := true
if len(key) > 1 && key[1] == "up" {
down = false
}
C.toggleMouse(C.bool(down), button)
MilliSleep(MouseSleep)
return nil
}
// MouseDown send mouse down event
func MouseDown(key ...string) error {
return Toggle(key...)
}
// MouseUp send mouse up event
func MouseUp(key ...string) error {
if len(key) <= 0 {
key = append(key, "left")
}
return Toggle(append(key, "up")...)
}
// Scroll scroll the mouse to (x, y)
//
// robotgo.Scroll(x, y, msDelay int)
//
// Examples:
// robotgo.Scroll(10, 10)
func Scroll(x, y int, args ...int) {
var msDelay = 10
if len(args) > 0 {
msDelay = args[0]
}
cx := C.int(x)
cy := C.int(y)
C.scrollMouseXY(cx, cy)
MilliSleep(MouseSleep + msDelay)
}
// ScrollMouse scrolls the mouse by x in the given direction;
// supported directions: "up", "down", "left", "right"
//
// Examples:
// robotgo.ScrollMouse(10, "down")
// robotgo.ScrollMouse(10, "up")
func ScrollMouse(x int, direction ...string) {
d := "down"
if len(direction) > 0 {
d = direction[0]
}
if d == "down" {
Scroll(0, -x)
}
if d == "up" {
Scroll(0, x)
}
if d == "left" {
Scroll(x, 0)
}
if d == "right" {
Scroll(-x, 0)
}
// MilliSleep(MouseSleep)
}
// ScrollSmooth scrolls the mouse smoothly;
// by default it scrolls 5 times, sleeping 100 milliseconds between scrolls
//
// robotgo.ScrollSmooth(toy, num, sleep, tox)
//
// Examples:
// robotgo.ScrollSmooth(-10)
// robotgo.ScrollSmooth(-10, 6, 200, -10)
func ScrollSmooth(to int, args ...int) {
num := 5
if len(args) > 0 {
num = args[0]
}
tm := 100
if len(args) > 1 {
tm = args[1]
}
tox := 0
if len(args) > 2 {
tox = args[2]
}
	for i := 0; i < num; i++ {
		Scroll(tox, to)
		MilliSleep(tm)
	}
MilliSleep(MouseSleep)
}
// ScrollRelative scroll mouse with relative
//
// Examples:
// robotgo.ScrollRelative(10, 10)
func ScrollRelative(x, y int, args ...int) {
mx, my := MoveArgs(x, y)
Scroll(mx, my, args...)
}
/*
____ __ ____ __ .__ __. _______ ______ ____ __ ____
\ \ / \ / / | | | \ | | | \ / __ \ \ \ / \ / /
\ \/ \/ / | | | \| | | .--. | | | | \ \/ \/ /
\ / | | | . ` | | | | | | | | \ /
\ /\ / | | | |\ | | '--' | `--' | \ /\ /
\__/ \__/ |__| |__| \__| |_______/ \______/ \__/ \__/
*/
func alertArgs(args ...string) (string, string) {
var (
defaultBtn = "Ok"
cancelBtn = "Cancel"
)
if len(args) > 0 {
defaultBtn = args[0]
}
if len(args) > 1 {
cancelBtn = args[1]
}
return defaultBtn, cancelBtn
}
func showAlert(title, msg string, args ...string) bool {
defaultBtn, cancelBtn := alertArgs(args...)
cTitle := C.CString(title)
cMsg := C.CString(msg)
defaultButton := C.CString(defaultBtn)
cancelButton := C.CString(cancelBtn)
cbool := C.showAlert(cTitle, cMsg, defaultButton, cancelButton)
ibool := int(cbool)
C.free(unsafe.Pointer(cTitle))
C.free(unsafe.Pointer(cMsg))
C.free(unsafe.Pointer(defaultButton))
C.free(unsafe.Pointer(cancelButton))
return ibool == 0
}
// IsValid valid the window
func IsValid() bool {
abool := C.is_valid()
gbool := bool(abool)
return gbool
}
// SetActive set the window active
func SetActive(win C.MData) {
C.set_active(win)
}
// GetActive get the active window
func GetActive() C.MData {
mdata := C.get_active()
// fmt.Println("active----", mdata)
return mdata
}
// MinWindow set the window min
func MinWindow(pid int32, args ...interface{}) {
var (
state = true
hwnd int
)
if len(args) > 0 {
state = args[0].(bool)
}
if len(args) > 1 {
hwnd = args[1].(int)
}
C.min_window(C.uintptr(pid), C.bool(state), C.uintptr(hwnd))
}
// MaxWindow set the window max
func MaxWindow(pid int32, args ...interface{}) {
var (
state = true
hwnd int
)
if len(args) > 0 {
state = args[0].(bool)
}
if len(args) > 1 {
hwnd = args[1].(int)
}
C.max_window(C.uintptr(pid), C.bool(state), C.uintptr(hwnd))
}
// CloseWindow close the window
func CloseWindow(args ...int32) {
if len(args) <= 0 {
C.close_main_window()
return
}
var hwnd, isHwnd int32
if len(args) > 0 {
hwnd = args[0]
}
if len(args) > 1 {
isHwnd = args[1]
}
C.close_window_by_PId(C.uintptr(hwnd), C.uintptr(isHwnd))
}
// SetHandle set the window handle
func SetHandle(hwnd int) {
chwnd := C.uintptr(hwnd)
C.setHandle(chwnd)
}
// SetHandlePid set the window handle by pid
func SetHandlePid(pid int32, args ...int32) {
var isHwnd int32
if len(args) > 0 {
isHwnd = args[0]
}
C.set_handle_pid_mData(C.uintptr(pid), C.uintptr(isHwnd))
}
// GetHandPid get handle mdata by pid
func GetHandPid(pid int32, args ...int32) C.MData {
var isHwnd int32
if len(args) > 0 {
isHwnd = args[0]
}
return C.set_handle_pid(C.uintptr(pid), C.uintptr(isHwnd))
}
// GetHandle get the window handle
func GetHandle() int {
hwnd := C.get_handle()
ghwnd := int(hwnd)
// fmt.Println("gethwnd---", ghwnd)
return ghwnd
}
// Deprecated: use the GetHandle(),
//
// GetBHandle get the window handle
//
// This function will be removed in version v1.0.0
func GetBHandle() int {
tt.Drop("GetBHandle", "GetHandle")
hwnd := C.b_get_handle()
ghwnd := int(hwnd)
//fmt.Println("gethwnd---", ghwnd)
return ghwnd
}
func cgetTitle(hwnd, isHwnd int32) string {
title := C.get_title_by_pid(C.uintptr(hwnd), C.uintptr(isHwnd))
gtitle := C.GoString(title)
return gtitle
}
// GetTitle get the window title return string
//
// Examples:
// fmt.Println(robotgo.GetTitle())
//
// ids, _ := robotgo.FindIds()
// robotgo.GetTitle(ids[0])
func GetTitle(args ...int32) string {
if len(args) <= 0 {
title := C.get_main_title()
gtitle := C.GoString(title)
return gtitle
}
if len(args) > 1 {
return internalGetTitle(args[0], args[1])
}
return internalGetTitle(args[0])
}
// GetPID get the process id return int32
func GetPID() int32 {
pid := C.get_PID()
return int32(pid)
}
// internalGetBounds get the window bounds
func internalGetBounds(pid int32, hwnd int) (int, int, int, int) {
bounds := C.get_bounds(C.uintptr(pid), C.uintptr(hwnd))
return int(bounds.X), int(bounds.Y), int(bounds.W), int(bounds.H)
}
// internalGetClient get the window client bounds
func internalGetClient(pid int32, hwnd int) (int, int, int, int) {
bounds := C.get_client(C.uintptr(pid), C.uintptr(hwnd))
return int(bounds.X), int(bounds.Y), int(bounds.W), int(bounds.H)
}
// Is64Bit determine whether the sys is 64bit
func Is64Bit() bool {
b := C.Is64Bit()
return bool(b)
}
func internalActive(pid int32, hwnd int) {
C.active_PID(C.uintptr(pid), C.uintptr(hwnd))
}
// ActivePID active the window by PID;
// on Windows, if args[0] > 0 it is treated as a window handle used to activate
// func ActivePID(pid int32, args ...int) {
// var hwnd int
// if len(args) > 0 {
// hwnd = args[0]
// }
// C.active_PID(C.uintptr(pid), C.uintptr(hwnd))
// }
// ActiveName active the window by name
//
// Examples:
// robotgo.ActiveName("chrome")
func ActiveName(name string) error {
pids, err := FindIds(name)
if err == nil && len(pids) > 0 {
return ActivePID(pids[0])
}
return err
} | }() |
bgpservicecommunities.go | package network
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// BgpServiceCommunitiesClient is the network Client
type BgpServiceCommunitiesClient struct {
BaseClient
}
// NewBgpServiceCommunitiesClient creates an instance of the BgpServiceCommunitiesClient client.
func NewBgpServiceCommunitiesClient(subscriptionID string) BgpServiceCommunitiesClient {
return NewBgpServiceCommunitiesClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewBgpServiceCommunitiesClientWithBaseURI creates an instance of the BgpServiceCommunitiesClient client using a
// custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds,
// Azure stack).
func NewBgpServiceCommunitiesClientWithBaseURI(baseURI string, subscriptionID string) BgpServiceCommunitiesClient {
return BgpServiceCommunitiesClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// List gets all the available bgp service communities.
func (client BgpServiceCommunitiesClient) List(ctx context.Context) (result BgpServiceCommunityListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/BgpServiceCommunitiesClient.List")
defer func() {
sc := -1
if result.bsclr.Response.Response != nil {
sc = result.bsclr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx)
if err != nil {
err = autorest.NewErrorWithError(err, "network.BgpServiceCommunitiesClient", "List", nil, "Failure preparing request")
return
}
resp, err := client.ListSender(req)
if err != nil {
result.bsclr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.BgpServiceCommunitiesClient", "List", resp, "Failure sending request")
return
}
result.bsclr, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.BgpServiceCommunitiesClient", "List", resp, "Failure responding to request")
}
return
}
// ListPreparer prepares the List request.
func (client BgpServiceCommunitiesClient) ListPreparer(ctx context.Context) (*http.Request, error) {
pathParameters := map[string]interface{}{
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-08-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/bgpServiceCommunities", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client BgpServiceCommunitiesClient) ListSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client BgpServiceCommunitiesClient) ListResponder(resp *http.Response) (result BgpServiceCommunityListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK), | return
}
// listNextResults retrieves the next set of results, if any.
func (client BgpServiceCommunitiesClient) listNextResults(ctx context.Context, lastResults BgpServiceCommunityListResult) (result BgpServiceCommunityListResult, err error) {
req, err := lastResults.bgpServiceCommunityListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.BgpServiceCommunitiesClient", "listNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.BgpServiceCommunitiesClient", "listNextResults", resp, "Failure sending next results request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.BgpServiceCommunitiesClient", "listNextResults", resp, "Failure responding to next results request")
}
return
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client BgpServiceCommunitiesClient) ListComplete(ctx context.Context) (result BgpServiceCommunityListResultIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/BgpServiceCommunitiesClient.List")
defer func() {
sc := -1
if result.Response().Response.Response != nil {
sc = result.page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.page, err = client.List(ctx)
return
} | autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp} |
roleService.ts | import { RoleModel } from ".";
/**
* User role management service.
*/
export interface RoleService {
/**
* Returns all available roles.
*/
getRoles(): Promise<RoleModel[]>;
| } |
|
reactiveform.component.ts | import { Component } from '@angular/core';
import { FormBuilder, FormGroup } from '@angular/forms';
@Component({
moduleId: module.id.replace("dist", ""),
selector: 'reactive-form',
templateUrl: '../templates/reactiveform.html'
})
export class Re |
title = 'Reactive Form';
reactiveForm: FormGroup;
constructor(private formBuilder: FormBuilder) {
this.reactiveForm = formBuilder.group({
'name': '',
'gender': 'Male',
'hiking': false,
'running': false
});
}
submitForm(value: any): void {
console.log('Reactive Form Data: ');
console.log(value);
}
} | activeFormComponent { |
slot.rs | use crate::db::DbQueryResult;
use crate::db::Pool;
use crate::schema::slots as slots_table;
use crate::schema::slots::dsl::*;
use chrono::prelude::*;
use diesel::dsl::insert_into;
use diesel::prelude::*;
use juniper::{GraphQLInputObject, GraphQLObject};
use serde::{Deserialize, Serialize};
#[derive(Queryable, Debug, GraphQLObject, Serialize)]
pub struct Slot {
id: i32,
start_time: NaiveDateTime,
end_time: NaiveDateTime,
created_at: NaiveDateTime,
updated_at: NaiveDateTime,
}
#[derive(Deserialize, Insertable, GraphQLInputObject)]
#[table_name = "slots_table"]
#[graphql(name = "SlotParams")]
pub struct NewSlot {
start_time: NaiveDateTime, | pub fn find(pool: &Pool, slot_id: i32) -> DbQueryResult<Slot> {
let conn = pool.get().unwrap();
slots.find(slot_id).get_result::<Slot>(&conn)
}
pub fn all(pool: &Pool) -> DbQueryResult<Vec<Slot>> {
let conn = pool.get().unwrap();
slots.load::<Slot>(&conn)
}
pub fn create(pool: &Pool, params: &NewSlot) -> DbQueryResult<Slot> {
let conn = pool.get().unwrap();
let res = insert_into(slots).values(params).get_result(&conn)?;
Ok(res)
}
} | end_time: NaiveDateTime,
}
impl Slot { |
didl.py |
from lxml.builder import ElementMaker
from moai.metadata.mods import NL_MODS, XSI_NS
class DIDL(object):
"""A metadata prefix implementing the DARE DIDL metadata format
this format is registered under the name "didl"
Note that this format re-uses oai_dc and mods formats that come with
MOAI by default
"""
def __init__(self, prefix, config, db):
|
def get_namespace(self):
return self.ns[self.prefix]
def get_schema_location(self):
return self.schemas[self.prefix]
def __call__(self, element, metadata):
data = metadata.record
DIDL = ElementMaker(namespace=self.ns['didl'], nsmap=self.ns)
DII = ElementMaker(namespace=self.ns['dii'])
DIP = ElementMaker(namespace=self.ns['dip'])
RDF = ElementMaker(namespace=self.ns['rdf'])
DCTERMS = ElementMaker(namespace=self.ns['dcterms'])
oai_url = (self.config.url+'?verb=GetRecord&'
'metadataPrefix=%s&identifier=%s' % (
self.prefix,
data['id']))
id_url = data['metadata'].get('url', [None])[0]
# generate mods for this feed
mods_data = DIDL.Resource(mimeType="application/xml")
NL_MODS('mods', self.config, self.db)(mods_data, metadata)
asset_data = []
descriptive_metadata = RDF.type()
descriptive_metadata.attrib['{%s}resource' % self.ns['rdf']] = (
'info:eu-repo/semantics/descriptiveMetadata')
didl = DIDL.DIDL(
DIDL.Item(
DIDL.Descriptor(
DIDL.Statement(
DCTERMS.modified(data['modified'].isoformat().split('.')[0]),
mimeType="application/xml"
)
),
DIDL.Component(
DIDL.Resource(ref=id_url or oai_url,mimeType="application/xml")
),
DIDL.Item(
DIDL.Descriptor(
DIDL.Statement(descriptive_metadata, mimeType="application/xml")
),
DIDL.Component(
DIDL.Descriptor(
DIDL.Statement("mods", mimeType="text/plain")),
mods_data)
),
)
)
object_file = RDF.type()
object_file.attrib['{%s}resource' % self.ns['rdf']] = (
'info:eu-repo/semantics/objectFile')
for root_item in didl:
for asset in data['metadata'].get('asset', []):
url = asset['url']
if not url.startswith('http://'):
url = self.config.url.rstrip('/') + '/' + url.lstrip('/')
item = DIDL.Item(
DIDL.Descriptor(
DIDL.Statement(object_file, mimeType="application/xml")
)
)
access = asset.get('access')
if access == 'open':
access = (
'http://purl.org/eprint/accessRights/OpenAccess')
elif access == 'restricted':
access = (
'http://purl.org/eprint/accessRights/RestrictedAccess')
elif access == 'closed':
access = (
'http://purl.org/eprint/accessRights/ClosedAccess')
if access:
item.append(
DIDL.Descriptor(
DIDL.Statement(DCTERMS.accessRights(access),
mimeType="application/xml")))
for modified in asset.get('modified', []):
item.append(
DIDL.Descriptor(
DIDL.Statement(DCTERMS.modified(modified),
mimeType="application/xml")))
item.append(
DIDL.Component(
DIDL.Resource(mimeType=asset['mimetype'],
ref=url)
)
)
root_item.append(item)
break
human_start_page = RDF.type()
human_start_page.attrib['{%s}resource' % self.ns['rdf']] = (
'info:eu-repo/semantics/humanStartPage')
if data['metadata'].get('url'):
item = DIDL.Item(
DIDL.Descriptor(
DIDL.Statement(human_start_page, mimeType="application/xml")
),
DIDL.Component(
DIDL.Resource(mimeType="text/html", ref=data['metadata']['url'][0])
)
)
root_item.append(item)
didl.attrib['{%s}schemaLocation' % XSI_NS] = (
'%s %s %s %s %s %s' % (self.ns['didl'],
self.schemas['didl'],
self.ns['dii'],
self.schemas['dii'],
self.ns['dip'],
self.schemas['dip']))
element.append(didl)
| self.prefix = prefix
self.config = config
self.db = db
self.ns = {'didl': "urn:mpeg:mpeg21:2002:02-DIDL-NS",
'dii': "urn:mpeg:mpeg21:2002:01-DII-NS",
'dip': "urn:mpeg:mpeg21:2005:01-DIP-NS",
'dcterms': "http://purl.org/dc/terms/",
'xsi': "http://www.w3.org/2001/XMLSchema-instance",
'rdf': "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
'dc': 'http://purl.org/dc/elements/1.1/',
}
self.schemas = {'didl':'http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-21_schema_files/did/didl.xsd',
'dii': 'http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-21_schema_files/dii/dii.xsd',
'dip': 'http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-21_schema_files/dip/dip.xsd'} |
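# A minimal usage sketch (illustrative only; `config` and `db` follow MOAI's
# conventions, `element` is an lxml element the DIDL tree is appended to, and
# `metadata` carries the record dict used above):
#
#     writer = DIDL('didl', config, db)
#     writer(element, metadata)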
coinbase_pro.rs | use crypto_market_type::{get_market_types, MarketType};
use crypto_markets::{fetch_markets, fetch_symbols};
use crypto_pair::get_market_type;
#[macro_use]
mod utils;
const EXCHANGE_NAME: &str = "coinbase_pro";
#[test]
fn fetch_all_symbols() {
gen_all_symbols!();
}
#[test]
fn fetch_spot_symbols() |
#[test]
fn fetch_spot_markets() {
let markets = fetch_markets(EXCHANGE_NAME, MarketType::Spot).unwrap();
assert!(!markets.is_empty());
let btcusd = markets
.iter()
.find(|m| m.symbol == "BTC-USD")
.unwrap()
.clone();
assert_eq!(btcusd.precision.tick_size, 0.01);
assert_eq!(btcusd.precision.lot_size, 0.00000001);
let quantity_limit = btcusd.quantity_limit.unwrap();
assert_eq!(quantity_limit.min, 0.000016);
assert_eq!(quantity_limit.max, Some(1500.0));
}
| {
let symbols = fetch_symbols(EXCHANGE_NAME, MarketType::Spot).unwrap();
assert!(!symbols.is_empty());
for symbol in symbols.iter() {
assert!(symbol.contains("-"));
assert_eq!(symbol.to_string(), symbol.to_uppercase());
assert_eq!(
MarketType::Spot,
get_market_type(symbol, EXCHANGE_NAME, None)
);
}
} |
dispersion.py | from itertools import combinations
__author__ = "\n".join(['Ben Edwards ([email protected])',
'Huston Hedinger ([email protected])',
'Dan Schult ([email protected])'])
__all__ = ['dispersion']
def dispersion(G, u=None, v=None, normalized=True, alpha=1.0, b=0.0, c=0.0):
r"""Calculate dispersion between `u` and `v` in `G`.
A link between two actors (`u` and `v`) has a high dispersion when their
mutual ties (`s` and `t`) are not well connected with each other.
Parameters
----------
G : graph
A NetworkX graph.
u : node, optional
The source for the dispersion score (e.g. ego node of the network).
v : node, optional
The target of the dispersion score if specified.
normalized : bool
        If True (default) normalize by the embeddedness of the nodes (u and v).
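    alpha : float
        Exponent applied to the raw dispersion score when normalizing.
    b, c : float
        Additive offsets applied to the raw dispersion and the embeddedness,
        respectively, when normalizing (see [1]_).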
Returns
-------
nodes : dictionary
If u (v) is specified, returns a dictionary of nodes with dispersion
score for all "target" ("source") nodes. If neither u nor v is
specified, returns a dictionary of dictionaries for all nodes 'u' in the
graph with a dispersion score for each node 'v'.
Notes
-----
This implementation follows Lars Backstrom and Jon Kleinberg [1]_. Typical
usage would be to run dispersion on the ego network $G_u$ if $u$ were
specified. Running :func:`dispersion` with neither $u$ nor $v$ specified
can take some time to complete.
References
----------
.. [1] Romantic Partnerships and the Dispersion of Social Ties:
A Network Analysis of Relationship Status on Facebook.
Lars Backstrom, Jon Kleinberg.
https://arxiv.org/pdf/1310.6753v1.pdf
"""
def | (G_u, u, v):
"""dispersion for all nodes 'v' in a ego network G_u of node 'u'"""
u_nbrs = set(G_u[u])
ST = set(n for n in G_u[v] if n in u_nbrs)
set_uv = set([u, v])
        # all possible ties of connections that u and v share
possib = combinations(ST, 2)
total = 0
for (s, t) in possib:
# neighbors of s that are in G_u, not including u and v
nbrs_s = u_nbrs.intersection(G_u[s]) - set_uv
# s and t are not directly connected
if t not in nbrs_s:
# s and t do not share a connection
if nbrs_s.isdisjoint(G_u[t]):
# tick for disp(u, v)
total += 1
# neighbors that u and v share
        embeddedness = len(ST)
        if normalized:
            if embeddedness + c != 0:
                norm_disp = ((total + b)**alpha) / (embeddedness + c)
else:
norm_disp = (total + b)**alpha
dispersion = norm_disp
else:
dispersion = total
return dispersion
if u is None:
# v and u are not specified
if v is None:
results = dict((n, {}) for n in G)
for u in G:
for v in G[u]:
results[u][v] = _dispersion(G, u, v)
# u is not specified, but v is
else:
results = dict.fromkeys(G[v], {})
for u in G[v]:
results[u] = _dispersion(G, v, u)
else:
# u is specified with no target v
if v is None:
results = dict.fromkeys(G[u], {})
for v in G[u]:
results[v] = _dispersion(G, u, v)
# both u and v are specified
else:
results = _dispersion(G, u, v)
return results
| _dispersion |
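# A minimal usage sketch (illustrative only; assumes networkx is available):
#
#     import networkx as nx
#     G = nx.Graph([("u", "v"), ("u", "s"), ("u", "t"), ("v", "s"), ("v", "t")])
#     # s and t connect u and v but not each other, so the u-v tie is dispersed:
#     dispersion(G, u="u", v="v")  # 0.5 with the default parameters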
backup.go | package backup
import (
corev1 "k8s.io/api/core/v1"
api "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1"
)
type Backup struct {
cluster string
namespace string
image string
imagePullSecrets []corev1.LocalObjectReference
}
func | (cr *api.PerconaXtraDBCluster, spec *api.PXCScheduledBackup) *Backup {
return &Backup{
cluster: cr.Name,
namespace: cr.Namespace,
image: spec.Image,
imagePullSecrets: spec.ImagePullSecrets,
}
}
| New |
__init__.py | def hello():
| print("Hello from Package 2") |
|
main.js | 'use strict';
Object.defineProperty(exports, '__esModule', { value: true });
/*
https://github.com/banksean wrapped Makoto Matsumoto and Takuji Nishimura's code in a namespace
so it's better encapsulated. Now you can have multiple random number generators
and they won't stomp all over each other's state.
If you want to use this as a substitute for Math.random(), use the random()
method like so:
var m = new MersenneTwister();
var randomNumber = m.random();
You can also call the other genrand_{foo}() methods on the instance.
If you want to use a specific seed in order to get a repeatable random
sequence, pass an integer into the constructor:
var m = new MersenneTwister(123);
and that will always produce the same random sequence.
Sean McCullough ([email protected])
*/
/*
A C-program for MT19937, with initialization improved 2002/1/26.
Coded by Takuji Nishimura and Makoto Matsumoto.
Before using, initialize the state by using init_seed(seed)
or init_by_array(init_key, key_length).
Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The names of its contributors may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Any feedback is very welcome.
http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space)
*/
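// A short usage sketch, repeating the note above in code form (the seed is
// illustrative):
//   const m = new MersenneTwister(123);
//   m.randomInt32(); // deterministic sequence for a fixed seed
//   m.randomReal2(); // uniform on [0, 1)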
class MersenneTwister {
constructor(seed) {
/* Period parameters */
this.N = 624;
this.M = 397;
this.MATRIX_A = 0x9908b0df; /* constant vector a */
this.UPPER_MASK = 0x80000000; /* most significant w-r bits */
this.LOWER_MASK = 0x7fffffff; /* least significant r bits */
this.mt = new Array(this.N); /* the array for the state vector */
this.mti = this.N + 1; /* mti==N+1 means mt[N] is not initialized */
if (Array.isArray(seed)) {
if (seed.length > 0)
this.initByArray(seed, seed.length);
}
else {
if (seed === undefined) {
this.initSeed(new Date().getTime());
}
else {
this.initSeed(seed);
}
}
}
/* initializes mt[N] with a seed */
/* origin name init_genrand */
initSeed(seed) {
this.mt[0] = seed >>> 0;
for (this.mti = 1; this.mti < this.N; this.mti++) {
const s = this.mt[this.mti - 1] ^ (this.mt[this.mti - 1] >>> 30);
this.mt[this.mti] =
((((s & 0xffff0000) >>> 16) * 1812433253) << 16) +
(s & 0x0000ffff) * 1812433253 +
this.mti;
/* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */
/* In the previous versions, MSBs of the seed affect */
/* only MSBs of the array mt[]. */
/* 2002/01/09 modified by Makoto Matsumoto */
this.mt[this.mti] >>>= 0;
/* for >32 bit machines */
}
}
/* initialize by an array with array-length */
/* init_key is the array for initializing keys */
/* key_length is its length */
/* slight change for C++, 2004/2/26 */
initByArray(initKey, keyLength) {
this.initSeed(19650218);
let i = 1;
let j = 0;
let k = this.N > keyLength ? this.N : keyLength;
for (; k; k--) {
const s = this.mt[i - 1] ^ (this.mt[i - 1] >>> 30);
this.mt[i] =
(this.mt[i] ^
(((((s & 0xffff0000) >>> 16) * 1664525) << 16) +
(s & 0x0000ffff) * 1664525)) +
initKey[j] +
j; /* non linear */
this.mt[i] >>>= 0; /* for WORDSIZE > 32 machines */
i++;
j++;
if (i >= this.N) {
this.mt[0] = this.mt[this.N - 1];
i = 1;
}
if (j >= keyLength)
j = 0;
}
for (k = this.N - 1; k; k--) {
const s = this.mt[i - 1] ^ (this.mt[i - 1] >>> 30);
this.mt[i] =
(this.mt[i] ^
(((((s & 0xffff0000) >>> 16) * 1566083941) << 16) +
(s & 0x0000ffff) * 1566083941)) -
i; /* non linear */
this.mt[i] >>>= 0; /* for WORDSIZE > 32 machines */
i++;
if (i >= this.N) {
this.mt[0] = this.mt[this.N - 1];
i = 1;
}
}
this.mt[0] = 0x80000000; /* MSB is 1; assuring non-zero initial array */
}
/* generates a random number on [0,0xffffffff]-interval */
/* origin name genrand_int32 */
randomInt32() {
let y;
const mag01 = [0x0, this.MATRIX_A];
/* mag01[x] = x * MATRIX_A for x=0,1 */
if (this.mti >= this.N) {
/* generate N words at one time */
let kk;
if (this.mti === this.N + 1)
/* if init_seed() has not been called, */
this.initSeed(5489); /* a default initial seed is used */
for (kk = 0; kk < this.N - this.M; kk++) {
y =
(this.mt[kk] & this.UPPER_MASK) | (this.mt[kk + 1] & this.LOWER_MASK);
this.mt[kk] = this.mt[kk + this.M] ^ (y >>> 1) ^ mag01[y & 0x1];
}
for (; kk < this.N - 1; kk++) {
y =
(this.mt[kk] & this.UPPER_MASK) | (this.mt[kk + 1] & this.LOWER_MASK);
this.mt[kk] =
this.mt[kk + (this.M - this.N)] ^ (y >>> 1) ^ mag01[y & 0x1];
}
y =
(this.mt[this.N - 1] & this.UPPER_MASK) |
(this.mt[0] & this.LOWER_MASK);
this.mt[this.N - 1] = this.mt[this.M - 1] ^ (y >>> 1) ^ mag01[y & 0x1];
this.mti = 0;
}
y = this.mt[this.mti++];
/* Tempering */
y ^= y >>> 11;
y ^= (y << 7) & 0x9d2c5680;
y ^= (y << 15) & 0xefc60000;
y ^= y >>> 18;
return y >>> 0;
}
/* generates a random number on [0,0x7fffffff]-interval */
/* origin name genrand_int31 */
randomInt31() {
return this.randomInt32() >>> 1;
}
/* generates a random number on [0,1]-real-interval */
/* origin name genrand_real1 */
randomReal1() {
return this.randomInt32() * (1.0 / 4294967295.0);
/* divided by 2^32-1 */
}
/* generates a random number on [0,1)-real-interval */
/* origin name genrand_real2 */
randomReal2() {
return this.randomInt32() * (1.0 / 4294967296.0);
/* divided by 2^32 */
}
/* generates a random number on (0,1)-real-interval */
/* origin name genrand_real3 */
randomReal3() {
return (this.randomInt32() + 0.5) * (1.0 / 4294967296.0);
/* divided by 2^32 */
}
/* generates a random number on [0,1) with 53-bit resolution*/
/* origin name genrand_res53 */
randomRes53() {
const a = this.randomInt32() >>> 5;
const b = this.randomInt32() >>> 6;
return (a * 67108864.0 + b) * (1.0 / 9007199254740992.0);
}
}
/* These real versions are due to Isaku Wada, 2002/01/09 added */
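// Usage sketch (illustrative; not executed at module load). In this port the
// genrand_* helpers survive under the camelCase names recorded in the
// "origin name" comments (randomInt32, randomReal2, ...), and identical seeds
// yield identical sequences, which keeps generated data reproducible:
//
//   const m1 = new MersenneTwister(123);
//   const m2 = new MersenneTwister(123);
//   m1.randomInt32() === m2.randomInt32(); // => true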
const bannedVehicleVinLetters = ['O', 'I', 'Q'];
const numberStrings = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'];
const uppercaseHexadecimalLetters = ['A', 'B', 'C', 'D', 'E', 'F'];
const vehicleVinLetters = [
...uppercaseHexadecimalLetters,
'G',
'H',
'J',
'K',
'L',
'M',
'N',
'P',
'R',
'S',
'T',
'U',
'V',
'W',
'X',
'Y',
'Z'
];
const lowercaseHexadecimalLetters = ['a', 'b', 'c', 'd', 'e', 'f'];
const lowercaseLetters = [
...lowercaseHexadecimalLetters,
'g',
'h',
'i',
'j',
'k',
'l',
'm',
'n',
'o',
'p',
'q',
'r',
's',
't',
'u',
'v',
'w',
'x',
'y',
'z'
];
const uppercaseLetters = [...bannedVehicleVinLetters, ...vehicleVinLetters];
const lowercaseAlphaNumeric = [...numberStrings, ...lowercaseLetters];
const lowercaseHexadecimal = [...numberStrings, ...lowercaseHexadecimalLetters];
const uppercaseAlphaNumeric = [...numberStrings, ...uppercaseLetters];
const uppercaseHexadecimal = [...numberStrings, ...uppercaseHexadecimalLetters];
const RFC4122_TEMPLATE = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx';
/**
* Object with options
*
* @typedef {Object} NumberOptions
* @property {number} min Minimum value of range
* @property {number} max Maximum value of range
 * @property {number} precision Step size (precision) of the generated value
*/
/**
* Object with options
*
* @typedef {Object} AlphaOptions
 * @property {number} count Quantity of values; defaults to 1
* @property {boolean} uppercase flag to use uppercase letters
*/
/**
* Object with options
*
* @typedef {Object} HexadecimalOptions
 * @property {number} count Quantity of values; defaults to 1
* @property {boolean} uppercase flag to use uppercase letters
* @property {boolean} prefix flag to add 0x prefix
*/
class Random {
constructor(seed) {
this.mersenne = new MersenneTwister(seed);
}
initSeed(seed) {
this.mersenne.initSeed(seed);
}
/**
 * Returns lower- or uppercase alpha characters based on the count and uppercase options
*
* @method random.alpha
* @since 1.5.0
* @param {number|AlphaOptions} options
* @returns {string} Returns the generated string with alpha characters.
* @example
* ```javascript
* random.alpha()
* // => 'b'
*
* random.alpha({count:2,uppercase:true})
* // => 'CD'
* ```
*/
alpha(options = {}) {
const defaultOptions = {
count: 1,
uppercase: false
};
const finalOptions = typeof options === 'number'
? Object.assign(Object.assign({}, defaultOptions), { count: options }) : Object.assign(Object.assign({}, defaultOptions), options);
const { count, uppercase } = finalOptions;
const lettersArray = uppercase ? uppercaseLetters : lowercaseLetters;
const letters = Array.from(Array(count).keys()).map(() => this.arrayElement(lettersArray));
return letters.join('');
}
/**
 * Returns lower- or uppercase alphanumeric characters based on the count and uppercase options
*
* @method random.alphaNumeric
* @since 1.6.0
* @param {number|AlphaOptions} options
* @returns {string} Returns the generated string with alpha numeric characters.
* @example
* ```javascript
* random.alphaNumeric()
* // => '5'
*
* random.alphaNumeric({count:2,uppercase:true})
* // => '1A'
* ```
*/
alphaNumeric(options = {}) {
const defaultOptions = {
count: 1,
uppercase: false
};
const finalOptions = typeof options === 'number'
? Object.assign(Object.assign({}, defaultOptions), { count: options }) : Object.assign(Object.assign({}, defaultOptions), options);
const { count, uppercase } = finalOptions;
const alphaNumericArray = uppercase
? uppercaseAlphaNumeric
: lowercaseAlphaNumeric;
const alphaNumerics = Array.from(Array(count).keys()).map(() => this.arrayElement(alphaNumericArray));
return alphaNumerics.join('');
}
/**
* Takes an array and returns a random element of the array
*
* @method random.arrayElement
* @since 1.2.0
* @param {Array} array
* @returns {*} Returns a random array element.
* @example
* ```javascript
* random.arrayElement([1,2,3])
* // => 2
*
* random.arrayElement()
* // => 'a'
* ```
*/
arrayElement(array) {
const finalArray = array || ['a', 'b', 'c'];
const index = this.number({ max: finalArray.length - 1 });
return finalArray[index];
}
/**
* Generate a random boolean
*
* @method random.boolean
* @since 1.2.0
* @returns {boolean} Returns the generated boolean.
* @example
* ```javascript
* random.boolean()
* // => true
*
* random.boolean()
* // => false
* ```
*/
boolean() {
return Boolean(this.number(1));
}
/**
   * Returns a single random floating-point number. A plain number argument is
   * treated as the precision; pass an options object to set min/max/precision.
*
* @method random.float
* @param {number|NumberOptions} options
* @since 1.2.0
* @returns {number} Returns the generated float.
* @example
* ```javascript
   * random.float(0.01)
   * // => 43173.42
*
* random.float({min:10, max:20, precision:1})
* // => 15
* ```
*/
float(options = {}) {
const defaultOptions = {
precision: 0.01
};
const finalOptions = typeof options === 'number'
? Object.assign(Object.assign({}, defaultOptions), { precision: options }) : Object.assign(Object.assign({}, defaultOptions), options);
return this.number(finalOptions);
}
/**
*
   * Returns a lower- or uppercase hexadecimal string based on the count, uppercase and prefix options
*
* @method random.hexadecimal
* @since 1.6.0
* @param {number|HexadecimalOptions} options
* @returns {string} Returns the generated string with hexadecimal characters.
* @example
* ```javascript
* random.hexadecimal()
* // => '0xf'
*
* random.hexadecimal({count:2, uppercase:true, prefix: false})
* // => '1A'
* ```
*/
hexadecimal(options = {}) {
const defaultOptions = {
count: 1,
uppercase: false,
prefix: true
};
const finalOptions = typeof options === 'number'
? Object.assign(Object.assign({}, defaultOptions), { count: options }) : Object.assign(Object.assign({}, defaultOptions), options);
const { count, uppercase, prefix } = finalOptions;
const hexadecimalArray = uppercase
? uppercaseHexadecimal
: lowercaseHexadecimal;
const hexadecimals = Array.from(Array(count).keys()).map(() => this.arrayElement(hexadecimalArray));
return prefix
? `${uppercase ? '0X' : '0x'}${hexadecimals.join('')}`
: hexadecimals.join('');
}
/**
* Returns a single random number based on a max number or range
*
* @method random.number
* @since 1.0.0
* @param {number|NumberOptions} options
* @returns {number} Returns the generated number.
* @example
* ```javascript
* random.number(100)
* // => 10
*
* random.number({min:10, max:20, precision:1})
* // => 15
* ```
*/
number(options = {}) {
const defaultOptions = {
min: 0,
max: 99999,
precision: 1
};
const finalOptions = typeof options === 'number'
? Object.assign(Object.assign({}, defaultOptions), { max: options }) : Object.assign(Object.assign({}, defaultOptions), options);
const { max, min, precision } = finalOptions;
// Make the range inclusive of the max value
const finalMax = max >= 0 ? max + precision : max;
const randomNumber = Math.floor(this.rand(finalMax / precision, min / precision));
    // Work around floating-point arithmetic issues, e.g. 6681493 / 0.01
return randomNumber / (1 / precision);
}
/**
* Generate a uuid.
*
* @method random.uuid
* @since 1.1.0
* @returns {string} Returns the generated uuid.
* @example
* ```javascript
* random.uuid()
* // => 49e71c40-9b21-4371-9699-2def33f62e66
*
* random.uuid()
* // => da94f128-4247-48e3-bc73-d0cae46b5093
* ```
*/
uuid() {
return RFC4122_TEMPLATE.replace(/[xy]/g, this.replacePlaceholders.bind(this));
}
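    // Helper for uuid(): 'x' slots in the RFC 4122 template accept any hex
    // digit, while 'y' is forced into 8..b via (random & 0x3) | 0x8, which
    // encodes the required variant bits.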
replacePlaceholders(placeholder) {
const random = Math.floor(this.mersenne.randomReal2() * 16);
const value = placeholder === 'x' ? random : (random & 0x3) | 0x8;
return value.toString(16);
}
rand(max, min) {
return Math.floor(this.mersenne.randomReal2() * (max - min) + min);
}
}
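// Usage sketch (illustrative; the seed is an assumed example value). number()
// treats a plain number as the max and makes the range inclusive by adding one
// precision step before flooring:
//
//   const random = new Random(42);
//   random.number(5);                                   // integer in [0, 5]
//   random.number({ min: 1, max: 3, precision: 0.5 });  // one of 1, 1.5, 2, 2.5, 3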
/**
*
* @namespace faker.random
*/
/*
function Random (faker, seed) {
// Use a user provided seed if it exists
if (seed) {
if (Array.isArray(seed) && seed.length) {
mersenne.seed_array(seed);
}
else {
mersenne.seed(seed);
}
}
*/
/**
* takes an array and returns a subset with random elements of the array
*
* @method faker.random.arrayElements
* @param {array} array
* @param {number} count number of elements to pick
*/
/*
this.arrayElements = function (array, count) {
array = array || ["a", "b", "c"];
if (typeof count !== 'number') {
count = faker.random.number({ min: 1, max: array.length });
} else if (count > array.length) {
count = array.length;
} else if (count < 0) {
count = 0;
}
var arrayCopy = array.slice();
var countToRemove = arrayCopy.length - count;
for (var i = 0; i < countToRemove; i++) {
var indexToRemove = faker.random.number({ max: arrayCopy.length - 1 });
arrayCopy.splice(indexToRemove, 1);
}
return arrayCopy;
}
*/
/**
* takes an object and returns the randomly key or value
*
* @method faker.random.objectElement
* @param {object} object
* @param {mixed} field
*/
/* this.objectElement = function (object, field) {
object = object || { "foo": "bar", "too": "car" };
var array = Object.keys(object);
var key = faker.random.arrayElement(array);
return field === "key" ? key : object[key];
}
// TODO: have ability to return specific type of word? As in: noun, adjective, verb, etc
/**
* word
*
* @method faker.random.word
* @param {string} type
*/
/* this.word = function randomWord (type) {
var wordMethods = [
'commerce.department',
'commerce.productName',
'commerce.productAdjective',
'commerce.productMaterial',
'commerce.product',
'commerce.color',
'company.catchPhraseAdjective',
'company.catchPhraseDescriptor',
'company.catchPhraseNoun',
'company.bsAdjective',
'company.bsBuzz',
'company.bsNoun',
'address.streetSuffix',
'address.county',
'address.country',
'address.state',
'finance.accountName',
'finance.transactionType',
'finance.currencyName',
'hacker.noun',
'hacker.verb',
'hacker.adjective',
'hacker.ingverb',
'hacker.abbreviation',
'name.jobDescriptor',
'name.jobArea',
'name.jobType'];
// randomly pick from the many faker methods that can generate words
var randomWordMethod = faker.random.arrayElement(wordMethods);
var result = faker.fake('{{' + randomWordMethod + '}}');
return faker.random.arrayElement(result.split(' '));
}
*/
/**
* randomWords
*
* @method faker.random.words
* @param {number} count defaults to a random value between 1 and 3
*/
/* this.words = function randomWords (count) {
var words = [];
if (typeof count === "undefined") {
count = faker.random.number({min:1, max: 3});
}
for (var i = 0; i<count; i++) {
words.push(faker.random.word());
}
return words.join(' ');
}
*/
/**
* locale
*
* @method faker.random.locale
*/
/* this.locale = function randomLocale () {
return faker.random.arrayElement(Object.keys(faker.locales));
};
*/
//}
//module['exports'] = Random;
/**
* Object with options
*
* @typedef {Object} DateTimeOptions
* @property {number} days Number that represents days
* @property {number} years Number that represents years
* @property {Date} dateReference Value to take as reference
*/
class DateTime {
constructor(seed) {
this.random = new Random(seed);
}
initSeed(seed) {
this.random.initSeed(seed);
}
/**
* Returns a Date instance from the past
*
* @method date.past
* @since 1.9.0
* @param {number|DateTimeOptions} options
* @returns {Date} Returns a Date instance from the past.
* @example
* ```javascript
* dateTime.past()
* // => '2020-03-21T01:57:41.025Z'
*
* dateTime.past({days: 1, years:2, dateReference:new Date('2021-01-20')})
* // => '2020-06-15T02:25:40.025Z'
* ```
*/
past(options = {}) {
const defaultOptions = {
years: 1,
days: 0,
dateReference: new Date()
};
const finalOptions = typeof options === 'number'
? Object.assign(Object.assign({}, defaultOptions), { years: options }) : Object.assign(Object.assign({}, defaultOptions), options);
const { days, years, dateReference } = finalOptions;
const date = new Date(dateReference instanceof Date ? dateReference.getTime() : dateReference);
const range = {
min: 1000,
max: years * 365 * 24 * 3600 * 1000 + days * 24 * 3600 * 1000
};
const past = date.getTime();
// some time from now to N years ago, in milliseconds
date.setTime(past - this.random.number(range));
return date;
}
/**
   * Returns a Date instance from the future
*
* @method date.future
* @since 1.9.0
* @param {number|DateTimeOptions} options
* @returns {Date} Returns a Date instance from the future.
* @example
* ```javascript
* dateTime.future()
* // => '2021-05-22T06:30:16.025Z'
*
* dateTime.future({days: 1, years:2, dateReference:new Date('2021-01-20')})
* // => '2021-08-04T08:10:33.025Z'
* ```
*/
future(options = {}) {
const defaultOptions = {
years: 1,
days: 0,
dateReference: new Date()
};
const finalOptions = typeof options === 'number'
? Object.assign(Object.assign({}, defaultOptions), { years: options }) : Object.assign(Object.assign({}, defaultOptions), options);
const { days, years, dateReference } = finalOptions;
const date = new Date(dateReference instanceof Date ? dateReference.getTime() : dateReference);
const range = {
min: 1000,
max: years * 365 * 24 * 3600 * 1000 + days * 24 * 3600 * 1000
};
| // some time from now to N years in the future, in milliseconds
date.setTime(past + this.random.number(range));
return date;
}
}
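// Usage sketch (illustrative; the seed is an assumed example value). past()
// and future() offset a reference date by a random number of milliseconds
// bounded by the years/days options:
//
//   const dateTime = new DateTime(7);
//   dateTime.future({ years: 0, days: 10, dateReference: new Date('2021-01-20') });
//   // => some Date within the 10 days after 2021-01-20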
const avatarUris = [
'https://randomuser.me/api/portraits/men/0.jpg',
'https://randomuser.me/api/portraits/men/1.jpg',
'https://randomuser.me/api/portraits/men/2.jpg',
'https://randomuser.me/api/portraits/men/3.jpg',
'https://randomuser.me/api/portraits/men/4.jpg',
'https://randomuser.me/api/portraits/men/5.jpg',
'https://randomuser.me/api/portraits/men/6.jpg',
'https://randomuser.me/api/portraits/men/7.jpg',
'https://randomuser.me/api/portraits/men/8.jpg',
'https://randomuser.me/api/portraits/men/9.jpg',
'https://randomuser.me/api/portraits/men/10.jpg',
'https://randomuser.me/api/portraits/men/11.jpg',
'https://randomuser.me/api/portraits/men/12.jpg',
'https://randomuser.me/api/portraits/men/13.jpg',
'https://randomuser.me/api/portraits/men/14.jpg',
'https://randomuser.me/api/portraits/men/15.jpg',
'https://randomuser.me/api/portraits/men/16.jpg',
'https://randomuser.me/api/portraits/men/17.jpg',
'https://randomuser.me/api/portraits/men/18.jpg',
'https://randomuser.me/api/portraits/men/19.jpg',
'https://randomuser.me/api/portraits/men/20.jpg',
'https://randomuser.me/api/portraits/men/21.jpg',
'https://randomuser.me/api/portraits/men/22.jpg',
'https://randomuser.me/api/portraits/men/23.jpg',
'https://randomuser.me/api/portraits/men/24.jpg',
'https://randomuser.me/api/portraits/men/25.jpg',
'https://randomuser.me/api/portraits/men/26.jpg',
'https://randomuser.me/api/portraits/men/27.jpg',
'https://randomuser.me/api/portraits/men/28.jpg',
'https://randomuser.me/api/portraits/men/29.jpg',
'https://randomuser.me/api/portraits/men/30.jpg',
'https://randomuser.me/api/portraits/men/31.jpg',
'https://randomuser.me/api/portraits/men/32.jpg',
'https://randomuser.me/api/portraits/men/33.jpg',
'https://randomuser.me/api/portraits/men/34.jpg',
'https://randomuser.me/api/portraits/men/35.jpg',
'https://randomuser.me/api/portraits/men/36.jpg',
'https://randomuser.me/api/portraits/men/37.jpg',
'https://randomuser.me/api/portraits/men/38.jpg',
'https://randomuser.me/api/portraits/men/39.jpg',
'https://randomuser.me/api/portraits/men/40.jpg',
'https://randomuser.me/api/portraits/men/41.jpg',
'https://randomuser.me/api/portraits/men/42.jpg',
'https://randomuser.me/api/portraits/men/43.jpg',
'https://randomuser.me/api/portraits/men/44.jpg',
'https://randomuser.me/api/portraits/men/45.jpg',
'https://randomuser.me/api/portraits/men/46.jpg',
'https://randomuser.me/api/portraits/men/47.jpg',
'https://randomuser.me/api/portraits/men/48.jpg',
'https://randomuser.me/api/portraits/men/49.jpg',
'https://randomuser.me/api/portraits/men/50.jpg',
'https://randomuser.me/api/portraits/men/51.jpg',
'https://randomuser.me/api/portraits/men/52.jpg',
'https://randomuser.me/api/portraits/men/53.jpg',
'https://randomuser.me/api/portraits/men/54.jpg',
'https://randomuser.me/api/portraits/men/55.jpg',
'https://randomuser.me/api/portraits/men/56.jpg',
'https://randomuser.me/api/portraits/men/57.jpg',
'https://randomuser.me/api/portraits/men/58.jpg',
'https://randomuser.me/api/portraits/men/59.jpg',
'https://randomuser.me/api/portraits/men/60.jpg',
'https://randomuser.me/api/portraits/men/61.jpg',
'https://randomuser.me/api/portraits/men/62.jpg',
'https://randomuser.me/api/portraits/men/63.jpg',
'https://randomuser.me/api/portraits/men/64.jpg',
'https://randomuser.me/api/portraits/men/65.jpg',
'https://randomuser.me/api/portraits/men/66.jpg',
'https://randomuser.me/api/portraits/men/67.jpg',
'https://randomuser.me/api/portraits/men/68.jpg',
'https://randomuser.me/api/portraits/men/69.jpg',
'https://randomuser.me/api/portraits/men/70.jpg',
'https://randomuser.me/api/portraits/men/71.jpg',
'https://randomuser.me/api/portraits/men/72.jpg',
'https://randomuser.me/api/portraits/men/73.jpg',
'https://randomuser.me/api/portraits/men/74.jpg',
'https://randomuser.me/api/portraits/men/75.jpg',
'https://randomuser.me/api/portraits/men/76.jpg',
'https://randomuser.me/api/portraits/men/77.jpg',
'https://randomuser.me/api/portraits/men/78.jpg',
'https://randomuser.me/api/portraits/men/79.jpg',
'https://randomuser.me/api/portraits/men/80.jpg',
'https://randomuser.me/api/portraits/men/81.jpg',
'https://randomuser.me/api/portraits/men/82.jpg',
'https://randomuser.me/api/portraits/men/83.jpg',
'https://randomuser.me/api/portraits/men/84.jpg',
'https://randomuser.me/api/portraits/men/85.jpg',
'https://randomuser.me/api/portraits/men/86.jpg',
'https://randomuser.me/api/portraits/men/87.jpg',
'https://randomuser.me/api/portraits/men/88.jpg',
'https://randomuser.me/api/portraits/men/89.jpg',
'https://randomuser.me/api/portraits/men/90.jpg',
'https://randomuser.me/api/portraits/men/91.jpg',
'https://randomuser.me/api/portraits/men/92.jpg',
'https://randomuser.me/api/portraits/men/93.jpg',
'https://randomuser.me/api/portraits/men/94.jpg',
'https://randomuser.me/api/portraits/men/95.jpg',
'https://randomuser.me/api/portraits/men/96.jpg',
'https://randomuser.me/api/portraits/men/97.jpg',
    'https://randomuser.me/api/portraits/men/98.jpg',
    'https://randomuser.me/api/portraits/men/99.jpg',
'https://randomuser.me/api/portraits/women/0.jpg',
'https://randomuser.me/api/portraits/women/1.jpg',
'https://randomuser.me/api/portraits/women/2.jpg',
'https://randomuser.me/api/portraits/women/3.jpg',
'https://randomuser.me/api/portraits/women/4.jpg',
'https://randomuser.me/api/portraits/women/5.jpg',
'https://randomuser.me/api/portraits/women/6.jpg',
'https://randomuser.me/api/portraits/women/7.jpg',
'https://randomuser.me/api/portraits/women/8.jpg',
'https://randomuser.me/api/portraits/women/9.jpg',
'https://randomuser.me/api/portraits/women/10.jpg',
'https://randomuser.me/api/portraits/women/11.jpg',
'https://randomuser.me/api/portraits/women/12.jpg',
'https://randomuser.me/api/portraits/women/13.jpg',
'https://randomuser.me/api/portraits/women/14.jpg',
'https://randomuser.me/api/portraits/women/15.jpg',
'https://randomuser.me/api/portraits/women/16.jpg',
'https://randomuser.me/api/portraits/women/17.jpg',
'https://randomuser.me/api/portraits/women/18.jpg',
'https://randomuser.me/api/portraits/women/19.jpg',
'https://randomuser.me/api/portraits/women/20.jpg',
'https://randomuser.me/api/portraits/women/21.jpg',
'https://randomuser.me/api/portraits/women/22.jpg',
'https://randomuser.me/api/portraits/women/23.jpg',
'https://randomuser.me/api/portraits/women/24.jpg',
'https://randomuser.me/api/portraits/women/25.jpg',
'https://randomuser.me/api/portraits/women/26.jpg',
'https://randomuser.me/api/portraits/women/27.jpg',
'https://randomuser.me/api/portraits/women/28.jpg',
'https://randomuser.me/api/portraits/women/29.jpg',
'https://randomuser.me/api/portraits/women/30.jpg',
'https://randomuser.me/api/portraits/women/31.jpg',
'https://randomuser.me/api/portraits/women/32.jpg',
'https://randomuser.me/api/portraits/women/33.jpg',
'https://randomuser.me/api/portraits/women/34.jpg',
'https://randomuser.me/api/portraits/women/35.jpg',
'https://randomuser.me/api/portraits/women/36.jpg',
'https://randomuser.me/api/portraits/women/37.jpg',
'https://randomuser.me/api/portraits/women/38.jpg',
'https://randomuser.me/api/portraits/women/39.jpg',
'https://randomuser.me/api/portraits/women/40.jpg',
'https://randomuser.me/api/portraits/women/41.jpg',
'https://randomuser.me/api/portraits/women/42.jpg',
'https://randomuser.me/api/portraits/women/43.jpg',
'https://randomuser.me/api/portraits/women/44.jpg',
'https://randomuser.me/api/portraits/women/45.jpg',
'https://randomuser.me/api/portraits/women/46.jpg',
'https://randomuser.me/api/portraits/women/47.jpg',
'https://randomuser.me/api/portraits/women/48.jpg',
'https://randomuser.me/api/portraits/women/49.jpg',
'https://randomuser.me/api/portraits/women/50.jpg',
'https://randomuser.me/api/portraits/women/51.jpg',
'https://randomuser.me/api/portraits/women/52.jpg',
'https://randomuser.me/api/portraits/women/53.jpg',
'https://randomuser.me/api/portraits/women/54.jpg',
'https://randomuser.me/api/portraits/women/55.jpg',
'https://randomuser.me/api/portraits/women/56.jpg',
'https://randomuser.me/api/portraits/women/57.jpg',
'https://randomuser.me/api/portraits/women/58.jpg',
'https://randomuser.me/api/portraits/women/59.jpg',
'https://randomuser.me/api/portraits/women/60.jpg',
'https://randomuser.me/api/portraits/women/61.jpg',
'https://randomuser.me/api/portraits/women/62.jpg',
'https://randomuser.me/api/portraits/women/63.jpg',
'https://randomuser.me/api/portraits/women/64.jpg',
'https://randomuser.me/api/portraits/women/65.jpg',
'https://randomuser.me/api/portraits/women/66.jpg',
'https://randomuser.me/api/portraits/women/67.jpg',
'https://randomuser.me/api/portraits/women/68.jpg',
'https://randomuser.me/api/portraits/women/69.jpg',
'https://randomuser.me/api/portraits/women/70.jpg',
'https://randomuser.me/api/portraits/women/71.jpg',
'https://randomuser.me/api/portraits/women/72.jpg',
'https://randomuser.me/api/portraits/women/73.jpg',
'https://randomuser.me/api/portraits/women/74.jpg',
'https://randomuser.me/api/portraits/women/75.jpg',
'https://randomuser.me/api/portraits/women/76.jpg',
'https://randomuser.me/api/portraits/women/77.jpg',
'https://randomuser.me/api/portraits/women/78.jpg',
'https://randomuser.me/api/portraits/women/79.jpg',
'https://randomuser.me/api/portraits/women/80.jpg',
'https://randomuser.me/api/portraits/women/81.jpg',
'https://randomuser.me/api/portraits/women/82.jpg',
'https://randomuser.me/api/portraits/women/83.jpg',
'https://randomuser.me/api/portraits/women/84.jpg',
'https://randomuser.me/api/portraits/women/85.jpg',
'https://randomuser.me/api/portraits/women/86.jpg',
'https://randomuser.me/api/portraits/women/87.jpg',
'https://randomuser.me/api/portraits/women/88.jpg',
'https://randomuser.me/api/portraits/women/89.jpg',
'https://randomuser.me/api/portraits/women/90.jpg',
'https://randomuser.me/api/portraits/women/91.jpg',
'https://randomuser.me/api/portraits/women/92.jpg',
'https://randomuser.me/api/portraits/women/93.jpg',
'https://randomuser.me/api/portraits/women/94.jpg',
'https://randomuser.me/api/portraits/women/95.jpg',
'https://randomuser.me/api/portraits/women/96.jpg',
'https://randomuser.me/api/portraits/women/97.jpg',
'https://randomuser.me/api/portraits/women/98.jpg',
'https://randomuser.me/api/portraits/women/99.jpg'
];
const protocols = ['http', 'https'];
class Internet {
constructor(seed) {
this.random = new Random(seed);
}
initSeed(seed) {
this.random.initSeed(seed);
}
/**
* Generates a random avatar image uri
* @method internet.avatar
* @since 1.3.0
* @returns {string} Returns a random image uri.
* @example
* ```javascript
* internet.avatar()
* // => 'https://randomuser.me/api/portraits/women/87.jpg',
*
* internet.avatar()
* // => 'https://randomuser.me/api/portraits/women/15.jpg',
* ```
*/
avatar() {
return this.random.arrayElement(avatarUris);
}
/**
* Generates a random IP address
* @method internet.ip
* @since 1.4.0
* @returns {string} Returns a random IP address.
* @example
* ```javascript
* internet.ip()
* // => '255.255.255.255',
*
* internet.ip()
* // => '0.0.0.0',
* ```
*/
ip() {
return [0, 0, 0, 0].map(() => this.random.number(255).toFixed(0)).join('.');
}
/**
* Generates a random IPv6 address
* @method internet.ipv6
* @since 1.10.0
* @returns {string} Returns a random IPv6 address.
* @example
* ```javascript
* internet.ipv6()
* // => '2001:0db8:6276:b1a7:5213:22f1:25df:c8a0',
*
* internet.ipv6()
* // => '9cda:87cd:9617:370e:8d56:d698:19c8:c195',
* ```
*/
ipv6() {
const result = Array.from(Array(8).keys()).map(() => this.random.hexadecimal({ count: 4, prefix: false }));
return result.join(':');
}
/**
* Returns a random protocol
* @method internet.protocol
* @since 1.4.0
* @returns {string} Returns a random protocol [http, https].
* @example
* ```javascript
* internet.protocol()
* // => 'https',
*
* internet.protocol()
* // => 'http',
* ```
*/
protocol() {
return this.random.arrayElement(protocols);
}
}
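// Usage sketch (illustrative; the seed is an assumed example value). All
// generators draw from the same seeded Random instance, so a seeded Internet
// produces stable avatar/ip/protocol values across runs:
//
//   const internet = new Internet(99);
//   internet.ip();     // e.g. '171.206.16.40'
//   internet.ipv6();   // eight 4-digit hex groups joined by ':'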
class Unique {
constructor(options = {}) {
const defaultOptions = { maxRetries: 20, maxTime: 10 };
const finalOptions = Object.assign(Object.assign({}, defaultOptions), options);
this.foundItems = {};
this.maxTime = finalOptions.maxTime;
this.maxRetries = finalOptions.maxRetries;
this.startTime = 0;
this.currentIterations = 0;
}
execute({ scope, method, args, model }) {
this.startTime = new Date().getTime();
this.currentIterations = 0;
return this.getUniqueValue(scope, method, args, model);
}
clear(scope) {
if (!scope) {
this.foundItems = {};
}
else if (this.foundItems[scope] !== undefined) {
this.foundItems[scope].clear();
}
}
errorMessage(message) {
throw new Error(`${message}\nMay not be able to generate any more unique values with current settings.` +
'\nTry adjusting maxTime or maxRetries parameters.');
}
isValuePresent(value, scope) {
const scopedValues = this.foundItems[scope];
if (scopedValues === undefined) {
this.foundItems[scope] = new Set();
return false;
}
return this.foundItems[scope].has(value);
}
getUniqueValue(scope, method, args, model) {
const now = new Date().getTime();
if (now - this.startTime >= this.maxTime) {
this.errorMessage(`Exceeded maxTime: ${this.maxTime}`);
}
if (this.currentIterations >= this.maxRetries) {
this.errorMessage(`Exceeded maxRetries: ${this.maxRetries}`);
}
const value = method.apply(model || this, args || []);
if (this.isValuePresent(value, scope) === false) {
this.foundItems[scope].add(value);
this.currentIterations = 0;
return value;
}
else {
this.currentIterations = this.currentIterations + 1;
return this.getUniqueValue(scope, method, args, model);
}
}
}
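// Usage sketch (illustrative; seed and limits are assumed values). execute()
// retries `method` until it returns a value not yet recorded under `scope`,
// giving up after maxRetries attempts or maxTime milliseconds:
//
//   const unique = new Unique({ maxRetries: 50, maxTime: 250 });
//   const random = new Random(1);
//   unique.execute({ scope: 'ids', method: random.number, args: [10], model: random });
//   // => a number in [0, 10] not previously returned under the 'ids' scope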
exports.DateTime = DateTime;
exports.Internet = Internet;
exports.Random = Random;
exports.Unique = Unique;
//# sourceMappingURL=main.js.map | const past = date.getTime();
|
utils.py | # Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import importlib
from collections import OrderedDict
from contextlib import contextmanager
import torch
_is_nncf_enabled = importlib.util.find_spec('nncf') is not None
def is_nncf_enabled():
return _is_nncf_enabled
def check_nncf_is_enabled():
if not is_nncf_enabled():
raise RuntimeError('Tried to use NNCF, but NNCF is not installed')
def | ():
if not is_nncf_enabled():
return None
import nncf
return nncf.__version__
def load_checkpoint(model, filename, map_location=None, strict=False):
"""Load checkpoint from a file or URI.
Args:
model (Module): Module to load checkpoint.
filename (str): Either a filepath or URL or modelzoo://xxxxxxx.
map_location (str): Same as :func:`torch.load`.
strict (bool): Whether to allow different params for the model and
checkpoint.
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
from nncf.torch import load_state
checkpoint = torch.load(filename, map_location=map_location)
# get state_dict from checkpoint
if isinstance(checkpoint, OrderedDict):
state_dict = checkpoint
elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
raise RuntimeError(
'No state_dict found in checkpoint file {}'.format(filename))
_ = load_state(model, state_dict, strict)
return checkpoint
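# Usage sketch (the path and map_location are illustrative): restore weights
# into an NNCF-wrapped model before resuming compression-aware training.
#
#   checkpoint = load_checkpoint(model, 'work_dir/latest.pth',
#                                map_location='cpu', strict=False)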
@contextmanager
def nullcontext():
"""
Context which does nothing
"""
yield
def no_nncf_trace():
"""
Wrapper for original NNCF no_nncf_trace() context
"""
if is_nncf_enabled():
from nncf.torch.dynamic_graph.context import no_nncf_trace as original_no_nncf_trace
return original_no_nncf_trace()
return nullcontext()
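# Usage sketch (assumes `x` is a torch.Tensor): statements in the block are
# excluded from NNCF graph tracing when NNCF is installed, and run under a
# no-op context otherwise.
#
#   with no_nncf_trace():
#       stats = x.detach().mean()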
def is_in_nncf_tracing():
if not is_nncf_enabled():
return False
from nncf.torch.dynamic_graph.context import get_current_context
ctx = get_current_context()
if ctx is None:
return False
return ctx.is_tracing
def is_accuracy_aware_training_set(nncf_config):
if not is_nncf_enabled():
return False
from nncf.config.utils import is_accuracy_aware_training
is_acc_aware_training_set = is_accuracy_aware_training(nncf_config)
return is_acc_aware_training_set
| get_nncf_version |
test_archive_util.py | """Tests for distutils.archive_util."""
__revision__ = "$Id: test_archive_util.py 75659 2009-10-24 13:29:44Z tarek.ziade $"
import unittest
import os
import tarfile
from os.path import splitdrive
import warnings
from distutils.archive_util import (check_archive_formats, make_tarball,
make_zipfile, make_archive,
ARCHIVE_FORMATS)
from distutils.spawn import find_executable, spawn
from distutils.tests import support
from test.test_support import check_warnings
try:
import grp
import pwd
UID_GID_SUPPORT = True
except ImportError:
UID_GID_SUPPORT = False
try:
import zipfile
ZIP_SUPPORT = True
except ImportError:
ZIP_SUPPORT = find_executable('zip')
# some tests will fail if zlib is not available
try:
import zlib
except ImportError:
zlib = None
class ArchiveUtilTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
@unittest.skipUnless(zlib, "requires zlib")
def test_make_tarball(self):
# creating something to tar
tmpdir = self.mkdtemp()
self.write_file([tmpdir, 'file1'], 'xxx')
self.write_file([tmpdir, 'file2'], 'xxx')
os.mkdir(os.path.join(tmpdir, 'sub'))
self.write_file([tmpdir, 'sub', 'file3'], 'xxx')
tmpdir2 = self.mkdtemp()
        if splitdrive(tmpdir)[0] != splitdrive(tmpdir2)[0]:
            self.skipTest("source and target should be on same drive")
base_name = os.path.join(tmpdir2, 'archive')
# working with relative paths to avoid tar warnings
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
make_tarball(splitdrive(base_name)[1], '.')
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
tarball = base_name + '.tar.gz'
self.assertTrue(os.path.exists(tarball))
# trying an uncompressed one
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
make_tarball(splitdrive(base_name)[1], '.', compress=None)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
def _tarinfo(self, path):
tar = tarfile.open(path)
try:
names = tar.getnames()
names.sort()
return tuple(names)
finally:
tar.close()
def _create_files(self):
# creating something to tar
tmpdir = self.mkdtemp()
dist = os.path.join(tmpdir, 'dist')
os.mkdir(dist)
self.write_file([dist, 'file1'], 'xxx')
self.write_file([dist, 'file2'], 'xxx')
os.mkdir(os.path.join(dist, 'sub'))
self.write_file([dist, 'sub', 'file3'], 'xxx')
os.mkdir(os.path.join(dist, 'sub2'))
tmpdir2 = self.mkdtemp()
base_name = os.path.join(tmpdir2, 'archive')
return tmpdir, tmpdir2, base_name
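    # The fixture layout produced above looks like (illustrative):
    #   <tmpdir>/dist/{file1, file2, sub/file3, sub2/}
    # with base_name pointing at <tmpdir2>/archive (extension added later).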
@unittest.skipUnless(zlib, "Requires zlib")
@unittest.skipUnless(find_executable('tar') and find_executable('gzip'),
'Need the tar command to run')
def test_tarfile_vs_tar(self):
tmpdir, tmpdir2, base_name = self._create_files()
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
make_tarball(base_name, 'dist')
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
tarball = base_name + '.tar.gz'
self.assertTrue(os.path.exists(tarball))
# now create another tarball using `tar`
tarball2 = os.path.join(tmpdir, 'archive2.tar.gz')
tar_cmd = ['tar', '-cf', 'archive2.tar', 'dist']
gzip_cmd = ['gzip', '-f9', 'archive2.tar']
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
spawn(tar_cmd)
spawn(gzip_cmd)
finally:
os.chdir(old_dir)
self.assertTrue(os.path.exists(tarball2))
# let's compare both tarballs
self.assertEquals(self._tarinfo(tarball), self._tarinfo(tarball2))
# trying an uncompressed one
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
make_tarball(base_name, 'dist', compress=None)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
# now for a dry_run
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
make_tarball(base_name, 'dist', compress=None, dry_run=True)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
@unittest.skipUnless(find_executable('compress'),
'The compress program is required')
def test_compress_deprecated(self):
tmpdir, tmpdir2, base_name = self._create_files()
# using compress and testing the PendingDeprecationWarning
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
with check_warnings() as w:
warnings.simplefilter("always")
make_tarball(base_name, 'dist', compress='compress')
finally:
os.chdir(old_dir)
tarball = base_name + '.tar.Z'
self.assertTrue(os.path.exists(tarball))
self.assertEquals(len(w.warnings), 1)
# same test with dry_run
os.remove(tarball)
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
with check_warnings() as w:
warnings.simplefilter("always")
make_tarball(base_name, 'dist', compress='compress',
dry_run=True)
finally:
os.chdir(old_dir)
self.assertTrue(not os.path.exists(tarball))
self.assertEquals(len(w.warnings), 1)
@unittest.skipUnless(zlib, "Requires zlib")
@unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run')
def test_make_zipfile(self):
        # creating something to zip
tmpdir = self.mkdtemp()
self.write_file([tmpdir, 'file1'], 'xxx')
self.write_file([tmpdir, 'file2'], 'xxx')
tmpdir2 = self.mkdtemp()
base_name = os.path.join(tmpdir2, 'archive')
make_zipfile(base_name, tmpdir)
        # check if the zipfile was created
        zipfile_name = base_name + '.zip'
        self.assertTrue(os.path.exists(zipfile_name))
def test_check_archive_formats(self):
self.assertEquals(check_archive_formats(['gztar', 'xxx', 'zip']),
'xxx')
self.assertEquals(check_archive_formats(['gztar', 'zip']), None)
def test_make_archive(self):
tmpdir = self.mkdtemp()
base_name = os.path.join(tmpdir, 'archive')
self.assertRaises(ValueError, make_archive, base_name, 'xxx')
@unittest.skipUnless(zlib, "Requires zlib")
def test_make_archive_owner_group(self):
# testing make_archive with owner and group, with various combinations
        # this works even if there's no gid/uid support
if UID_GID_SUPPORT:
group = grp.getgrgid(0)[0]
owner = pwd.getpwuid(0)[0]
else:
group = owner = 'root'
base_dir, root_dir, base_name = self._create_files()
        base_name = os.path.join(self.mkdtemp(), 'archive')
res = make_archive(base_name, 'zip', root_dir, base_dir, owner=owner,
group=group)
self.assertTrue(os.path.exists(res))
res = make_archive(base_name, 'zip', root_dir, base_dir)
self.assertTrue(os.path.exists(res))
res = make_archive(base_name, 'tar', root_dir, base_dir,
owner=owner, group=group)
self.assertTrue(os.path.exists(res))
res = make_archive(base_name, 'tar', root_dir, base_dir,
owner='kjhkjhkjg', group='oihohoh')
self.assertTrue(os.path.exists(res))
@unittest.skipUnless(zlib, "Requires zlib")
@unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support")
def | (self):
tmpdir, tmpdir2, base_name = self._create_files()
old_dir = os.getcwd()
os.chdir(tmpdir)
group = grp.getgrgid(0)[0]
owner = pwd.getpwuid(0)[0]
try:
archive_name = make_tarball(base_name, 'dist', compress=None,
owner=owner, group=group)
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
self.assertTrue(os.path.exists(archive_name))
# now checks the rights
archive = tarfile.open(archive_name)
try:
for member in archive.getmembers():
self.assertEquals(member.uid, 0)
self.assertEquals(member.gid, 0)
finally:
archive.close()
def test_make_archive_cwd(self):
current_dir = os.getcwd()
def _breaks(*args, **kw):
raise RuntimeError()
ARCHIVE_FORMATS['xxx'] = (_breaks, [], 'xxx file')
try:
try:
make_archive('xxx', 'xxx', root_dir=self.mkdtemp())
except:
pass
self.assertEquals(os.getcwd(), current_dir)
finally:
del ARCHIVE_FORMATS['xxx']
def test_suite():
return unittest.makeSuite(ArchiveUtilTestCase)
if __name__ == "__main__":
unittest.main(defaultTest="test_suite")
| test_tarfile_root_owner |
database.py | # -*- coding: utf-8 -*-
#
# Unless explicitly stated otherwise all files in this repository are licensed
# under the Apache 2 License.
#
# This product includes software developed at Datadog
# (https://www.datadoghq.com/).
#
# Copyright 2018 Datadog, Inc.
#
"""database.py
Testing utils for creating database records needed for associations.
"""
from tests.utils import default_board_id, default_repo_id, default_list_id, \
default_issue_id, default_card_id, default_pull_request_id
from app import db
from app.models import Board, Issue, List, PullRequest, Repo, Subscription, \
SubscribedList
def create_board():
"""Create the board needed for the foreign key constraint."""
db.session.add(
Board(
name='board_name',
url=f"https://trello.com/b/{default_board_id}",
trello_board_id=default_board_id
)
)
def create_repo():
"""Create the repo needed for the foreign key constraint."""
db.session.add(
Repo(
name='repo_name',
url='https://github.com/user/repo',
github_repo_id=default_repo_id
)
)
def create_list():
"""Create the list needed for the foreign key constraint."""
db.session.add(
List(
name='list_name',
trello_list_id=default_list_id,
board_id=default_board_id
)
)
def create_subscription(issue_autocard=True, pull_request_autocard=True):
"""Create a subscription."""
db.session.add(
Subscription(
board_id=default_board_id,
repo_id=default_repo_id,
issue_autocard=issue_autocard,
pull_request_autocard=pull_request_autocard
)
)
def create_subscribed_list():
|
def create_issue():
"""Create a GitHub issue representation."""
db.session.add(
Issue(
name='Test adding a new issue',
url='https://github.com/a-organization/a-repo/issues/56',
github_issue_id=default_issue_id,
repo_id=default_repo_id,
trello_board_id=default_board_id,
trello_card_id=default_card_id
)
)
def create_pull_request():
"""Create a GitHub pull request representation."""
db.session.add(
PullRequest(
name='Update README.md',
url='https://github.com/a-organization/a-repo/pulls/57',
github_pull_request_id=default_pull_request_id,
repo_id=default_repo_id,
trello_board_id=default_board_id,
trello_card_id=default_card_id
)
)
| """Create a subscribed list to create cards for."""
db.session.add(
SubscribedList(
subscription_board_id=default_board_id,
subscription_repo_id=default_repo_id,
list_id=default_list_id
)
) |
clusterdomain.go | package clusterdomain
import (
"context"
"fmt"
"reflect"
"strings"
"github.com/rancher/rio/modules/istio/pkg/domains"
"github.com/rancher/rio/modules/istio/pkg/parse"
adminv1 "github.com/rancher/rio/pkg/apis/admin.rio.cattle.io/v1"
riov1 "github.com/rancher/rio/pkg/apis/rio.cattle.io/v1"
"github.com/rancher/rio/pkg/constants"
"github.com/rancher/rio/pkg/constructors"
adminv1controller "github.com/rancher/rio/pkg/generated/controllers/admin.rio.cattle.io/v1"
riov1controller "github.com/rancher/rio/pkg/generated/controllers/rio.cattle.io/v1"
services2 "github.com/rancher/rio/pkg/services"
"github.com/rancher/rio/types"
corev1controller "github.com/rancher/wrangler-api/pkg/generated/controllers/core/v1"
"github.com/rancher/wrangler/pkg/apply"
"github.com/rancher/wrangler/pkg/generic"
"github.com/rancher/wrangler/pkg/objectset"
"github.com/rancher/wrangler/pkg/relatedresource"
"github.com/rancher/wrangler/pkg/trigger"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
extensionv1beta1 "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
)
const (
gatewayIngress = "gateway-ingress"
nodeSelectorLabel = "rio.cattle.io/gateway"
serviceDomainUpdate = "service-domain-update"
appDomainHandler = "app-domain-update"
routerDomainUpdate = "router-domain-updater"
)
var (
addressTypes = []v1.NodeAddressType{
v1.NodeExternalIP,
v1.NodeInternalIP,
}
evalTrigger trigger.Trigger
)
func Register(ctx context.Context, rContext *types.Context) error {
h := handler{
apply: rContext.Apply.WithSetID(gatewayIngress).WithStrictCaching().
WithCacheTypes(rContext.K8sNetworking.Extensions().V1beta1().Ingress()),
namespace: rContext.Namespace,
apps: rContext.Rio.Rio().V1().App(),
services: rContext.Rio.Rio().V1().Service(),
routers: rContext.Rio.Rio().V1().Router().Cache(),
publicDomainCache: rContext.Global.Admin().V1().PublicDomain().Cache(),
clusterDomain: rContext.Global.Admin().V1().ClusterDomain(),
secretCache: rContext.Core.Core().V1().Secret().Cache(),
nodeCache: rContext.Core.Core().V1().Node().Cache(),
endpointCache: rContext.Core.Core().V1().Endpoints().Cache(),
}
relatedresource.Watch(ctx, "app-clusterdomain", h.resolveApp,
rContext.Rio.Rio().V1().App(),
rContext.Global.Admin().V1().ClusterDomain())
relatedresource.Watch(ctx, "publicdomain-clusterdomain", h.resolve,
rContext.Rio.Rio().V1().Service(),
rContext.Global.Admin().V1().ClusterDomain())
relatedresource.Watch(ctx, "router-clusterdomain", h.resolveRouter,
rContext.Rio.Rio().V1().Router(),
rContext.Global.Admin().V1().ClusterDomain())
relatedresource.Watch(ctx, "cluster-domain-service", h.resolve,
rContext.Global.Admin().V1().ClusterDomain(),
rContext.Global.Admin().V1().PublicDomain())
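	// Cluster endpoints are discovered per install mode (svclb/hostport/ingress)
	// unless explicit IPs are supplied via constants.UseIPAddress as a
	// comma-separated list, e.g. "203.0.113.7,203.0.113.8" (illustrative values).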
if constants.UseIPAddress == "" {
switch constants.InstallMode {
case constants.InstallModeSvclb:
rContext.Core.Core().V1().Service().OnChange(ctx, "endpoints-serviceloadbalancer", h.syncServiceLoadbalancer)
case constants.InstallModeHostport:
rContext.Core.Core().V1().Endpoints().OnChange(ctx, "endpoints", h.syncEndpoint)
case constants.InstallModeIngress:
rContext.K8sNetworking.Extensions().V1beta1().Ingress().OnChange(ctx, "ingress-endpoints", h.syncIngress)
}
} else {
addresses := strings.Split(constants.UseIPAddress, ",")
if err := h.updateClusterDomain(addresses); err != nil {
return err
}
}
rContext.Rio.Rio().V1().Service().OnChange(ctx, serviceDomainUpdate, riov1controller.UpdateServiceOnChange(rContext.Rio.Rio().V1().Service().Updater(), h.syncDomain))
rContext.Rio.Rio().V1().App().OnChange(ctx, appDomainHandler, riov1controller.UpdateAppOnChange(rContext.Rio.Rio().V1().App().Updater(), h.syncAppDomain))
rContext.Rio.Rio().V1().Router().AddGenericHandler(ctx, routerDomainUpdate, generic.UpdateOnChange(rContext.Rio.Rio().V1().Router().Updater(), h.syncRouterDomain))
rContext.Global.Admin().V1().ClusterDomain().OnChange(ctx, "cluster-domain-gateway-ingress", h.syncClusterIngress)
return nil
}
type handler struct {
namespace string
apply apply.Apply
serviceApply apply.Apply
apps riov1controller.AppController
services riov1controller.ServiceController
routers riov1controller.RouterCache
publicDomainCache adminv1controller.PublicDomainCache
clusterDomain adminv1controller.ClusterDomainController
secretCache corev1controller.SecretCache
nodeCache corev1controller.NodeCache
endpointCache corev1controller.EndpointsCache
}
func (h handler) syncClusterIngress(key string, obj *adminv1.ClusterDomain) (*adminv1.ClusterDomain, error) {
if obj == nil || obj.DeletionTimestamp != nil || obj.Name != constants.ClusterDomainName {
return obj, nil
}
os := objectset.NewObjectSet()
domain := ""
if obj.Status.ClusterDomain != "" {
domain = fmt.Sprintf("*.%s", obj.Status.ClusterDomain)
}
if constants.InstallMode == constants.InstallModeIngress {
ingress := constructors.NewIngress(h.namespace, constants.ClusterIngressName, extensionv1beta1.Ingress{
Spec: extensionv1beta1.IngressSpec{
Rules: []extensionv1beta1.IngressRule{
{
Host: domain,
IngressRuleValue: extensionv1beta1.IngressRuleValue{
HTTP: &extensionv1beta1.HTTPIngressRuleValue{
Paths: []extensionv1beta1.HTTPIngressPath{
{
Path: "/rio-gateway",
Backend: extensionv1beta1.IngressBackend{
ServiceName: constants.GatewayName,
ServicePort: intstr.FromInt(80),
},
},
},
},
},
},
},
},
})
os.Add(ingress)
}
return obj, h.apply.Apply(os)
}
func (h handler) updateClusterDomain(addresses []string) error {
clusterDomain, err := h.clusterDomain.Cache().Get(h.namespace, constants.ClusterDomainName)
if err != nil && !errors.IsNotFound(err) {
return err
}
if clusterDomain == nil {
clusterDomain, err = h.clusterDomain.Get(h.namespace, constants.ClusterDomainName, metav1.GetOptions{})
if err != nil {
return err
}
}
deepcopy := clusterDomain.DeepCopy()
var address []adminv1.Address
for _, ip := range addresses {
address = append(address, adminv1.Address{IP: ip})
}
if !reflect.DeepEqual(deepcopy.Spec.Addresses, address) {
logrus.Infof("Updating cluster domain to address %v", addresses)
}
deepcopy.Spec.Addresses = address
	if _, err := h.clusterDomain.Update(deepcopy); err != nil {
		return err
	}
	return nil
}
func (h handler) syncServiceLoadbalancer(key string, obj *v1.Service) (*v1.Service, error) {
if obj == nil {
return obj, nil
}
if obj.Spec.Selector["app"] != constants.GatewayName || obj.Namespace != h.namespace || obj.Spec.Type != v1.ServiceTypeLoadBalancer {
return obj, nil
}
var address []string
for _, ingress := range obj.Status.LoadBalancer.Ingress {
if ingress.Hostname == "localhost" {
ingress.IP = "127.0.0.1"
}
address = append(address, ingress.IP)
}
if err := h.updateClusterDomain(address); err != nil {
return obj, err
}
return obj, nil
}
func (h handler) syncEndpoint(key string, endpoint *v1.Endpoints) (*v1.Endpoints, error) {
if endpoint == nil {
return nil, nil
}
if endpoint.Namespace != h.namespace || endpoint.Name != constants.GatewayName {
return endpoint, nil
}
var ips []string
for _, subset := range endpoint.Subsets {
for _, addr := range subset.Addresses {
if addr.NodeName == nil {
continue
}
node, err := h.nodeCache.Get(*addr.NodeName)
if err != nil {
return nil, err
}
nodeIP := getNodeIP(node)
if nodeIP != "" {
ips = append(ips, nodeIP)
}
}
}
if err := h.updateClusterDomain(ips); err != nil {
return endpoint, err
}
return endpoint, nil
}
func getNodeIP(node *v1.Node) string {
for _, addrType := range addressTypes {
for _, addr := range node.Status.Addresses {
if addrType == addr.Type {
return addr.Address
}
}
}
return ""
}
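// Example (illustrative values): because addressTypes lists NodeExternalIP
// before NodeInternalIP, a node reporting both resolves to its external IP:
//
//	node.Status.Addresses = []v1.NodeAddress{
//		{Type: v1.NodeInternalIP, Address: "10.0.0.5"},
//		{Type: v1.NodeExternalIP, Address: "203.0.113.7"},
//	}
//	getNodeIP(node) // == "203.0.113.7"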
func (h handler) syncIngress(key string, ingress *extensionv1beta1.Ingress) (*extensionv1beta1.Ingress, error) {
if ingress == nil {
return ingress, nil
}
if ingress.Namespace == h.namespace && ingress.Name == constants.ClusterIngressName {
var ips []string
for _, ip := range ingress.Status.LoadBalancer.Ingress {
if ip.IP != "" {
ips = append(ips, ip.IP)
}
}
return ingress, h.updateClusterDomain(ips)
}
return ingress, nil
}
func (h handler) onChangeNode(key string, node *v1.Node) (*v1.Node, error) {
if node == nil {
return node, nil
}
if _, ok := node.Labels[nodeSelectorLabel]; !ok {
return node, nil
}
if err := h.updateDaemonSets(); err != nil {
return node, err
}
return node, nil
}
func (h handler) updateDaemonSets() error {
svc, err := h.services.Cache().Get(h.namespace, constants.GatewayName)
if err != nil {
return err
}
deepcopy := svc.DeepCopy()
deepcopy.SystemSpec.PodSpec.NodeSelector = map[string]string{
nodeSelectorLabel: "true",
}
	if _, err := h.services.Update(deepcopy); err != nil {
		return err
	}
	return nil
}
func (h handler) resolve(namespace, name string, obj runtime.Object) ([]relatedresource.Key, error) {
switch obj.(type) {
case *adminv1.ClusterDomain:
svcs, err := h.services.Cache().List("", labels.NewSelector())
if err != nil {
return nil, err
}
var keys []relatedresource.Key
for _, svc := range svcs {
keys = append(keys, relatedresource.Key{
Name: svc.Name,
Namespace: svc.Namespace,
})
}
return keys, nil
case *adminv1.PublicDomain:
return []relatedresource.Key{
{
Name: constants.ClusterDomainName,
Namespace: h.namespace,
},
}, nil
}
return nil, nil
}
func (h handler) resolveRouter(namespace, name string, obj runtime.Object) ([]relatedresource.Key, error) {
apps, err := h.routers.List("", labels.NewSelector())
if err != nil {
return nil, err
}
var keys []relatedresource.Key
for _, app := range apps {
keys = append(keys, relatedresource.Key{
Name: app.Name,
Namespace: app.Namespace,
})
}
return keys, nil
}
func (h handler) resolveApp(namespace, name string, obj runtime.Object) ([]relatedresource.Key, error) {
switch obj.(type) {
case *adminv1.ClusterDomain:
apps, err := h.apps.Cache().List("", labels.NewSelector())
if err != nil {
return nil, err
}
var keys []relatedresource.Key
for _, app := range apps {
keys = append(keys, relatedresource.Key{
Name: app.Name,
Namespace: app.Namespace,
})
}
return keys, nil
}
return nil, nil
}
func (h handler) syncDomain(key string, svc *riov1.Service) (*riov1.Service, error) {
if svc == nil {
return svc, nil
}
if svc.DeletionTimestamp != nil {
return svc, nil
}
clusterDomain, err := h.clusterDomain.Cache().Get(h.namespace, constants.ClusterDomainName)
if err != nil {
return svc, err
}
updateDomain(svc, clusterDomain)
return svc, nil
}
func (h handler) syncAppDomain(key string, obj *riov1.App) (*riov1.App, error) {
if obj == nil {
return obj, nil
}
if obj.DeletionTimestamp != nil {
return obj, nil
}
clusterDomain, err := h.clusterDomain.Cache().Get(h.namespace, constants.ClusterDomainName)
if err != nil {
return obj, err
}
updateAppDomain(obj, clusterDomain)
return obj, nil
}
func updateAppDomain(app *riov1.App, clusterDomain *adminv1.ClusterDomain) {
public := true
for _, svc := range app.Spec.Revisions {
if !svc.Public {
public = false
break
}
}
protocol := "http"
if clusterDomain.Status.HTTPSSupported {
protocol = "https"
}
var endpoints []string
if public && clusterDomain.Status.ClusterDomain != "" {
endpoints = append(endpoints, fmt.Sprintf("%s://%s", protocol, domains.GetExternalDomain(app.Name, app.Namespace, clusterDomain.Status.ClusterDomain)))
}
for _, pd := range app.Status.PublicDomains {
endpoints = append(endpoints, fmt.Sprintf("%s://%s", protocol, pd))
}
app.Status.Endpoints = parse.FormatEndpoint(protocol, endpoints)
}
func updateDomain(service *riov1.Service, clusterDomain *adminv1.ClusterDomain) |
func (h handler) syncRouterDomain(key string, obj runtime.Object) (runtime.Object, error) {
if obj == nil {
return nil, nil
}
clusterDomain, err := h.clusterDomain.Cache().Get(h.namespace, constants.ClusterDomainName)
if err != nil {
return obj, err
}
updateRouterDomain(obj.(*riov1.Router), clusterDomain)
return obj, nil
}
func updateRouterDomain(router *riov1.Router, clusterDomain *adminv1.ClusterDomain) {
protocol := "http"
if clusterDomain.Status.HTTPSSupported {
protocol = "https"
}
router.Status.Endpoints = []string{
fmt.Sprintf("%s://%s", protocol, domains.GetExternalDomain(router.Name, router.Namespace, clusterDomain.Status.ClusterDomain)),
}
for _, pd := range router.Status.PublicDomains {
router.Status.Endpoints = append(router.Status.Endpoints, fmt.Sprintf("%s://%s", protocol, pd))
}
router.Status.Endpoints = parse.FormatEndpoint(protocol, router.Status.Endpoints)
}
| {
public := domains.IsPublic(service)
protocol := "http"
if clusterDomain.Status.HTTPSSupported {
protocol = "https"
}
var endpoints []string
if public && clusterDomain.Status.ClusterDomain != "" {
app, version := services2.AppAndVersion(service)
endpoints = append(endpoints, fmt.Sprintf("%s://%s", protocol, domains.GetExternalDomain(app+"-"+version, service.Namespace, clusterDomain.Status.ClusterDomain)))
}
for _, pd := range service.Status.PublicDomains {
endpoints = append(endpoints, fmt.Sprintf("%s://%s", protocol, pd))
}
service.Status.Endpoints = parse.FormatEndpoint(protocol, endpoints)
} |
styled.js | import styled from "styled-components";
import { StyledCard } from "../NormalCard/styled";
import { SuccessAnimation } from "components/Global/transitions.styled";
const SuccessText = styled.p`
position: relative;
top: 40px;
| `;
const StyledSuccessCard = styled(StyledCard)`
justify-content: center;
background: ${({ success, theme }) => (success === "ERROR" ? theme.RED_FORBIDDEN : theme.SUCESS)};
i {
font-size: 5rem;
color: white;
}
@media (min-width: 768px) {
animation: ${SuccessAnimation} 2s;
}
`;
export { StyledSuccessCard, SuccessText }; | color: white;
font-size: ${({ theme }) => theme.LARGE};
font-weight: bold; |
raw-loader.js | exports.__esModule = true;
exports.default = function(source) { | return new Buffer(source.toString("hex") + source.toString("utf-8"), "utf-8"); // eslint-disable-line
};
exports.raw = true; |
|
JURA108TestCase.py | from tir import Webapp
import unittest
class JURA108(unittest.TestCase):
@classmethod
def setUpClass(inst):
inst.oHelper = Webapp()
inst.oHelper.SetTIRConfig(config_name="user", value="daniel.frodrigues")
inst.oHelper.SetTIRConfig(config_name="password", value="1")
inst.oHelper.Setup('SIGAJURI','','T1','D MG 01 ','76')
inst.oHelper.Program('JURA106')
inst.oHelper.AddParameter("MV_JHBPESF", "", "1", "", "")
inst.oHelper.SetParameters()
def test_JURA108_CT001(self):
self.oHelper.SetValue("cValor","Contencioso - Fup",name_attr=True)
self.oHelper.WaitFieldValue("NTA_CTIPO","")
self.oHelper.SetValue('NTA_CTIPO', '00001')
self.oHelper.ClickLabel('Pesquisar')
self.oHelper.ClickGridCell("Código Assunto Jurídico",row=1)
self.oHelper.ClickLabel('Exportação Personalizada')
self.oHelper.SetValue("cCmbTabela","000011 - Acordos (NYP001)",name_attr=True)
self.oHelper.SetButton("Add. Todos >>")
self.oHelper.SetButton("Sim")
self.oHelper.SetButton("<< Rem. Todos")
self.oHelper.SetButton("Sim")
self.oHelper.SetValue("cCmbTabela","000005 - Follow-ups (NTA001)",name_attr=True)
self.oHelper.SetValue("cGetSearch","HORA",name_attr=True)
self.oHelper.SetButton("Pesquisar")
self.oHelper.SetButton("Adicionar >>")
self.oHelper.SetValue("cGetSearch","DT FOLLOW-UP",name_attr=True)
self.oHelper.SetButton("Pesquisar")
self.oHelper.SetButton("Adicionar >>")
self.oHelper.SetValue("cGetSearch","NOME DO PARTICI",name_attr=True)
self.oHelper.SetButton("Pesquisar")
self.oHelper.SetButton("Adicionar >>")
self.oHelper.SetButton("Mover para Baixo")
self.oHelper.SetButton("Mover para Cima")
self.oHelper.SetValue("cGetRename","Hora F-Up",name_attr=True)
self.oHelper.SetButton("Renomear")
self.oHelper.SetButton("<< Remove")
self.oHelper.SetButton("Filt. Agrup.")
self.oHelper.SetButton("Ok")
self.oHelper.SetButton("Exportar")
self.oHelper.SetValue("cGetNewConfig","FOLLOW UP - MULTIPLOS RESPONSAVEIS",name_attr=True)
self.oHelper.SetButton("Salvar Como")
self.oHelper.SetButton("Confirmar")
self.oHelper.SetButton("Fechar")
self.oHelper.SetButton("Sair")
self.oHelper.ClickLabel("Sair")
self.oHelper.AssertTrue()
@classmethod
def tear | t):
inst.oHelper.TearDown()
if __name__ == '__main__':
unittest.main()
| DownClass(ins |
clear_injectables_util.py | from typing import Union, Set
from injectable import InjectionContainer
from injectable.common_utils import get_dependency_name
from injectable.container.injectable import Injectable
from injectable.constants import DEFAULT_NAMESPACE
def clear_injectables(
dependency: Union[type, str], namespace: str = None
) -> Set[Injectable]:
| """
Utility function to clear all injectables registered for the dependency in a given
namespace. Returns a set containing all cleared injectables.
:param dependency: class or qualifier of the dependency.
:param namespace: (optional) namespace from which the injectables will be cleared.
Defaults to :const:`injectable.constants.DEFAULT_NAMESPACE`.
Usage::
>>> from injectable.testing import clear_injectables
>>> clear_injectables("foo")
.. versionadded:: 3.3.0
"""
namespace = InjectionContainer.NAMESPACES[namespace or DEFAULT_NAMESPACE]
if isinstance(dependency, str):
injectables = namespace.qualifier_registry[dependency]
namespace.qualifier_registry[dependency] = set()
else:
dependency_name = get_dependency_name(dependency)
injectables = namespace.class_registry[dependency_name]
namespace.class_registry[dependency_name] = set()
return injectables |
|
default_camera_system.rs | use crate::{
core::{
components::maths::{
camera::{Camera, DefaultCamera},
transform::Transform,
},
resources::window::Window,
},
legion::{*, systems::CommandBuffer},
};
/// System responsible for adding a Camera to each entity with a DefaultCamera component
#[system(for_each)]
pub(crate) fn default_camera(
cmd: &mut CommandBuffer,
#[resource] window_dimension: &Window,
_c: &DefaultCamera,
entity: &Entity,
) {
cmd.remove_component::<DefaultCamera>(*entity);
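// Replace the marker with a concrete Camera sized to the window in logical (dpi-scaled) pixels.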
let mut camera = Camera::new(window_dimension.width() as f32 / window_dimension.dpi() as f32,
window_dimension.height() as f32 / window_dimension.dpi() as f32); | camera,
);
cmd.add_component(*entity, Transform::default());
}
/// System responsible for applying the window dpi to each camera
#[system(for_each)]
pub(crate) fn camera_dpi(
#[resource] window_dimension: &Window,
c: &mut Camera,
) {
c.dpi = window_dimension.dpi();
} | camera.dpi = window_dimension.dpi();
cmd.add_component(
*entity, |
get_bst_feature_api_ct.py | #!/usr/bin/env python
'''
*
* (C) Copyright Broadcom Corporation 2015
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
'''
import os
import sys
import ConfigParser
import json
import pprint
from bstUtil import *
from BstRestService import *
import bstRest as rest
class get_bst_feature_api_ct(object):
def __init__(self,ip,port,params="",debug=False):
self.obj = BstRestService(ip,port)
self.debug = debug
self.params = params
def step1(self,jsonData):
"""Get BST Feature Status"""
try:
resp = self.obj.postResponse(jsonData)
if resp[0] == "INVALID":
return "FAIL","Connection refused/Invalid JSON request... Please check the ip address provided in 'ini' file/BroadViewAgent is running or not/JSON data is valid or not ..."
except Exception,e:
return "FAIL","Unable to perform the rest call with given JSON data, Occured Exception ... "+str(e)
try:
self.obj.debugJsonPrint(self.debug,jsonData,resp)
except:
return "FAIL","Invalid JSON Response data received"
if returnStatus(resp[0], 200)[0] == "FAIL": return "FAIL","Obtained {0}".format(resp[0])
if not resp[1]: return "FAIL","Got null response"
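# The agent response embeds a content-type header fragment; strip it before parsing the JSON body.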
resp_ = resp[1].replace('Content-Type: text/json', '')
data_dict = json.loads(resp_)
if not "result" in data_dict: return "FAIL","No Result key in Response JSON Data"
result = data_dict['result']
plist = self.params.split(",")
plist = [p.strip() for p in plist] |
def main(ip_address,port):
jsonText = ConfigParser.ConfigParser()
cwdir, f = os.path.split(__file__)
jsonText.read(cwdir + '/testCaseJsonStrings.ini')
json_dict = dict(jsonText.items('get_bst_feature_api_ct'))
params=json_dict.get("paramslist","")
tcObj = get_bst_feature_api_ct(ip_address,port,params,debug=True)
stepResultMap = {}
printStepHeader()
for step in tcObj.getSteps():
if step in json_dict:
resp=getattr(tcObj,step)(json_dict[step])
desc=getattr(tcObj,step).__doc__
stepResultMap[step] = resp
printStepResult(step,desc,resp[0], resp[1])
else:
resp=getattr(tcObj,step)()
desc=""
stepResultMap[step] = resp
printStepResult(step,desc,resp[0], resp[1])
if resp[0] == 'FAIL': break
printStepFooter()
statusMsgTuple = [ s for s in stepResultMap.values() if s[0] == "FAIL" ]
if statusMsgTuple:
return False, statusMsgTuple[0][1]
return True, "Test Case Passed"
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2])  # assumes agent IP address and port are passed on the command line | return returnStatus(sorted(plist), sorted(result.keys()), "", "get_bst_feature params list contains invalid param keys")
def getSteps(self):
return sorted([ i for i in dir(self) if i.startswith('step') ], key=lambda item: int(item.replace('step',''))) |
ad_group_type.pb.go | // Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0-devel
// protoc v3.17.3
// source: google/ads/googleads/v10/enums/ad_group_type.proto
package enums
import (
_ "google.golang.org/genproto/googleapis/api/annotations"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// Enum listing the possible types of an ad group.
type AdGroupTypeEnum_AdGroupType int32
const (
// The type has not been specified.
AdGroupTypeEnum_UNSPECIFIED AdGroupTypeEnum_AdGroupType = 0
// The received value is not known in this version.
//
// This is a response-only value.
AdGroupTypeEnum_UNKNOWN AdGroupTypeEnum_AdGroupType = 1
// The default ad group type for Search campaigns.
AdGroupTypeEnum_SEARCH_STANDARD AdGroupTypeEnum_AdGroupType = 2
// The default ad group type for Display campaigns.
AdGroupTypeEnum_DISPLAY_STANDARD AdGroupTypeEnum_AdGroupType = 3
// The ad group type for Shopping campaigns serving standard product ads.
AdGroupTypeEnum_SHOPPING_PRODUCT_ADS AdGroupTypeEnum_AdGroupType = 4
// The default ad group type for Hotel campaigns.
AdGroupTypeEnum_HOTEL_ADS AdGroupTypeEnum_AdGroupType = 6
// The type for ad groups in Smart Shopping campaigns.
AdGroupTypeEnum_SHOPPING_SMART_ADS AdGroupTypeEnum_AdGroupType = 7
// Short unskippable in-stream video ads.
AdGroupTypeEnum_VIDEO_BUMPER AdGroupTypeEnum_AdGroupType = 8
// TrueView (skippable) in-stream video ads.
AdGroupTypeEnum_VIDEO_TRUE_VIEW_IN_STREAM AdGroupTypeEnum_AdGroupType = 9
// TrueView in-display video ads.
AdGroupTypeEnum_VIDEO_TRUE_VIEW_IN_DISPLAY AdGroupTypeEnum_AdGroupType = 10
// Unskippable in-stream video ads.
AdGroupTypeEnum_VIDEO_NON_SKIPPABLE_IN_STREAM AdGroupTypeEnum_AdGroupType = 11
// Outstream video ads.
AdGroupTypeEnum_VIDEO_OUTSTREAM AdGroupTypeEnum_AdGroupType = 12
// Ad group type for Dynamic Search Ads ad groups.
AdGroupTypeEnum_SEARCH_DYNAMIC_ADS AdGroupTypeEnum_AdGroupType = 13
// The type for ad groups in Shopping Comparison Listing campaigns.
AdGroupTypeEnum_SHOPPING_COMPARISON_LISTING_ADS AdGroupTypeEnum_AdGroupType = 14
// The ad group type for Promoted Hotel ad groups.
AdGroupTypeEnum_PROMOTED_HOTEL_ADS AdGroupTypeEnum_AdGroupType = 15
// Video responsive ad groups.
AdGroupTypeEnum_VIDEO_RESPONSIVE AdGroupTypeEnum_AdGroupType = 16
// Video efficient reach ad groups.
AdGroupTypeEnum_VIDEO_EFFICIENT_REACH AdGroupTypeEnum_AdGroupType = 17
// Ad group type for Smart campaigns.
AdGroupTypeEnum_SMART_CAMPAIGN_ADS AdGroupTypeEnum_AdGroupType = 18
)
// Enum value maps for AdGroupTypeEnum_AdGroupType.
var (
AdGroupTypeEnum_AdGroupType_name = map[int32]string{
0: "UNSPECIFIED",
1: "UNKNOWN",
2: "SEARCH_STANDARD",
3: "DISPLAY_STANDARD",
4: "SHOPPING_PRODUCT_ADS",
6: "HOTEL_ADS",
7: "SHOPPING_SMART_ADS",
8: "VIDEO_BUMPER",
9: "VIDEO_TRUE_VIEW_IN_STREAM",
10: "VIDEO_TRUE_VIEW_IN_DISPLAY",
11: "VIDEO_NON_SKIPPABLE_IN_STREAM",
12: "VIDEO_OUTSTREAM",
13: "SEARCH_DYNAMIC_ADS",
14: "SHOPPING_COMPARISON_LISTING_ADS",
15: "PROMOTED_HOTEL_ADS",
16: "VIDEO_RESPONSIVE",
17: "VIDEO_EFFICIENT_REACH",
18: "SMART_CAMPAIGN_ADS",
}
AdGroupTypeEnum_AdGroupType_value = map[string]int32{
"UNSPECIFIED": 0,
"UNKNOWN": 1,
"SEARCH_STANDARD": 2,
"DISPLAY_STANDARD": 3,
"SHOPPING_PRODUCT_ADS": 4,
"HOTEL_ADS": 6,
"SHOPPING_SMART_ADS": 7,
"VIDEO_BUMPER": 8,
"VIDEO_TRUE_VIEW_IN_STREAM": 9,
"VIDEO_TRUE_VIEW_IN_DISPLAY": 10,
"VIDEO_NON_SKIPPABLE_IN_STREAM": 11,
"VIDEO_OUTSTREAM": 12,
"SEARCH_DYNAMIC_ADS": 13,
"SHOPPING_COMPARISON_LISTING_ADS": 14,
"PROMOTED_HOTEL_ADS": 15,
"VIDEO_RESPONSIVE": 16,
"VIDEO_EFFICIENT_REACH": 17,
"SMART_CAMPAIGN_ADS": 18,
}
)
func (x AdGroupTypeEnum_AdGroupType) Enum() *AdGroupTypeEnum_AdGroupType {
p := new(AdGroupTypeEnum_AdGroupType)
*p = x
return p
}
func (x AdGroupTypeEnum_AdGroupType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (AdGroupTypeEnum_AdGroupType) Descriptor() protoreflect.EnumDescriptor {
return file_google_ads_googleads_v10_enums_ad_group_type_proto_enumTypes[0].Descriptor()
}
func (AdGroupTypeEnum_AdGroupType) Type() protoreflect.EnumType {
return &file_google_ads_googleads_v10_enums_ad_group_type_proto_enumTypes[0]
}
func (x AdGroupTypeEnum_AdGroupType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use AdGroupTypeEnum_AdGroupType.Descriptor instead.
func (AdGroupTypeEnum_AdGroupType) EnumDescriptor() ([]byte, []int) {
return file_google_ads_googleads_v10_enums_ad_group_type_proto_rawDescGZIP(), []int{0, 0}
}
// Defines types of an ad group, specific to a particular campaign channel
// type. This type drives validations that restrict which entities can be
// added to the ad group.
type AdGroupTypeEnum struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *AdGroupTypeEnum) Reset() {
*x = AdGroupTypeEnum{}
if protoimpl.UnsafeEnabled {
mi := &file_google_ads_googleads_v10_enums_ad_group_type_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *AdGroupTypeEnum) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*AdGroupTypeEnum) ProtoMessage() {}
func (x *AdGroupTypeEnum) ProtoReflect() protoreflect.Message {
mi := &file_google_ads_googleads_v10_enums_ad_group_type_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AdGroupTypeEnum.ProtoReflect.Descriptor instead.
func (*AdGroupTypeEnum) Descriptor() ([]byte, []int) {
return file_google_ads_googleads_v10_enums_ad_group_type_proto_rawDescGZIP(), []int{0}
}
var File_google_ads_googleads_v10_enums_ad_group_type_proto protoreflect.FileDescriptor
var file_google_ads_googleads_v10_enums_ad_group_type_proto_rawDesc = []byte{
0x0a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2f, 0x76, 0x31, 0x30, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73,
0x2f, 0x61, 0x64, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x31, 0x30, 0x2e, 0x65,
0x6e, 0x75, 0x6d, 0x73, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69,
0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x22, 0xd2, 0x03, 0x0a, 0x0f, 0x41, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x54, 0x79,
0x70, 0x65, 0x45, 0x6e, 0x75, 0x6d, 0x22, 0xbe, 0x03, 0x0a, 0x0b, 0x41, 0x64, 0x47, 0x72, 0x6f,
0x75, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f,
0x57, 0x4e, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x41, 0x52, 0x43, 0x48, 0x5f, 0x53,
0x54, 0x41, 0x4e, 0x44, 0x41, 0x52, 0x44, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x49, 0x53,
0x50, 0x4c, 0x41, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x4e, 0x44, 0x41, 0x52, 0x44, 0x10, 0x03, 0x12,
0x18, 0x0a, 0x14, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x52, 0x4f, 0x44,
0x55, 0x43, 0x54, 0x5f, 0x41, 0x44, 0x53, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x4f, 0x54,
0x45, 0x4c, 0x5f, 0x41, 0x44, 0x53, 0x10, 0x06, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x48, 0x4f, 0x50,
0x50, 0x49, 0x4e, 0x47, 0x5f, 0x53, 0x4d, 0x41, 0x52, 0x54, 0x5f, 0x41, 0x44, 0x53, 0x10, 0x07,
0x12, 0x10, 0x0a, 0x0c, 0x56, 0x49, 0x44, 0x45, 0x4f, 0x5f, 0x42, 0x55, 0x4d, 0x50, 0x45, 0x52,
0x10, 0x08, 0x12, 0x1d, 0x0a, 0x19, 0x56, 0x49, 0x44, 0x45, 0x4f, 0x5f, 0x54, 0x52, 0x55, 0x45,
0x5f, 0x56, 0x49, 0x45, 0x57, 0x5f, 0x49, 0x4e, 0x5f, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x10,
0x09, 0x12, 0x1e, 0x0a, 0x1a, 0x56, 0x49, 0x44, 0x45, 0x4f, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x5f,
0x56, 0x49, 0x45, 0x57, 0x5f, 0x49, 0x4e, 0x5f, 0x44, 0x49, 0x53, 0x50, 0x4c, 0x41, 0x59, 0x10,
0x0a, 0x12, 0x21, 0x0a, 0x1d, 0x56, 0x49, 0x44, 0x45, 0x4f, 0x5f, 0x4e, 0x4f, 0x4e, 0x5f, 0x53,
0x4b, 0x49, 0x50, 0x50, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x49, 0x4e, 0x5f, 0x53, 0x54, 0x52, 0x45,
0x41, 0x4d, 0x10, 0x0b, 0x12, 0x13, 0x0a, 0x0f, 0x56, 0x49, 0x44, 0x45, 0x4f, 0x5f, 0x4f, 0x55,
0x54, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x10, 0x0c, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x45, 0x41,
0x52, 0x43, 0x48, 0x5f, 0x44, 0x59, 0x4e, 0x41, 0x4d, 0x49, 0x43, 0x5f, 0x41, 0x44, 0x53, 0x10,
0x0d, 0x12, 0x23, 0x0a, 0x1f, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x5f, 0x43, 0x4f,
0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x49, 0x4e, 0x47,
0x5f, 0x41, 0x44, 0x53, 0x10, 0x0e, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x52, 0x4f, 0x4d, 0x4f, 0x54,
0x45, 0x44, 0x5f, 0x48, 0x4f, 0x54, 0x45, 0x4c, 0x5f, 0x41, 0x44, 0x53, 0x10, 0x0f, 0x12, 0x14,
0x0a, 0x10, 0x56, 0x49, 0x44, 0x45, 0x4f, 0x5f, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x49,
0x56, 0x45, 0x10, 0x10, 0x12, 0x19, 0x0a, 0x15, 0x56, 0x49, 0x44, 0x45, 0x4f, 0x5f, 0x45, 0x46,
0x46, 0x49, 0x43, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x41, 0x43, 0x48, 0x10, 0x11, 0x12,
0x16, 0x0a, 0x12, 0x53, 0x4d, 0x41, 0x52, 0x54, 0x5f, 0x43, 0x41, 0x4d, 0x50, 0x41, 0x49, 0x47,
0x4e, 0x5f, 0x41, 0x44, 0x53, 0x10, 0x12, 0x42, 0xea, 0x01, 0x0a, 0x22, 0x63, 0x6f, 0x6d, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x31, 0x30, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x42, 0x10,
0x41, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x54, 0x79, 0x70, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f,
0x50, 0x01, 0x5a, 0x43, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2f, 0x76, 0x31, 0x30, 0x2f, 0x65, 0x6e, 0x75, 0x6d,
0x73, 0x3b, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0xa2, 0x02, 0x03, 0x47, 0x41, 0x41, 0xaa, 0x02, 0x1e,
0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x73, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x41, 0x64, 0x73, 0x2e, 0x56, 0x31, 0x30, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0xca, 0x02,
0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x73, 0x5c, 0x47, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x41, 0x64, 0x73, 0x5c, 0x56, 0x31, 0x30, 0x5c, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0xea,
0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x73, 0x3a, 0x3a, 0x47,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x41, 0x64, 0x73, 0x3a, 0x3a, 0x56, 0x31, 0x30, 0x3a, 0x3a, 0x45,
0x6e, 0x75, 0x6d, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_google_ads_googleads_v10_enums_ad_group_type_proto_rawDescOnce sync.Once
file_google_ads_googleads_v10_enums_ad_group_type_proto_rawDescData = file_google_ads_googleads_v10_enums_ad_group_type_proto_rawDesc
)
func file_google_ads_googleads_v10_enums_ad_group_type_proto_rawDescGZIP() []byte {
file_google_ads_googleads_v10_enums_ad_group_type_proto_rawDescOnce.Do(func() {
file_google_ads_googleads_v10_enums_ad_group_type_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_ads_googleads_v10_enums_ad_group_type_proto_rawDescData)
})
return file_google_ads_googleads_v10_enums_ad_group_type_proto_rawDescData
}
var file_google_ads_googleads_v10_enums_ad_group_type_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_google_ads_googleads_v10_enums_ad_group_type_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_google_ads_googleads_v10_enums_ad_group_type_proto_goTypes = []interface{}{
(AdGroupTypeEnum_AdGroupType)(0), // 0: google.ads.googleads.v10.enums.AdGroupTypeEnum.AdGroupType
(*AdGroupTypeEnum)(nil), // 1: google.ads.googleads.v10.enums.AdGroupTypeEnum
}
var file_google_ads_googleads_v10_enums_ad_group_type_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_google_ads_googleads_v10_enums_ad_group_type_proto_init() }
func file_google_ads_googleads_v10_enums_ad_group_type_proto_init() {
if File_google_ads_googleads_v10_enums_ad_group_type_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_google_ads_googleads_v10_enums_ad_group_type_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*AdGroupTypeEnum); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_ads_googleads_v10_enums_ad_group_type_proto_rawDesc, | NumServices: 0,
},
GoTypes: file_google_ads_googleads_v10_enums_ad_group_type_proto_goTypes,
DependencyIndexes: file_google_ads_googleads_v10_enums_ad_group_type_proto_depIdxs,
EnumInfos: file_google_ads_googleads_v10_enums_ad_group_type_proto_enumTypes,
MessageInfos: file_google_ads_googleads_v10_enums_ad_group_type_proto_msgTypes,
}.Build()
File_google_ads_googleads_v10_enums_ad_group_type_proto = out.File
file_google_ads_googleads_v10_enums_ad_group_type_proto_rawDesc = nil
file_google_ads_googleads_v10_enums_ad_group_type_proto_goTypes = nil
file_google_ads_googleads_v10_enums_ad_group_type_proto_depIdxs = nil
} | NumEnums: 1,
NumMessages: 1,
NumExtensions: 0, |
AccountService.d.ts | import type { APIClient } from '@wireapp/api-client';
import type { CallConfigData } from '@wireapp/api-client/src/account/CallConfigData';
export declare class | {
private readonly apiClient;
constructor(apiClient: APIClient);
getCallConfig(): Promise<CallConfigData>;
}
| AccountService |
tar_interpreter.go | package postgres
import (
"archive/tar"
"io"
"os"
"path"
"path/filepath"
"strings"
"github.com/enix/wal-g/internal"
"github.com/enix/wal-g/utility"
"github.com/pkg/errors"
"github.com/spf13/viper"
"github.com/wal-g/tracelog"
)
// FileTarInterpreter extracts input to disk.
type FileTarInterpreter struct {
DBDataDirectory string
Sentinel BackupSentinelDto
FilesMetadata FilesMetadataDto
FilesToUnwrap map[string]bool
UnwrapResult *UnwrapResult
createNewIncrementalFiles bool
}
func NewFileTarInterpreter(
dbDataDirectory string, sentinel BackupSentinelDto, filesMetadata FilesMetadataDto,
filesToUnwrap map[string]bool, createNewIncrementalFiles bool,
) *FileTarInterpreter {
return &FileTarInterpreter{dbDataDirectory, sentinel, filesMetadata,
filesToUnwrap, newUnwrapResult(), createNewIncrementalFiles}
}
// WriteLocalFile copies the tar entry contents from fileReader into localFile, applies the header's file mode, and optionally fsyncs the result.
func WriteLocalFile(fileReader io.Reader, header *tar.Header, localFile *os.File, fsync bool) error {
_, err := io.Copy(localFile, fileReader)
if err != nil {
err1 := os.Remove(localFile.Name())
if err1 != nil {
tracelog.ErrorLogger.Fatalf("Interpret: failed to remove localFile '%s' because of error: %v",
localFile.Name(), err1)
}
return errors.Wrap(err, "Interpret: copy failed")
}
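// Restore the permission bits recorded in the tar header.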
mode := os.FileMode(header.Mode)
if err = localFile.Chmod(mode); err != nil {
return errors.Wrap(err, "Interpret: chmod failed")
}
if fsync {
err = localFile.Sync()
return errors.Wrap(err, "Interpret: fsync failed")
}
return nil
}
// TODO : unit tests
func (tarInterpreter *FileTarInterpreter) unwrapRegularFileOld(fileReader io.Reader,
fileInfo *tar.Header,
targetPath string,
fsync bool) error {
if tarInterpreter.FilesToUnwrap != nil {
if _, ok := tarInterpreter.FilesToUnwrap[fileInfo.Name]; !ok {
// don't have to unwrap it this time
tracelog.DebugLogger.Printf("Don't have to unwrap '%s' this time\n", fileInfo.Name)
return nil
}
}
fileDescription, haveFileDescription := tarInterpreter.FilesMetadata.Files[fileInfo.Name]
// If this file is incremental we use it's base version from incremental path
if haveFileDescription && tarInterpreter.Sentinel.IsIncremental() && fileDescription.IsIncremented {
err := ApplyFileIncrement(targetPath, fileReader, tarInterpreter.createNewIncrementalFiles, fsync)
return errors.Wrapf(err, "Interpret: failed to apply increment for '%s'", targetPath)
}
err := PrepareDirs(fileInfo.Name, targetPath)
if err != nil {
return errors.Wrap(err, "Interpret: failed to create all directories")
}
file, err := os.OpenFile(targetPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return errors.Wrapf(err, "failed to create new file: '%s'", targetPath)
}
defer utility.LoggedClose(file, "")
return WriteLocalFile(fileReader, fileInfo, file, fsync)
}
// Interpret extracts a tar entry to disk and creates needed directories.
// Returns the first error encountered. Unless fsync is disabled via
// configuration, each file is fsynced after it is written successfully.
func (tarInterpreter *FileTarInterpreter) Interpret(fileReader io.Reader, fileInfo *tar.Header) error {
tracelog.DebugLogger.Println("Interpreting: ", fileInfo.Name)
targetPath := path.Join(tarInterpreter.DBDataDirectory, fileInfo.Name)
fsync := !viper.GetBool(internal.TarDisableFsyncSetting)
switch fileInfo.Typeflag {
case tar.TypeReg, tar.TypeRegA:
// temporary switch to determine if new unwrap logic should be used
if useNewUnwrapImplementation {
return tarInterpreter.unwrapRegularFileNew(fileReader, fileInfo, targetPath, fsync)
}
return tarInterpreter.unwrapRegularFileOld(fileReader, fileInfo, targetPath, fsync)
case tar.TypeDir:
err := os.MkdirAll(targetPath, 0755)
if err != nil {
return errors.Wrapf(err, "Interpret: failed to create all directories in %s", targetPath)
}
if err = os.Chmod(targetPath, os.FileMode(fileInfo.Mode)); err != nil {
return errors.Wrap(err, "Interpret: chmod failed")
}
case tar.TypeLink:
if err := os.Link(fileInfo.Name, targetPath); err != nil {
return errors.Wrapf(err, "Interpret: failed to create hardlink %s", targetPath)
}
case tar.TypeSymlink:
if err := os.Symlink(fileInfo.Name, targetPath); err != nil {
return errors.Wrapf(err, "Interpret: failed to create symlink %s", targetPath)
}
}
return nil
}
// PrepareDirs makes sure all parent directories of the target path exist
func | (fileName string, targetPath string) error {
if fileName == targetPath {
return nil // because it runs in the local directory
}
base := filepath.Base(fileName)
dir := strings.TrimSuffix(targetPath, base)
err := os.MkdirAll(dir, 0755)
return err
}
| PrepareDirs |
replication_controller.rs | // Generated from definition io.k8s.api.core.v1.ReplicationController
/// ReplicationController represents the configuration of a replication controller.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct ReplicationController {
/// If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
pub metadata: Option<crate::v1_15::apimachinery::pkg::apis::meta::v1::ObjectMeta>,
/// Spec defines the specification of the desired behavior of the replication controller. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
pub spec: Option<crate::v1_15::api::core::v1::ReplicationControllerSpec>,
/// Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
pub status: Option<crate::v1_15::api::core::v1::ReplicationControllerStatus>,
}
// Begin /v1/ReplicationController
// Generated from operation createCoreV1NamespacedReplicationController
impl ReplicationController {
/// create a ReplicationController
///
/// Use the returned [`crate::ResponseBody`]`<`[`CreateNamespacedReplicationControllerResponse`]`>` constructor, or [`CreateNamespacedReplicationControllerResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn create_namespaced_replication_controller(
namespace: &str,
body: &crate::v1_15::api::core::v1::ReplicationController,
optional: CreateNamespacedReplicationControllerOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<CreateNamespacedReplicationControllerResponse>), crate::RequestError> {
let CreateNamespacedReplicationControllerOptional {
dry_run,
field_manager,
pretty,
} = optional;
let __url = format!("/api/v1/namespaces/{namespace}/replicationcontrollers?",
namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
if let Some(dry_run) = dry_run {
__query_pairs.append_pair("dryRun", dry_run);
}
if let Some(field_manager) = field_manager {
__query_pairs.append_pair("fieldManager", field_manager);
}
if let Some(pretty) = pretty {
__query_pairs.append_pair("pretty", pretty);
}
let __url = __query_pairs.finish();
let mut __request = http::Request::post(__url);
let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
__request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Optional parameters of [`ReplicationController::create_namespaced_replication_controller`]
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct CreateNamespacedReplicationControllerOptional<'a> {
/// When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
pub dry_run: Option<&'a str>,
/// fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
pub field_manager: Option<&'a str>,
/// If 'true', then the output is pretty printed.
pub pretty: Option<&'a str>,
}
/// Use `<CreateNamespacedReplicationControllerResponse as Response>::try_from_parts` to parse the HTTP response body of [`ReplicationController::create_namespaced_replication_controller`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum CreateNamespacedReplicationControllerResponse {
Ok(crate::v1_15::api::core::v1::ReplicationController),
Created(crate::v1_15::api::core::v1::ReplicationController),
Accepted(crate::v1_15::api::core::v1::ReplicationController),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for CreateNamespacedReplicationControllerResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((CreateNamespacedReplicationControllerResponse::Ok(result), buf.len()))
},
http::StatusCode::CREATED => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((CreateNamespacedReplicationControllerResponse::Created(result), buf.len()))
},
http::StatusCode::ACCEPTED => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((CreateNamespacedReplicationControllerResponse::Accepted(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((CreateNamespacedReplicationControllerResponse::Other(result), read))
},
}
}
}
// Generated from operation deleteCoreV1CollectionNamespacedReplicationController
impl ReplicationController {
/// delete collection of ReplicationController
///
/// Use the returned [`crate::ResponseBody`]`<`[`DeleteCollectionNamespacedReplicationControllerResponse`]`>` constructor, or [`DeleteCollectionNamespacedReplicationControllerResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `delete_optional`
///
/// Delete options. Use `Default::default()` to not pass any.
///
/// * `list_optional`
///
/// List options. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn delete_collection_namespaced_replication_controller(
namespace: &str,
delete_optional: crate::v1_15::DeleteOptional<'_>,
list_optional: crate::v1_15::ListOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<DeleteCollectionNamespacedReplicationControllerResponse>), crate::RequestError> {
let __url = format!("/api/v1/namespaces/{namespace}/replicationcontrollers?",
namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
list_optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let mut __request = http::Request::delete(__url);
let __body = serde_json::to_vec(&delete_optional).map_err(crate::RequestError::Json)?;
__request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Use `<DeleteCollectionNamespacedReplicationControllerResponse as Response>::try_from_parts` to parse the HTTP response body of [`ReplicationController::delete_collection_namespaced_replication_controller`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum DeleteCollectionNamespacedReplicationControllerResponse {
OkStatus(crate::v1_15::apimachinery::pkg::apis::meta::v1::Status),
OkValue(crate::v1_15::api::core::v1::ReplicationControllerList),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for DeleteCollectionNamespacedReplicationControllerResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result: serde_json::Map<String, serde_json::Value> = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
let is_status = match result.get("kind") {
Some(serde_json::Value::String(s)) if s == "Status" => true,
_ => false,
};
if is_status {
let result = serde::Deserialize::deserialize(serde_json::Value::Object(result));
let result = result.map_err(crate::ResponseError::Json)?;
Ok((DeleteCollectionNamespacedReplicationControllerResponse::OkStatus(result), buf.len()))
}
else {
let result = serde::Deserialize::deserialize(serde_json::Value::Object(result));
let result = result.map_err(crate::ResponseError::Json)?;
Ok((DeleteCollectionNamespacedReplicationControllerResponse::OkValue(result), buf.len()))
}
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((DeleteCollectionNamespacedReplicationControllerResponse::Other(result), read))
},
}
}
}
// Generated from operation deleteCoreV1NamespacedReplicationController
impl ReplicationController {
/// delete a ReplicationController
///
/// Use the returned [`crate::ResponseBody`]`<`[`DeleteNamespacedReplicationControllerResponse`]`>` constructor, or [`DeleteNamespacedReplicationControllerResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the ReplicationController
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn delete_namespaced_replication_controller(
name: &str,
namespace: &str,
optional: crate::v1_15::DeleteOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<DeleteNamespacedReplicationControllerResponse>), crate::RequestError> {
let __url = format!("/api/v1/namespaces/{namespace}/replicationcontrollers/{name}",
name = crate::url::percent_encoding::percent_encode(name.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
);
let mut __request = http::Request::delete(__url);
let __body = serde_json::to_vec(&optional).map_err(crate::RequestError::Json)?;
__request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Use `<DeleteNamespacedReplicationControllerResponse as Response>::try_from_parts` to parse the HTTP response body of [`ReplicationController::delete_namespaced_replication_controller`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum DeleteNamespacedReplicationControllerResponse {
OkStatus(crate::v1_15::apimachinery::pkg::apis::meta::v1::Status),
OkValue(crate::v1_15::api::core::v1::ReplicationController),
Accepted(crate::v1_15::apimachinery::pkg::apis::meta::v1::Status),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for DeleteNamespacedReplicationControllerResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result: serde_json::Map<String, serde_json::Value> = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
let is_status = match result.get("kind") {
Some(serde_json::Value::String(s)) if s == "Status" => true,
_ => false,
};
if is_status {
let result = serde::Deserialize::deserialize(serde_json::Value::Object(result));
let result = result.map_err(crate::ResponseError::Json)?;
Ok((DeleteNamespacedReplicationControllerResponse::OkStatus(result), buf.len()))
}
else {
let result = serde::Deserialize::deserialize(serde_json::Value::Object(result));
let result = result.map_err(crate::ResponseError::Json)?;
Ok((DeleteNamespacedReplicationControllerResponse::OkValue(result), buf.len()))
}
},
http::StatusCode::ACCEPTED => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((DeleteNamespacedReplicationControllerResponse::Accepted(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((DeleteNamespacedReplicationControllerResponse::Other(result), read))
},
}
}
}
// Generated from operation listCoreV1NamespacedReplicationController
impl ReplicationController {
/// list or watch objects of kind ReplicationController
///
/// This operation only supports listing all items of this type.
///
/// Use the returned [`crate::ResponseBody`]`<`[`ListNamespacedReplicationControllerResponse`]`>` constructor, or [`ListNamespacedReplicationControllerResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn list_namespaced_replication_controller(
namespace: &str,
optional: crate::v1_15::ListOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ListNamespacedReplicationControllerResponse>), crate::RequestError> {
let __url = format!("/api/v1/namespaces/{namespace}/replicationcontrollers?",
namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let mut __request = http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Use `<ListNamespacedReplicationControllerResponse as Response>::try_from_parts` to parse the HTTP response body of [`ReplicationController::list_namespaced_replication_controller`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ListNamespacedReplicationControllerResponse {
Ok(crate::v1_15::api::core::v1::ReplicationControllerList),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ListNamespacedReplicationControllerResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((ListNamespacedReplicationControllerResponse::Ok(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((ListNamespacedReplicationControllerResponse::Other(result), read))
},
}
}
}
// Generated from operation listCoreV1ReplicationControllerForAllNamespaces
impl ReplicationController {
/// list or watch objects of kind ReplicationController
///
/// This operation only supports listing all items of this type.
///
/// Use the returned [`crate::ResponseBody`]`<`[`ListReplicationControllerForAllNamespacesResponse`]`>` constructor, or [`ListReplicationControllerForAllNamespacesResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn list_replication_controller_for_all_namespaces(
optional: crate::v1_15::ListOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ListReplicationControllerForAllNamespacesResponse>), crate::RequestError> {
let __url = "/api/v1/replicationcontrollers?".to_owned();
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let mut __request = http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Use `<ListReplicationControllerForAllNamespacesResponse as Response>::try_from_parts` to parse the HTTP response body of [`ReplicationController::list_replication_controller_for_all_namespaces`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ListReplicationControllerForAllNamespacesResponse {
Ok(crate::v1_15::api::core::v1::ReplicationControllerList),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ListReplicationControllerForAllNamespacesResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((ListReplicationControllerForAllNamespacesResponse::Ok(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((ListReplicationControllerForAllNamespacesResponse::Other(result), read))
},
}
}
}
// Generated from operation patchCoreV1NamespacedReplicationController
impl ReplicationController {
/// partially update the specified ReplicationController
///
/// Use the returned [`crate::ResponseBody`]`<`[`PatchNamespacedReplicationControllerResponse`]`>` constructor, or [`PatchNamespacedReplicationControllerResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the ReplicationController
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn patch_namespaced_replication_controller(
name: &str,
namespace: &str,
body: &crate::v1_15::apimachinery::pkg::apis::meta::v1::Patch,
optional: crate::v1_15::PatchOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<PatchNamespacedReplicationControllerResponse>), crate::RequestError> {
let __url = format!("/api/v1/namespaces/{namespace}/replicationcontrollers/{name}?",
name = crate::url::percent_encoding::percent_encode(name.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let mut __request = http::Request::patch(__url);
let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
__request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static(match body {
crate::v1_15::apimachinery::pkg::apis::meta::v1::Patch::Json(_) => "application/json-patch+json",
crate::v1_15::apimachinery::pkg::apis::meta::v1::Patch::Merge(_) => "application/merge-patch+json",
crate::v1_15::apimachinery::pkg::apis::meta::v1::Patch::StrategicMerge(_) => "application/strategic-merge-patch+json",
}));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Use `<PatchNamespacedReplicationControllerResponse as Response>::try_from_parts` to parse the HTTP response body of [`ReplicationController::patch_namespaced_replication_controller`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum PatchNamespacedReplicationControllerResponse {
Ok(crate::v1_15::api::core::v1::ReplicationController),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for PatchNamespacedReplicationControllerResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((PatchNamespacedReplicationControllerResponse::Ok(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((PatchNamespacedReplicationControllerResponse::Other(result), read))
},
}
}
}
// Generated from operation patchCoreV1NamespacedReplicationControllerStatus
impl ReplicationController {
/// partially update status of the specified ReplicationController
///
/// Use the returned [`crate::ResponseBody`]`<`[`PatchNamespacedReplicationControllerStatusResponse`]`>` constructor, or [`PatchNamespacedReplicationControllerStatusResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the ReplicationController
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn patch_namespaced_replication_controller_status(
name: &str,
namespace: &str,
body: &crate::v1_15::apimachinery::pkg::apis::meta::v1::Patch,
optional: crate::v1_15::PatchOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<PatchNamespacedReplicationControllerStatusResponse>), crate::RequestError> {
let __url = format!("/api/v1/namespaces/{namespace}/replicationcontrollers/{name}/status?",
name = crate::url::percent_encoding::percent_encode(name.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let mut __request = http::Request::patch(__url);
let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
__request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static(match body {
crate::v1_15::apimachinery::pkg::apis::meta::v1::Patch::Json(_) => "application/json-patch+json",
crate::v1_15::apimachinery::pkg::apis::meta::v1::Patch::Merge(_) => "application/merge-patch+json",
crate::v1_15::apimachinery::pkg::apis::meta::v1::Patch::StrategicMerge(_) => "application/strategic-merge-patch+json",
}));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Use `<PatchNamespacedReplicationControllerStatusResponse as Response>::try_from_parts` to parse the HTTP response body of [`ReplicationController::patch_namespaced_replication_controller_status`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum PatchNamespacedReplicationControllerStatusResponse {
Ok(crate::v1_15::api::core::v1::ReplicationController),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for PatchNamespacedReplicationControllerStatusResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((PatchNamespacedReplicationControllerStatusResponse::Ok(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((PatchNamespacedReplicationControllerStatusResponse::Other(result), read))
},
}
}
}
// Generated from operation readCoreV1NamespacedReplicationController
impl ReplicationController {
/// read the specified ReplicationController
///
/// Use the returned [`crate::ResponseBody`]`<`[`ReadNamespacedReplicationControllerResponse`]`>` constructor, or [`ReadNamespacedReplicationControllerResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the ReplicationController
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn read_namespaced_replication_controller(
name: &str,
namespace: &str,
optional: ReadNamespacedReplicationControllerOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ReadNamespacedReplicationControllerResponse>), crate::RequestError> {
let ReadNamespacedReplicationControllerOptional {
exact,
export,
pretty,
} = optional;
let __url = format!("/api/v1/namespaces/{namespace}/replicationcontrollers/{name}?",
name = crate::url::percent_encoding::percent_encode(name.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
if let Some(exact) = exact {
__query_pairs.append_pair("exact", &exact.to_string());
}
if let Some(export) = export {
__query_pairs.append_pair("export", &export.to_string());
}
if let Some(pretty) = pretty {
__query_pairs.append_pair("pretty", pretty);
}
let __url = __query_pairs.finish();
let mut __request = http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Optional parameters of [`ReplicationController::read_namespaced_replication_controller`]
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct ReadNamespacedReplicationControllerOptional<'a> {
/// Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
pub exact: Option<bool>,
/// Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
pub export: Option<bool>,
/// If 'true', then the output is pretty printed.
pub pretty: Option<&'a str>,
}
/// Use `<ReadNamespacedReplicationControllerResponse as Response>::try_from_parts` to parse the HTTP response body of [`ReplicationController::read_namespaced_replication_controller`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ReadNamespacedReplicationControllerResponse {
Ok(crate::v1_15::api::core::v1::ReplicationController),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ReadNamespacedReplicationControllerResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((ReadNamespacedReplicationControllerResponse::Ok(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((ReadNamespacedReplicationControllerResponse::Other(result), read))
},
}
}
}
// Generated from operation readCoreV1NamespacedReplicationControllerStatus
impl ReplicationController {
/// read status of the specified ReplicationController
///
/// Use the returned [`crate::ResponseBody`]`<`[`ReadNamespacedReplicationControllerStatusResponse`]`>` constructor, or [`ReadNamespacedReplicationControllerStatusResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the ReplicationController
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn read_namespaced_replication_controller_status(
name: &str,
namespace: &str,
optional: ReadNamespacedReplicationControllerStatusOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ReadNamespacedReplicationControllerStatusResponse>), crate::RequestError> {
let ReadNamespacedReplicationControllerStatusOptional {
pretty,
} = optional;
let __url = format!("/api/v1/namespaces/{namespace}/replicationcontrollers/{name}/status?",
name = crate::url::percent_encoding::percent_encode(name.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
if let Some(pretty) = pretty {
__query_pairs.append_pair("pretty", pretty);
}
let __url = __query_pairs.finish();
let mut __request = http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Optional parameters of [`ReplicationController::read_namespaced_replication_controller_status`]
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct ReadNamespacedReplicationControllerStatusOptional<'a> {
/// If 'true', then the output is pretty printed.
pub pretty: Option<&'a str>,
}
/// Use `<ReadNamespacedReplicationControllerStatusResponse as Response>::try_from_parts` to parse the HTTP response body of [`ReplicationController::read_namespaced_replication_controller_status`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ReadNamespacedReplicationControllerStatusResponse {
Ok(crate::v1_15::api::core::v1::ReplicationController),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ReadNamespacedReplicationControllerStatusResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((ReadNamespacedReplicationControllerStatusResponse::Ok(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((ReadNamespacedReplicationControllerStatusResponse::Other(result), read))
},
}
}
}
// Generated from operation replaceCoreV1NamespacedReplicationController
impl ReplicationController {
/// replace the specified ReplicationController
///
/// Use the returned [`crate::ResponseBody`]`<`[`ReplaceNamespacedReplicationControllerResponse`]`>` constructor, or [`ReplaceNamespacedReplicationControllerResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the ReplicationController
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn replace_namespaced_replication_controller(
name: &str,
namespace: &str,
body: &crate::v1_15::api::core::v1::ReplicationController,
optional: ReplaceNamespacedReplicationControllerOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ReplaceNamespacedReplicationControllerResponse>), crate::RequestError> {
let ReplaceNamespacedReplicationControllerOptional {
dry_run,
field_manager,
pretty,
} = optional;
let __url = format!("/api/v1/namespaces/{namespace}/replicationcontrollers/{name}?",
name = crate::url::percent_encoding::percent_encode(name.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
if let Some(dry_run) = dry_run {
__query_pairs.append_pair("dryRun", dry_run);
}
if let Some(field_manager) = field_manager {
__query_pairs.append_pair("fieldManager", field_manager);
}
if let Some(pretty) = pretty {
__query_pairs.append_pair("pretty", pretty);
}
let __url = __query_pairs.finish();
let mut __request = http::Request::put(__url);
let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
__request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Optional parameters of [`ReplicationController::replace_namespaced_replication_controller`]
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct ReplaceNamespacedReplicationControllerOptional<'a> {
/// When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
pub dry_run: Option<&'a str>,
/// fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
pub field_manager: Option<&'a str>,
/// If 'true', then the output is pretty printed.
pub pretty: Option<&'a str>,
}
/// Use `<ReplaceNamespacedReplicationControllerResponse as Response>::try_from_parts` to parse the HTTP response body of [`ReplicationController::replace_namespaced_replication_controller`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ReplaceNamespacedReplicationControllerResponse {
Ok(crate::v1_15::api::core::v1::ReplicationController),
Created(crate::v1_15::api::core::v1::ReplicationController),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ReplaceNamespacedReplicationControllerResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((ReplaceNamespacedReplicationControllerResponse::Ok(result), buf.len()))
},
http::StatusCode::CREATED => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((ReplaceNamespacedReplicationControllerResponse::Created(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((ReplaceNamespacedReplicationControllerResponse::Other(result), read))
},
}
}
}
// Generated from operation replaceCoreV1NamespacedReplicationControllerStatus
impl ReplicationController {
/// replace status of the specified ReplicationController
///
/// Use the returned [`crate::ResponseBody`]`<`[`ReplaceNamespacedReplicationControllerStatusResponse`]`>` constructor, or [`ReplaceNamespacedReplicationControllerStatusResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the ReplicationController
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn replace_namespaced_replication_controller_status(
name: &str,
namespace: &str,
body: &crate::v1_15::api::core::v1::ReplicationController,
optional: ReplaceNamespacedReplicationControllerStatusOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ReplaceNamespacedReplicationControllerStatusResponse>), crate::RequestError> {
let ReplaceNamespacedReplicationControllerStatusOptional {
dry_run,
field_manager,
pretty,
} = optional;
let __url = format!("/api/v1/namespaces/{namespace}/replicationcontrollers/{name}/status?",
name = crate::url::percent_encoding::percent_encode(name.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
if let Some(dry_run) = dry_run {
__query_pairs.append_pair("dryRun", dry_run);
}
if let Some(field_manager) = field_manager {
__query_pairs.append_pair("fieldManager", field_manager);
}
if let Some(pretty) = pretty {
__query_pairs.append_pair("pretty", pretty);
}
let __url = __query_pairs.finish();
let mut __request = http::Request::put(__url);
let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
__request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Optional parameters of [`ReplicationController::replace_namespaced_replication_controller_status`]
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct ReplaceNamespacedReplicationControllerStatusOptional<'a> {
/// When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
pub dry_run: Option<&'a str>,
/// fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
pub field_manager: Option<&'a str>,
/// If 'true', then the output is pretty printed.
pub pretty: Option<&'a str>,
}
/// Use `<ReplaceNamespacedReplicationControllerStatusResponse as Response>::try_from_parts` to parse the HTTP response body of [`ReplicationController::replace_namespaced_replication_controller_status`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ReplaceNamespacedReplicationControllerStatusResponse {
Ok(crate::v1_15::api::core::v1::ReplicationController),
Created(crate::v1_15::api::core::v1::ReplicationController),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ReplaceNamespacedReplicationControllerStatusResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((ReplaceNamespacedReplicationControllerStatusResponse::Ok(result), buf.len()))
},
http::StatusCode::CREATED => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((ReplaceNamespacedReplicationControllerStatusResponse::Created(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((ReplaceNamespacedReplicationControllerStatusResponse::Other(result), read))
},
}
}
}
// Generated from operation watchCoreV1NamespacedReplicationController
impl ReplicationController {
/// list or watch objects of kind ReplicationController
///
/// This operation only supports watching one item, or a list of items, of this type for changes.
///
/// Use the returned [`crate::ResponseBody`]`<`[`WatchNamespacedReplicationControllerResponse`]`>` constructor, or [`WatchNamespacedReplicationControllerResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn watch_namespaced_replication_controller(
namespace: &str,
optional: crate::v1_15::WatchOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<WatchNamespacedReplicationControllerResponse>), crate::RequestError> {
let __url = format!("/api/v1/namespaces/{namespace}/replicationcontrollers?",
namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let mut __request = http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Use `<WatchNamespacedReplicationControllerResponse as Response>::try_from_parts` to parse the HTTP response body of [`ReplicationController::watch_namespaced_replication_controller`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum WatchNamespacedReplicationControllerResponse {
Ok(crate::v1_15::apimachinery::pkg::apis::meta::v1::WatchEvent<ReplicationController>),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for WatchNamespacedReplicationControllerResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let mut deserializer = serde_json::Deserializer::from_slice(buf).into_iter();
let (result, byte_offset) = match deserializer.next() {
Some(Ok(value)) => (value, deserializer.byte_offset()),
Some(Err(ref err)) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Some(Err(err)) => return Err(crate::ResponseError::Json(err)),
None => return Err(crate::ResponseError::NeedMoreData),
};
Ok((WatchNamespacedReplicationControllerResponse::Ok(result), byte_offset))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((WatchNamespacedReplicationControllerResponse::Other(result), read))
},
}
}
}
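// Note: unlike the read/replace responses above, a watch response is parsed with a
// streaming `serde_json` deserializer and reports the deserializer's `byte_offset`
// instead of `buf.len()`, because a watch stream carries many concatenated JSON
// events; the caller consumes that many bytes and calls `try_from_parts` again for
// the next event.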
// Generated from operation watchCoreV1ReplicationControllerForAllNamespaces
impl ReplicationController {
/// list or watch objects of kind ReplicationController
///
/// This operation only supports watching one item, or a list of items, of this type for changes.
///
/// Use the returned [`crate::ResponseBody`]`<`[`WatchReplicationControllerForAllNamespacesResponse`]`>` constructor, or [`WatchReplicationControllerForAllNamespacesResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn watch_replication_controller_for_all_namespaces(
optional: crate::v1_15::WatchOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<WatchReplicationControllerForAllNamespacesResponse>), crate::RequestError> {
let __url = "/api/v1/replicationcontrollers?".to_owned();
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let mut __request = http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Use `<WatchReplicationControllerForAllNamespacesResponse as Response>::try_from_parts` to parse the HTTP response body of [`ReplicationController::watch_replication_controller_for_all_namespaces`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum WatchReplicationControllerForAllNamespacesResponse {
Ok(crate::v1_15::apimachinery::pkg::apis::meta::v1::WatchEvent<ReplicationController>),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for WatchReplicationControllerForAllNamespacesResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let mut deserializer = serde_json::Deserializer::from_slice(buf).into_iter();
let (result, byte_offset) = match deserializer.next() {
Some(Ok(value)) => (value, deserializer.byte_offset()),
Some(Err(ref err)) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Some(Err(err)) => return Err(crate::ResponseError::Json(err)),
None => return Err(crate::ResponseError::NeedMoreData),
};
Ok((WatchReplicationControllerForAllNamespacesResponse::Ok(result), byte_offset))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((WatchReplicationControllerForAllNamespacesResponse::Other(result), read))
},
}
}
}
// End /v1/ReplicationController
impl crate::Resource for ReplicationController {
fn api_version() -> &'static str {
"v1"
}
fn group() -> &'static str {
""
}
fn kind() -> &'static str {
"ReplicationController"
}
fn version() -> &'static str {
"v1"
}
}
impl crate::Metadata for ReplicationController {
type Ty = crate::v1_15::apimachinery::pkg::apis::meta::v1::ObjectMeta;
fn metadata(&self) -> Option<&<Self as crate::Metadata>::Ty> {
self.metadata.as_ref()
}
}
impl<'de> serde::Deserialize<'de> for ReplicationController {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
#[allow(non_camel_case_types)]
enum Field {
Key_api_version,
Key_kind,
Key_metadata,
Key_spec,
Key_status,
Other,
}
impl<'de> serde::Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
struct Visitor;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = Field;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "field identifier")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error {
Ok(match v {
"apiVersion" => Field::Key_api_version,
"kind" => Field::Key_kind,
"metadata" => Field::Key_metadata,
"spec" => Field::Key_spec,
"status" => Field::Key_status,
_ => Field::Other,
})
}
}
deserializer.deserialize_identifier(Visitor)
}
}
struct Visitor;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = ReplicationController;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "struct ReplicationController")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'de> {
let mut value_metadata: Option<crate::v1_15::apimachinery::pkg::apis::meta::v1::ObjectMeta> = None;
let mut value_spec: Option<crate::v1_15::api::core::v1::ReplicationControllerSpec> = None;
let mut value_status: Option<crate::v1_15::api::core::v1::ReplicationControllerStatus> = None;
while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? {
match key {
Field::Key_api_version => {
let value_api_version: String = serde::de::MapAccess::next_value(&mut map)?;
if value_api_version != <Self::Value as crate::Resource>::api_version() {
return Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(&value_api_version), &<Self::Value as crate::Resource>::api_version()));
}
},
Field::Key_kind => {
let value_kind: String = serde::de::MapAccess::next_value(&mut map)?;
if value_kind != <Self::Value as crate::Resource>::kind() {
return Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(&value_kind), &<Self::Value as crate::Resource>::kind()));
}
},
Field::Key_metadata => value_metadata = serde::de::MapAccess::next_value(&mut map)?,
Field::Key_spec => value_spec = serde::de::MapAccess::next_value(&mut map)?,
Field::Key_status => value_status = serde::de::MapAccess::next_value(&mut map)?,
Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; },
}
}
Ok(ReplicationController {
metadata: value_metadata,
spec: value_spec,
status: value_status,
})
}
}
deserializer.deserialize_struct(
"ReplicationController",
&[
"apiVersion",
"kind",
"metadata",
"spec",
"status",
],
Visitor,
)
}
}
impl serde::Serialize for ReplicationController {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
let mut state = serializer.serialize_struct(
"ReplicationController",
2 +
self.metadata.as_ref().map_or(0, |_| 1) +
self.spec.as_ref().map_or(0, |_| 1) +
self.status.as_ref().map_or(0, |_| 1),
)?;
serde::ser::SerializeStruct::serialize_field(&mut state, "apiVersion", <Self as crate::Resource>::api_version())?;
serde::ser::SerializeStruct::serialize_field(&mut state, "kind", <Self as crate::Resource>::kind())?;
if let Some(value) = &self.metadata {
serde::ser::SerializeStruct::serialize_field(&mut state, "metadata", value)?;
}
if let Some(value) = &self.spec {
serde::ser::SerializeStruct::serialize_field(&mut state, "spec", value)?;
}
if let Some(value) = &self.status {
serde::ser::SerializeStruct::serialize_field(&mut state, "status", value)?;
}
serde::ser::SerializeStruct::end(state)
}
}
imports_spec.ts
/**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import * as ts from 'typescript';
import {absoluteFrom, getFileSystem, getSourceFileOrError} from '../../file_system';
import {runInEachFileSystem} from '../../file_system/testing';
import {NOOP_PERF_RECORDER} from '../../perf';
import {ImportGraph} from '../src/imports';
import {importPath, makeProgramFromGraph} from './util';
runInEachFileSystem(() => {
describe('ImportGraph', () => {
let _: typeof absoluteFrom;
beforeEach(() => _ = absoluteFrom);
describe('importsOf()', () => {
it('should record imports of a simple program', () => {
const {program, graph} = makeImportGraph('a:b;b:c;c');
const a = getSourceFileOrError(program, (_('/a.ts')));
const b = getSourceFileOrError(program, (_('/b.ts')));
const c = getSourceFileOrError(program, (_('/c.ts')));
expect(importsToString(graph.importsOf(a))).toBe('b');
expect(importsToString(graph.importsOf(b))).toBe('c');
});
});
describe('findPath()', () => {
it('should be able to compute the path between two source files if there is a cycle', () => {
const {program, graph} = makeImportGraph('a:*b,*c;b:*e,*f;c:*g,*h;e:f;f;g:e;h:g');
const a = getSourceFileOrError(program, (_('/a.ts')));
const b = getSourceFileOrError(program, (_('/b.ts')));
const c = getSourceFileOrError(program, (_('/c.ts')));
const e = getSourceFileOrError(program, (_('/e.ts')));
expect(importPath(graph.findPath(a, a)!)).toBe('a');
expect(importPath(graph.findPath(a, b)!)).toBe('a,b');
expect(importPath(graph.findPath(c, e)!)).toBe('c,g,e');
expect(graph.findPath(e, c)).toBe(null);
expect(graph.findPath(b, c)).toBe(null);
});
it('should handle circular dependencies within the path between `from` and `to`', () => {
// a -> b -> c -> d
// ^----/ |
// ^---------/
const {program, graph} = makeImportGraph('a:b;b:a,c;c:a,d;d');
const a = getSourceFileOrError(program, (_('/a.ts')));
const c = getSourceFileOrError(program, (_('/c.ts')));
const d = getSourceFileOrError(program, (_('/d.ts')));
expect(importPath(graph.findPath(a, d)!)).toBe('a,b,c,d');
});
});
});
function makeImportGraph(graph: string): {program: ts.Program, graph: ImportGraph} {
const {program} = makeProgramFromGraph(getFileSystem(), graph);
return {
program,
graph: new ImportGraph(program.getTypeChecker(), NOOP_PERF_RECORDER),
};
}
function importsToString(imports: Set<ts.SourceFile>): string {
const fs = getFileSystem();
return Array.from(imports)
.map(sf => fs.basename(sf.fileName).replace('.ts', ''))
.sort()
.join(',');
}
});
IdentifyReplyCommandFirmwareVersion.go
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package model
import (
"github.com/apache/plc4x/plc4go/internal/plc4go/spi/utils"
"github.com/pkg/errors"
)
// Code generated by code-generation. DO NOT EDIT.
// IdentifyReplyCommandFirmwareVersion is the data-structure of this message
type IdentifyReplyCommandFirmwareVersion struct {
*IdentifyReplyCommand
FirmwareVersion string
}
// IIdentifyReplyCommandFirmwareVersion is the corresponding interface of IdentifyReplyCommandFirmwareVersion
type IIdentifyReplyCommandFirmwareVersion interface {
IIdentifyReplyCommand
// GetFirmwareVersion returns FirmwareVersion (property field)
GetFirmwareVersion() string
// GetLengthInBytes returns the length in bytes
GetLengthInBytes() uint16
// GetLengthInBits returns the length in bits
GetLengthInBits() uint16
// Serialize serializes this type
Serialize(writeBuffer utils.WriteBuffer) error
}
///////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////
/////////////////////// Accessors for discriminator values.
///////////////////////
func (m *IdentifyReplyCommandFirmwareVersion) GetAttribute() Attribute {
return Attribute_FirmwareVersion
}
///////////////////////
///////////////////////
///////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////
func (m *IdentifyReplyCommandFirmwareVersion) InitializeParent(parent *IdentifyReplyCommand) {}
func (m *IdentifyReplyCommandFirmwareVersion) GetParent() *IdentifyReplyCommand {
return m.IdentifyReplyCommand
}
///////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////
/////////////////////// Accessors for property fields.
///////////////////////
func (m *IdentifyReplyCommandFirmwareVersion) GetFirmwareVersion() string {
return m.FirmwareVersion
}
///////////////////////
///////////////////////
///////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////
// NewIdentifyReplyCommandFirmwareVersion factory function for IdentifyReplyCommandFirmwareVersion
func NewIdentifyReplyCommandFirmwareVersion(firmwareVersion string) *IdentifyReplyCommandFirmwareVersion {
_result := &IdentifyReplyCommandFirmwareVersion{
FirmwareVersion: firmwareVersion,
IdentifyReplyCommand: NewIdentifyReplyCommand(),
}
_result.Child = _result
return _result
}
func CastIdentifyReplyCommandFirmwareVersion(structType interface{}) *IdentifyReplyCommandFirmwareVersion {
if casted, ok := structType.(IdentifyReplyCommandFirmwareVersion); ok {
return &casted
}
if casted, ok := structType.(*IdentifyReplyCommandFirmwareVersion); ok {
return casted
}
if casted, ok := structType.(IdentifyReplyCommand); ok {
return CastIdentifyReplyCommandFirmwareVersion(casted.Child)
}
if casted, ok := structType.(*IdentifyReplyCommand); ok {
return CastIdentifyReplyCommandFirmwareVersion(casted.Child)
}
return nil
}
func (m *IdentifyReplyCommandFirmwareVersion) GetTypeName() string {
return "IdentifyReplyCommandFirmwareVersion" | }
func (m *IdentifyReplyCommandFirmwareVersion) GetLengthInBitsConditional(lastItem bool) uint16 {
lengthInBits := uint16(m.GetParentLengthInBits())
// Simple field (firmwareVersion)
lengthInBits += 64
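// (firmwareVersion is serialized as a fixed 64-bit / 8-character string)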
return lengthInBits
}
func (m *IdentifyReplyCommandFirmwareVersion) GetLengthInBytes() uint16 {
return m.GetLengthInBits() / 8
}
func IdentifyReplyCommandFirmwareVersionParse(readBuffer utils.ReadBuffer, attribute Attribute) (*IdentifyReplyCommandFirmwareVersion, error) {
if pullErr := readBuffer.PullContext("IdentifyReplyCommandFirmwareVersion"); pullErr != nil {
return nil, pullErr
}
currentPos := readBuffer.GetPos()
_ = currentPos
// Simple Field (firmwareVersion)
_firmwareVersion, _firmwareVersionErr := readBuffer.ReadString("firmwareVersion", uint32(64))
if _firmwareVersionErr != nil {
return nil, errors.Wrap(_firmwareVersionErr, "Error parsing 'firmwareVersion' field")
}
firmwareVersion := _firmwareVersion
if closeErr := readBuffer.CloseContext("IdentifyReplyCommandFirmwareVersion"); closeErr != nil {
return nil, closeErr
}
// Create a partially initialized instance
_child := &IdentifyReplyCommandFirmwareVersion{
FirmwareVersion: firmwareVersion,
IdentifyReplyCommand: &IdentifyReplyCommand{},
}
_child.IdentifyReplyCommand.Child = _child
return _child, nil
}
func (m *IdentifyReplyCommandFirmwareVersion) Serialize(writeBuffer utils.WriteBuffer) error {
ser := func() error {
if pushErr := writeBuffer.PushContext("IdentifyReplyCommandFirmwareVersion"); pushErr != nil {
return pushErr
}
// Simple Field (firmwareVersion)
firmwareVersion := string(m.FirmwareVersion)
_firmwareVersionErr := writeBuffer.WriteString("firmwareVersion", uint32(64), "UTF-8", (firmwareVersion))
if _firmwareVersionErr != nil {
return errors.Wrap(_firmwareVersionErr, "Error serializing 'firmwareVersion' field")
}
if popErr := writeBuffer.PopContext("IdentifyReplyCommandFirmwareVersion"); popErr != nil {
return popErr
}
return nil
}
return m.SerializeParent(writeBuffer, m, ser)
}
func (m *IdentifyReplyCommandFirmwareVersion) String() string {
if m == nil {
return "<nil>"
}
buffer := utils.NewBoxedWriteBufferWithOptions(true, true)
if err := m.Serialize(buffer); err != nil {
return err.Error()
}
return buffer.GetBox().String()
}
genembed2.go
// run
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Test for declaration and use of a parameterized embedded field.
package main
import (
"fmt"
"sync"
)
type MyStruct[T any] struct {
val T
}
type Lockable[T any] struct {
MyStruct[T]
mu sync.Mutex
}
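// Lockable embeds the instantiated generic type MyStruct[T]; the embedded field
// is promoted under its type name, so the methods below refer to it as l.MyStruct.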
// Get returns the value stored in a Lockable.
func (l *Lockable[T]) Get() T {
l.mu.Lock()
defer l.mu.Unlock()
return l.MyStruct.val
}
// Set sets the value in a Lockable.
func (l *Lockable[T]) Set(v T) {
l.mu.Lock()
defer l.mu.Unlock()
l.MyStruct = MyStruct[T]{v}
}
func main() {
var li Lockable[int]
li.Set(5)
if got, want := li.Get(), 5; got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
}
users.py
from flask import jsonify, request, url_for, abort
from app import db
from app.api import bp
from app.api.auth import token_auth
from app.api.errors import bad_request
from app.models import User
@bp.route('/users/<int:id>', methods=['GET'])
@token_auth.login_required
def get_user(id):
return jsonify(User.query.get_or_404(id).to_dict())
@bp.route('/users', methods=['GET'])
@token_auth.login_required
def get_users():
page = request.args.get('page', 1, type=int)
per_page = min(request.args.get('per_page', 10, type=int), 100)
data = User.to_collection_dict(User.query, page, per_page, 'api.get_users')
return jsonify(data)
@bp.route('/users/<int:id>/followers', methods=['GET'])
@token_auth.login_required
def get_followers(id):
user = User.query.get_or_404(id)
page = request.args.get('page', 1, type=int)
per_page = min(request.args.get('per_page', 10, type=int), 100)
data = User.to_collection_dict(user.followers, page, per_page,
'api.get_followers', id=id)
return jsonify(data)
@bp.route('/users/<int:id>/followed', methods=['GET'])
@token_auth.login_required
def get_followed(id):
user = User.query.get_or_404(id)
page = request.args.get('page', 1, type=int)
per_page = min(request.args.get('per_page', 10, type=int), 100)
data = User.to_collection_dict(user.followed, page, per_page,
'api.get_followed', id=id)
return jsonify(data)
@bp.route('/users', methods=['POST'])
def create_user():
data = request.get_json() or {}
if 'username' not in data or 'email' not in data or 'password' not in data:
return bad_request('Request must include username, email and password')
if User.query.filter_by(username=data['username']).first():
return bad_request('Please use a different username')
if User.query.filter_by(email=data['email']).first():
return bad_request('Please use a different email')
user = User()
user.from_dict(data, new_user=True)
db.session.add(user)
db.session.commit()
response = jsonify(user.to_dict())
response.status_code = 201
response.headers['Location'] = url_for('api.get_user', id=user.id)
return response
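# Example request (sketch; host, port and the '/api' URL prefix depend on how the
# blueprint is registered and are assumptions here):
#   curl -X POST http://localhost:5000/api/users \
#        -H "Content-Type: application/json" \
#        -d '{"username": "susan", "email": "susan@example.com", "password": "dog"}'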
@bp.route('/users/<int:id>', methods=['PUT'])
@token_auth.login_required
def update_user(id):
if token_auth.current_user().id != id:
abort(403)
user = User.query.get_or_404(id)
data = request.get_json() or {}
if 'username' in data and data['username'] != user.username and \
User.query.filter_by(username=data['username']).first():
return bad_request('Please use a different username')
if 'email' in data and data['email'] != user.email and \
User.query.filter_by(email=data['email']).first():
return bad_request('Please use a different email')
user.from_dict(data, new_user=False)
db.session.commit()
return jsonify(user.to_dict())
tcp.go
package meters
import (
"time"
"github.com/grid-x/modbus"
)
// TCP is a TCP modbus connection
type TCP struct {
address string
Client modbus.Client
Handler *modbus.TCPClientHandler
}
// NewTCPClientHandler creates a TCP modbus handler
func NewTCPClientHandler(device string) *modbus.TCPClientHandler {
handler := modbus.NewTCPClientHandler(device)
// set default timings
handler.Timeout = 1 * time.Second
handler.ProtocolRecoveryTimeout = 10 * time.Second
handler.LinkRecoveryTimeout = 15 * time.Second
return handler
}
// NewTCP creates a TCP modbus client
func NewTCP(address string) Connection {
handler := NewTCPClientHandler(address)
client := modbus.NewClient(handler)
b := &TCP{
address: address,
Client: client,
Handler: handler,
}
return b
}
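// Usage sketch (assumes a reachable Modbus TCP device; the exact read-call
// signature depends on the grid-x/modbus version in use):
//
//	conn := NewTCP("192.168.0.10:502")
//	conn.Slave(1)
//	data, err := conn.ModbusClient().ReadHoldingRegisters(0, 2)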
// String returns the bus connection address (TCP)
func (b *TCP) String() string {
return b.address
}
// ModbusClient returns the TCP modbus client
func (b *TCP) ModbusClient() modbus.Client {
return b.Client
}
// Logger sets a logging instance for physical bus operations
func (b *TCP) Logger(l Logger) {
b.Handler.Logger = l
}
// Slave sets the modbus device id for the following operations
func (b *TCP) Slave(deviceID uint8) {
b.Handler.SetSlave(deviceID)
}
// Timeout sets the modbus timeout
func (b *TCP) Timeout(timeout time.Duration) time.Duration {
t := b.Handler.Timeout
b.Handler.Timeout = timeout
return t
}
// Close closes the modbus connection.
// This forces the modbus client to reopen the connection before the next bus operations.
func (b *TCP) Close() {
b.Handler.Close()
}
specified_location.py
# from .provider_test import ProviderTest, TestSource
from gunpowder import (BatchProvider, ArrayKeys, ArraySpec, Roi, Batch,
Coordinate, SpecifiedLocation, build,
BatchRequest, Array, ArrayKey)
import numpy as np
import unittest
class TestSourceSpecifiedLocation(BatchProvider):
def __init__(self, roi, voxel_size):
self.voxel_size = Coordinate(voxel_size)
self.roi = roi
size = self.roi.get_shape() / self.voxel_size
self.data = np.arange(np.prod(size)).reshape(size)
def setup(self):
self.provides(
ArrayKeys.RAW,
ArraySpec(
roi=self.roi,
voxel_size=self.voxel_size))
def provide(self, request):
batch = Batch()
spec = request[ArrayKeys.RAW].copy()
spec.voxel_size = self.voxel_size
size = spec.roi.get_shape() / spec.voxel_size
offset = spec.roi.get_offset() / spec.voxel_size
slce = tuple(slice(o, o + s) for o, s in zip(offset, size))
batch.arrays[ArrayKeys.RAW] = Array(
data=self.data[slce],
spec=spec)
return batch
class TestSpecifiedLocation(unittest.TestCase):
def setUp(self):
ArrayKey('RAW')
def test_simple(self):
locations = [
[0, 0, 0],
[100, 100, 100],
[91, 20, 20],
[42, 24, 57]
]
pipeline = (
TestSourceSpecifiedLocation(
roi=Roi((0, 0, 0), (100, 100, 100)),
voxel_size=(1, 1, 1)) +
SpecifiedLocation(
locations,
choose_randomly=False,
extra_data=None,
jitter=None)
)
with build(pipeline):
batch = pipeline.request_batch(
BatchRequest(
{
ArrayKeys.RAW: ArraySpec(
roi=Roi((0, 0, 0), (20, 20, 20)))
}))
# first three locations are skipped
# fourth should start at [32, 14, 47] of self.data
self.assertEqual(batch.arrays[ArrayKeys.RAW].data[0, 0, 0], 321447)
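# (self.data is filled with np.arange over a (100, 100, 100) array, so the
# value at [z, y, x] is z*100*100 + y*100 + x; 32*10000 + 14*100 + 47 == 321447)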
def test_voxel_size(self):
locations = [
[0, 0, 0],
[91, 20, 20],
[42, 24, 57]
]
pipeline = (
TestSourceSpecifiedLocation(
roi=Roi((0, 0, 0), (100, 100, 100)),
voxel_size=(5, 2, 2)) +
SpecifiedLocation(
locations,
choose_randomly=False,
extra_data=None,
jitter=None)
)
with build(pipeline):
batch = pipeline.request_batch(
BatchRequest(
{
ArrayKeys.RAW: ArraySpec(
roi=Roi((0, 0, 0), (20, 20, 20)))
}))
# first locations is skipped
# second should start at [80/5, 10/2, 10/2] = [16, 5, 5]
self.assertEqual(batch.arrays[ArrayKeys.RAW].data[0, 0, 0], 40255)
batch = pipeline.request_batch(
BatchRequest(
{
ArrayKeys.RAW: ArraySpec(
roi=Roi((0, 0, 0), (20, 20, 20)))
}))
# third should start at [30/5, 14/2, 48/2] = [6, 7, 24]
self.assertEqual(batch.arrays[ArrayKeys.RAW].data[0, 0, 0], 15374)
def test_jitter_and_random(self):
locations = [
[0, 0, 0],
[91, 20, 20],
[42, 24, 57]
]
pipeline = (
TestSourceSpecifiedLocation(
roi=Roi((0, 0, 0), (100, 100, 100)),
voxel_size=(5, 2, 2)) +
SpecifiedLocation(
locations,
choose_randomly=True,
extra_data=None,
jitter=(5, 5, 5))
)
with build(pipeline):
batch = pipeline.request_batch(
BatchRequest(
{
ArrayKeys.RAW: ArraySpec(
roi=Roi((0, 0, 0), (20, 20, 20)))
}))
# Unclear what result should be, so no errors means passing
self.assertTrue(batch.arrays[ArrayKeys.RAW].data[0, 0, 0] > 0)
layout.js
/**
* Layout component that queries for data
* with Gatsby's useStaticQuery component
*
* See: https://www.gatsbyjs.org/docs/use-static-query/
*/
import React from "react";
import PropTypes from "prop-types";
import { useStaticQuery, graphql } from "gatsby";
import Container from "@material-ui/core/Container";
import Footer from "./footer";
import Header from "./header";
import "./layout.css";
const Layout = ({ children }) => {
const data = useStaticQuery(graphql`
query SiteTitleQuery {
site {
siteMetadata {
title
}
}
}
`);
return (
<Container maxWidth="lg">
<Header siteTitle={data.site.siteMetadata.title} />
<div
style={{
margin: `0 auto`,
maxWidth: 960,
padding: `0 1.0875rem 1.45rem`
}}
>
<main>{children}</main>
<Footer />
</div>
</Container>
);
};
Layout.propTypes = {
children: PropTypes.node.isRequired
};
export default Layout;
index.js
import './index.css';
import Uploader from './uploader';
const LOADER_TIMEOUT = 500;
const Icon =
'<svg width="12" height="14" xmlns="http://www.w3.org/2000/svg"><path d="M4.109 2.08H2.942a.862.862 0 0 0-.862.862v8.116c0 .476.386.862.862.862h5.529a.862.862 0 0 0 .862-.862V7.695H4.11V2.08zm1.905.497v3.29h3.312l-3.312-3.29zM2.942 0h2.74c.326.02.566.076.719.165.153.09.484.413.992.973l3.21 3.346c.347.413.557.683.631.811.111.193.179.446.179.579v5.184A2.942 2.942 0 0 1 8.471 14H2.942A2.942 2.942 0 0 1 0 11.058V2.942A2.942 2.942 0 0 1 2.942 0z" fill-rule="nonzero"/></svg>';
const FileIcon =
'<svg xmlns="http://www.w3.org/2000/svg" width="32" height="40"><g fill="#A8ACB8" fill-rule="evenodd"><path fill-rule="nonzero" d="M17 0l15 14V3v34a3 3 0 0 1-3 3H3a3 3 0 0 1-3-3V3a3 3 0 0 1 3-3h20-6zm0 2H3a1 1 0 0 0-1 1v34a1 1 0 0 0 1 1h26a1 1 0 0 0 1-1V14H17V2zm2 10h7.926L19 4.602V12z"/><path d="M7 22h18v2H7zm0 4h18v2H7zm0 4h18v2H7z"/></g></svg>';
const CustomFileIcon =
'<svg xmlns="http://www.w3.org/2000/svg" width="32" height="40"><path d="M17 0l15 14V3v34a3 3 0 0 1-3 3H3a3 3 0 0 1-3-3V3a3 3 0 0 1 3-3h20-6zm0 2H3a1 1 0 0 0-1 1v34a1 1 0 0 0 1 1h26a1 1 0 0 0 1-1V14H17V2zm2 10h7.926L19 4.602V12z"/></svg>';
const DownloadIcon =
'<svg xmlns="http://www.w3.org/2000/svg" width="17pt" height="17pt" viewBox="0 0 17 17"><path d="M9.457 8.945V2.848A.959.959 0 0 0 8.5 1.89a.959.959 0 0 0-.957.957v6.097L4.488 5.891a.952.952 0 0 0-1.351 0 .952.952 0 0 0 0 1.351l4.687 4.688a.955.955 0 0 0 1.352 0l4.687-4.688a.952.952 0 0 0 0-1.351.952.952 0 0 0-1.351 0zM3.59 14.937h9.82a.953.953 0 0 0 .953-.957.952.952 0 0 0-.953-.953H3.59a.952.952 0 0 0-.953.953c0 .532.425.957.953.957zm0 0" fill-rule="evenodd"/></svg>';
/**
* @typedef {object} AttachesToolData
* @description Attaches Tool's output data format
* @property {AttachesFileData} file - object containing information about the file
* @property {string} title - file's title
*/
/**
* @typedef {object} AttachesFileData
* @description Attaches Tool's file format
* @property {string} [url] - file's upload url
* @property {string} [size] - file's size
* @property {string} [extension] - file's extension
* @property {string} [name] - file's name
*/
/**
* @typedef {object} FileData
* @description Attaches Tool's response from backend
* @property {string} url - file's url
* @property {string} name - file's name with extension
* @property {string} extension - file's extension
*/
/**
* @typedef {object} UploadResponseFormat
* @description This format expected from backend on file upload
* @property {number} success - 1 for successful uploading, 0 for failure
* @property {FileData} file - backend response with uploaded file data.
*/
/**
* @typedef {object} AttachesToolConfig
* @description Config supported by Tool
* @property {string} endpoint - file upload url
* @property {string} field - field name for uploaded file
* @property {string} types - available mime-types
* @property {string} placeholder
* @property {string} errorMessage
*/
/**
* @class AttachesTool
* @classdesc AttachesTool for Editor.js 2.0
*
* @property {API} api - Editor.js API
* @property {AttachesToolData} data
* @property {AttachesToolConfig} config
*/
export default class AttachesTool {
static get isReadOnlySupported() {
return true;
}
/**
* @param {AttachesToolData} data
* @param {Object} config
* @param {API} api
* @param {boolean} readOnly - read-only mode flag
*/
constructor({ data, config, api, readOnly }) {
this.api = api;
this.readOnly = readOnly;
this.nodes = {
wrapper: null,
button: null,
title: null,
};
this._data = {
file: {},
title: '',
};
this.config = {
endpoint: config.endpoint || '',
field: config.field || 'file',
types: config.types || '*',
buttonText: config.buttonText || 'Select file to upload',
errorMessage: config.errorMessage || 'File upload failed',
};
this.data = data;
/**
* Module for files uploading
*/
this.uploader = new Uploader({
config: this.config,
onUpload: (response) => this.onUpload(response),
onError: (error) => this.uploadingFailed(error),
});
this.enableFileUpload = this.enableFileUpload.bind(this);
}
/**
* Get Tool toolbox settings
* icon - Tool icon's SVG
* title - title to show in toolbox
*/
static get toolbox() {
return {
icon: Icon,
title: 'Attaches',
};
}
/**
* Tool's CSS classes
*/
get CSS() {
return {
baseClass: this.api.styles.block,
apiButton: this.api.styles.button,
loader: this.api.styles.loader,
/**
* Tool's classes
*/
wrapper: 'cdx-attaches',
wrapperWithFile: 'cdx-attaches--with-file',
wrapperLoading: 'cdx-attaches--loading',
button: 'cdx-attaches__button',
title: 'cdx-attaches__title',
size: 'cdx-attaches__size',
downloadButton: 'cdx-attaches__download-button',
fileInfo: 'cdx-attaches__file-info',
fileIcon: 'cdx-attaches__file-icon',
};
}
/**
* Possible files' extension colors
*/
get EXTENSIONS() {
return {
doc: '#3e74da',
docx: '#3e74da',
odt: '#3e74da',
pdf: '#d47373',
rtf: '#656ecd',
tex: '#5a5a5b',
txt: '#5a5a5b',
pptx: '#e07066',
ppt: '#e07066',
mp3: '#eab456',
mp4: '#f676a6',
xls: '#3f9e64',
html: '#2988f0',
htm: '#2988f0',
png: '#f676a6',
jpg: '#f67676',
jpeg: '#f67676',
gif: '#f6af76',
zip: '#4f566f',
rar: '#4f566f',
exe: '#e26f6f',
svg: '#bf5252',
key: '#e07066',
sketch: '#df821c',
ai: '#df821c',
psd: '#388ae5',
dmg: '#e26f6f',
json: '#2988f0',
csv: '#3f9e64',
};
}
/**
* Return Block data
* @param {HTMLElement} toolsContent
* @return {AttachesToolData}
*/
save(toolsContent) {
/**
* If file was uploaded
*/
if (this.pluginHasData()) {
const title = toolsContent.querySelector(`.${this.CSS.title}`).innerHTML;
Object.assign(this.data, { title });
}
return this.data;
}
/**
* Renders Block content
* @return {HTMLDivElement}
*/
render() {
const holder = this.make('div', this.CSS.baseClass);
this.nodes.wrapper = this.make('div', this.CSS.wrapper);
if (this.pluginHasData()) {
this.showFileData();
} else {
this.prepareUploadButton();
}
holder.appendChild(this.nodes.wrapper);
return holder;
}
/**
* Prepares button for file uploading
*/
prepareUploadButton() {
this.nodes.button = this.make('div', [this.CSS.apiButton, this.CSS.button]);
this.nodes.button.innerHTML = `${Icon} ${this.config.buttonText}`;
this.nodes.button.addEventListener('click', this.enableFileUpload);
this.nodes.wrapper.appendChild(this.nodes.button);
}
/**
* Fires after clicks on the Toolbox AttachesTool Icon
* Initiates click on the Select File button
* @public
*/
appendCallback() {
this.nodes.button.click();
}
/**
* Checks if any of Tool's fields have data
* @return {boolean}
*/
pluginHasData() {
return (
this.data.title !== '' || Object.values(this.data.file).some((item) => item !== undefined)
);
}
/**
* Allow to upload files on button click
*/
enableFileUpload() {
this.uploader.uploadSelectedFile({
onPreview: () => {
this.nodes.wrapper.classList.add(this.CSS.wrapperLoading, this.CSS.loader);
},
});
}
/**
* File uploading callback
* @param {UploadResponseFormat} response
*/
onUpload(response) {
const body = response.body;
if (body.result.code === 'RS0000') {
const { download: url, originalname: name, size } = body.data;
this.data = {
file: {
url,
extension: name.split('.').pop(),
name,
size,
},
title: name,
};
this.nodes.button.remove();
this.showFileData();
this.moveCaretToEnd(this.nodes.title);
this.nodes.title.focus();
this.removeLoader();
} else {
this.uploadingFailed(this.config.errorMessage);
}
}
/**
* Handles uploaded file's extension and appends corresponding icon
*/
appendFileIcon() {
const extension = this.data.file.extension || '';
const extensionColor = this.EXTENSIONS[extension];
const fileIcon = this.make('div', this.CSS.fileIcon, {
innerHTML: extensionColor ? CustomFileIcon : FileIcon,
});
if (extensionColor) {
fileIcon.style.color = extensionColor;
fileIcon.setAttribute('data-extension', extension);
}
this.nodes.wrapper.appendChild(fileIcon);
}
/**
* Removes tool's loader
*/
removeLoader() {
setTimeout(
() => this.nodes.wrapper.classList.remove(this.CSS.wrapperLoading, this.CSS.loader),
LOADER_TIMEOUT,
);
}
/**
* If upload is successful, show info about the file
*/
showFileData() {
this.nodes.wrapper.classList.add(this.CSS.wrapperWithFile);
const {
file: { size, url },
title,
} = this.data;
this.appendFileIcon();
const fileInfo = this.make('div', this.CSS.fileInfo);
if (title) {
this.nodes.title = this.make('div', this.CSS.title, {
contentEditable: !this.readOnly,
});
this.nodes.title.textContent = title;
fileInfo.appendChild(this.nodes.title);
}
if (size) {
let sizePrefix;
let formattedSize;
const fileSize = this.make('div', this.CSS.size);
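// `size` is assumed to be a byte count: values with six or more decimal
// digits (>= ~1 MB) are shown in MiB, smaller values in KiB.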
if (Math.log10(+size) >= 6) {
sizePrefix = 'MiB';
formattedSize = size / Math.pow(2, 20);
} else {
sizePrefix = 'KiB';
formattedSize = size / Math.pow(2, 10);
}
fileSize.textContent = formattedSize.toFixed(1);
fileSize.setAttribute('data-size', sizePrefix);
fileInfo.appendChild(fileSize);
}
this.nodes.wrapper.appendChild(fileInfo);
const downloadIcon = this.make('a', this.CSS.downloadButton, {
innerHTML: DownloadIcon,
href: url,
target: '_blank',
rel: 'nofollow noindex noreferrer',
});
this.nodes.wrapper.appendChild(downloadIcon);
}
/**
* If file uploading failed, remove loader and show notification
* @param {string} errorMessage - error message
*/
uploadingFailed(errorMessage) {
this.api.notifier.show({
message: errorMessage,
style: 'error',
});
this.removeLoader();
}
/**
* Return Attaches Tool's data
* @return {AttachesToolData}
*/
get data() {
return this._data;
}
/**
* Stores all Tool's data
* @param {AttachesToolData} data
*/
set data({ file, title }) {
this._data = Object.assign(
{},
{
file: {
url: (file && file.url) || this._data.file.url,
name: (file && file.name) || this._data.file.name,
extension: (file && file.extension) || this._data.file.extension,
size: (file && file.size) || this._data.file.size,
},
title: title || this._data.title,
},
);
}
/**
* Moves caret to the end of contentEditable element
* @param {HTMLElement} element - contentEditable element
*/
moveCaretToEnd(element) {
const range = document.createRange();
const selection = window.getSelection();
range.selectNodeContents(element);
range.collapse(false);
selection.removeAllRanges();
selection.addRange(range);
}
/**
* Helper method for elements creation
* @param tagName
* @param classNames
* @param attributes
* @return {HTMLElement}
*/
make(tagName, classNames = null, attributes = {}) {
const el = document.createElement(tagName);
if (Array.isArray(classNames)) {
el.classList.add(...classNames);
} else if (classNames) {
el.classList.add(classNames);
}
for (const attrName in attributes) {
el[attrName] = attributes[attrName];
}
return el;
}
}
listtocommaseparated.py
#! /usr/bin/env python3
"""converts list to comma separated string"""
items = ['foo', 'bar', 'xyz']
print (','.join(items))
"""list of numbers to comma separated"""
numbers = [2, 3, 5, 10]
print (','.join(map(str, numbers)))
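"""a generator expression works as well (equivalent to the map version)"""
print (','.join(str(n) for n in numbers))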
"""list of mix data"""
data = [2, 'hello', 3, 3.4]
print (','.join(map(str, data)))
server.go
package main
import (
"log"
"github.com/kataras/iris"
"github.com/kataras/iris/websocket"
// Used when "enableJWT" constant is true:
"github.com/iris-contrib/middleware/jwt"
)
// values should match with the client sides as well.
const enableJWT = true
const namespace = "default"
// if namespace is empty then simply websocket.Events{...} can be used instead.
var serverEvents = websocket.Namespaces{
namespace: websocket.Events{
websocket.OnNamespaceConnected: func(nsConn *websocket.NSConn, msg websocket.Message) error {
// with `websocket.GetContext` you can retrieve the Iris' `Context`.
ctx := websocket.GetContext(nsConn.Conn)
log.Printf("[%s] connected to namespace [%s] with IP [%s]",
nsConn, msg.Namespace,
ctx.RemoteAddr())
return nil
},
websocket.OnNamespaceDisconnect: func(nsConn *websocket.NSConn, msg websocket.Message) error {
log.Printf("[%s] disconnected from namespace [%s]", nsConn, msg.Namespace)
return nil
},
"chat": func(nsConn *websocket.NSConn, msg websocket.Message) error {
// room.String() returns -> NSConn.String() returns -> Conn.String() returns -> Conn.ID()
log.Printf("[%s] sent: %s", nsConn, string(msg.Body))
// Write message back to the client message owner with:
// nsConn.Emit("chat", msg)
// Write message to all except this client with:
nsConn.Conn.Server().Broadcast(nsConn, msg)
return nil
},
},
}
func main() {
app := iris.New()
websocketServer := websocket.New(
websocket.DefaultGorillaUpgrader, /* DefaultGobwasUpgrader can be used too. */
serverEvents)
j := jwt.New(jwt.Config{
// Extract by the "token" url,
// so the client should dial with ws://localhost:8080/echo?token=$token
Extractor: jwt.FromParameter("token"),
ValidationKeyGetter: func(token *jwt.Token) (interface{}, error) {
return []byte("My Secret"), nil
},
// When set, the middleware verifies that tokens are signed
// with the specific signing algorithm
// If the signing method is not constant the
// `Config.ValidationKeyGetter` callback field can be used
// to implement additional checks
// Important to avoid security issues described here:
// https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/
SigningMethod: jwt.SigningMethodHS256,
})
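// Sketch (assumption, not shown in this example): minting a token the middleware
// above would accept, e.g. from a login handler:
//
//	token := jwt.NewTokenWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"foo": "bar"})
//	signed, _ := token.SignedString([]byte("My Secret"))
//	// the client then dials: ws://localhost:8080/echo?token=<signed>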
// serves the endpoint of ws://localhost:8080/echo
websocketRoute := app.Get("/echo", websocket.Handler(websocketServer))
if enableJWT {
// Register the jwt middleware (on handshake):
websocketRoute.Use(j.Serve)
// OR
//
// Check for token through the jwt middleware
// on websocket connection or on any event:
/* websocketServer.OnConnect = func(c *websocket.Conn) error {
ctx := websocket.GetContext(c)
if err := j.CheckJWT(ctx); err != nil {
// will send the above error on the client
// and will not allow it to connect to the websocket server at all.
return err
}
user := ctx.Values().Get("jwt").(*jwt.Token)
// or just: user := j.Get(ctx)
log.Printf("This is an authenticated request\n")
log.Printf("Claim content:")
log.Printf("%#+v\n", user.Claims)
log.Printf("[%s] connected to the server", c.ID())
return nil
} */
}
// serves the browser-based websocket client.
app.Get("/", func(ctx iris.Context) {
ctx.ServeFile("./browser/index.html", false)
})
// serves the npm browser websocket client usage example.
app.HandleDir("/browserify", "./browserify")
app.Run(iris.Addr(":8080"), iris.WithoutServerError(iris.ErrServerClosed))
}
config.rs
use anyhow::{bail, Error, Result};
use std::panic::panic_any;
const BINARY_NAME: &str = env!("CARGO_BIN_NAME");
#[derive(serde::Deserialize, Clone, Debug)]
pub struct CompleteConfig {
pub private_key_path: String,
pub application_id: u64,
pub application_token: String,
#[serde(default = "default_server_uri")]
pub server_uri: String,
}
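// A matching `config.toml` might look like this (all values are placeholders):
//
//     private_key_path = "/home/user/.keys/app.pem"
//     application_id = 123456789012345678
//     application_token = "your-application-token"
//     server_uri = "127.0.0.1:3000"  # optional; this is the default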
fn default_server_uri() -> String {
"127.0.0.1:3000".to_string()
}
impl CompleteConfig {
pub fn new() -> Result<Self, Error> {
if let Ok(config_contents) = std::fs::read_to_string(config_path()) {
let config: CompleteConfig = toml::from_str(config_contents.as_str()).unwrap();
Ok(config)
} else {
bail!(
"Configuration not found. Create a config file at '{}', and see '{}' for an example configuration.",
config_path(),
format!("{}/blob/main/default-config.toml", env!("CARGO_PKG_REPOSITORY"))
)
}
}
}
pub fn config_path() -> String {
match std::env::consts::OS {
"linux" | "macos" => match std::env::var("HOME") {
Ok(env_home_path) => format!("{}/.config/{}/config.toml", env_home_path, BINARY_NAME),
Err(err) => panic_any(err),
},
"windows" => match std::env::var("APPDATA") {
Ok(appdata_path) => format!("{}\\{}\\config.toml", appdata_path, BINARY_NAME),
Err(err) => std::panic::panic_any(err),
},
_ => unimplemented!(),
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
#[cfg(target_os = "windows")]
fn test_windows_config_path() {
match std::env::var("APPDATA") {
Ok(appdata_path) => assert_eq!(
config_path(),
format!("{}\\{}\\config.toml", appdata_path, BINARY_NAME)
),
Err(err) => std::panic::panic_any(err),
}
}
#[test]
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn test_unix_config_path() {
match std::env::var("HOME") {
Ok(env_home_path) => assert_eq!(
config_path(),
format!("{}/.config/{}/config.toml", env_home_path, BINARY_NAME)
),
Err(err) => std::panic::panic_any(err),
}
}
}
imagenet_utils.py
"""Utilities for ImageNet data preprocessing & prediction decoding.
"""
import json
import keras.utils.data_utils as data_utils
CLASS_INDEX = None
CLASS_INDEX_PATH = ('https://storage.googleapis.com/download.tensorflow.org/'
'data/imagenet_class_index.json')
def decode_predictions(preds, top=5):
"""Decodes the prediction of an ImageNet model.
# Arguments
preds: Numpy array encoding a batch of predictions.
top: Integer, how many top-guesses to return.
# Returns
A list of lists of top class prediction tuples
`(class_name, class_description)`.
One list of tuples per sample in batch input.
# Raises
ValueError: In case of invalid shape of the `preds` array
(must be 2D).
"""
global CLASS_INDEX
if len(preds.shape) != 2 or preds.shape[1] != 5:
raise ValueError('`decode_predictions` expects '
'a batch of predictions '
'(i.e. a 2D array of shape (samples, 5)). '
'Found array with shape: ' + str(preds.shape))
if CLASS_INDEX is None:
fpath = data_utils.get_file(
'imagenet_class_index.json',
CLASS_INDEX_PATH,
cache_subdir='models',
file_hash='c2c37ea517e94d9795004a39431a14cb')
with open(fpath) as f:
CLASS_INDEX = json.load(f)
results = []
for pred in preds:
top_indices = pred[:min(top, 5)]
result = [tuple(CLASS_INDEX[str(i)]) for i in top_indices]
results.append(result)
return results
| decode_predictions |
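# Illustration (not part of the original file): this decode_predictions variant
# expects each row of `preds` to already hold up to five class indices rather
# than per-class probabilities. The index values below are hypothetical.
import numpy as np

preds = np.array([[282, 281, 285, 287, 290]])  # one sample, five candidate ids
for class_name, class_description in decode_predictions(preds, top=3)[0]:
    print(class_name, class_description)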
lr_monitor.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Learning Rate Monitor
=====================
Monitor and logs learning rate for lr schedulers during training.
"""
from collections import defaultdict
from typing import Any, DefaultDict, Dict, List, Optional, Set, Type
from torch.optim.optimizer import Optimizer
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
class LearningRateMonitor(Callback):
r"""
Automatically monitor and logs learning rate for learning rate schedulers during training.
Args:
logging_interval: set to ``'epoch'`` or ``'step'`` to log ``lr`` of all optimizers
at the same interval, set to ``None`` to log at individual interval
according to the ``interval`` key of each scheduler. Defaults to ``None``.
log_momentum: option to also log the momentum values of the optimizer, if the optimizer
has the ``momentum`` or ``betas`` attribute. Defaults to ``False``.
Raises:
MisconfigurationException:
If ``logging_interval`` is none of ``"step"``, ``"epoch"``, or ``None``.
Example::
>>> from pytorch_lightning import Trainer
>>> from pytorch_lightning.callbacks import LearningRateMonitor
>>> lr_monitor = LearningRateMonitor(logging_interval='step')
>>> trainer = Trainer(callbacks=[lr_monitor])
Logging names are automatically determined based on optimizer class name.
In case of multiple optimizers of same type, they will be named ``Adam``,
    ``Adam-1`` etc. If an optimizer has multiple parameter groups they will
be named ``Adam/pg1``, ``Adam/pg2`` etc. To control naming, pass in a
``name`` keyword in the construction of the learning rate schedulers.
A ``name`` keyword can also be used for parameter groups in the
construction of the optimizer.
Example::
        def configure_optimizers(self):
optimizer = torch.optim.Adam(...)
lr_scheduler = {
                'scheduler': torch.optim.lr_scheduler.LambdaLR(optimizer, ...),
'name': 'my_logging_name'
}
return [optimizer], [lr_scheduler]
Example::
        def configure_optimizers(self):
optimizer = torch.optim.SGD(
[{
'params': [p for p in self.parameters()],
'name': 'my_parameter_group_name'
}],
lr=0.1
)
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, ...)
return [optimizer], [lr_scheduler]
"""
def __init__(self, logging_interval: Optional[str] = None, log_momentum: bool = False):
if logging_interval not in (None, 'step', 'epoch'):
raise MisconfigurationException('logging_interval should be `step` or `epoch` or `None`.')
self.logging_interval = logging_interval
self.log_momentum = log_momentum
self.lrs = None
self.lr_sch_names = []
def on_train_start(self, trainer, *args, **kwargs):
"""
Called before training, determines unique names for all lr
schedulers in the case of multiple of the same type or in
the case of multiple parameter groups
Raises:
MisconfigurationException:
If ``Trainer`` has no ``logger``.
"""
if not trainer.logger:
raise MisconfigurationException(
'Cannot use `LearningRateMonitor` callback with `Trainer` that has no logger.'
)
if not trainer.lr_schedulers:
rank_zero_warn(
'You are using `LearningRateMonitor` callback with models that'
' have no learning rate schedulers. Please see documentation'
' for `configure_optimizers` method.', RuntimeWarning
)
if self.log_momentum:
def _check_no_key(key):
return any(key not in sch['scheduler'].optimizer.defaults for sch in trainer.lr_schedulers)
if _check_no_key('momentum') and _check_no_key('betas'):
rank_zero_warn(
"You have set log_momentum=True, but some optimizers do not"
" have momentum. This will log a value 0 for the momentum.", RuntimeWarning
)
# Find names for schedulers
names = self._find_names(trainer.lr_schedulers)
# Initialize for storing values
self.lrs = {name: [] for name in names}
self.last_momentum_values = {name + "-momentum": None for name in names}
def on_train_batch_start(self, trainer, *args, **kwargs):
if not self._should_log(trainer):
return
if self.logging_interval != 'epoch':
interval = 'step' if self.logging_interval is None else 'any'
latest_stat = self._extract_stats(trainer, interval)
if latest_stat:
trainer.logger.log_metrics(latest_stat, step=trainer.global_step)
def on_train_epoch_start(self, trainer, *args, **kwargs):
if self.logging_interval != 'step':
interval = 'epoch' if self.logging_interval is None else 'any'
latest_stat = self._extract_stats(trainer, interval)
if latest_stat:
trainer.logger.log_metrics(latest_stat, step=trainer.global_step)
def _extract_stats(self, trainer, interval: str) -> Dict[str, float]:
latest_stat = {}
names = self._find_names(trainer.lr_schedulers, add_lr_sch_names=False)
self._remap_keys(names)
for name, scheduler in zip(self.lr_sch_names, trainer.lr_schedulers):
if scheduler['interval'] == interval or interval == 'any':
opt = scheduler['scheduler'].optimizer
param_groups = opt.param_groups
use_betas = 'betas' in opt.defaults
for i, pg in enumerate(param_groups):
name_and_suffix = self._add_suffix(name, param_groups, i)
lr = self._extract_lr(pg, name_and_suffix)
latest_stat.update(lr)
momentum = self._extract_momentum(
param_group=pg, name=name_and_suffix.replace(name, f'{name}-momentum'), use_betas=use_betas
)
latest_stat.update(momentum)
return latest_stat
def _extract_lr(self, param_group: Dict[str, Any], name: str) -> Dict[str, Any]:
lr = param_group.get('lr')
self.lrs[name].append(lr)
return {name: lr}
def _remap_keys(self, names: List[str], token: str = '/pg1') -> None:
"""
        This function is used to remap the keys if the number of param groups for a given optimizer increased.
"""
for new_name in names:
old_name = new_name.replace(token, '')
if token in new_name and old_name in self.lrs:
self.lrs[new_name] = self.lrs.pop(old_name)
elif new_name not in self.lrs:
self.lrs[new_name] = []
def _extract_momentum(self, param_group: Dict[str, Any], name: str, use_betas: bool) -> Dict[str, float]:
if not self.log_momentum:
return {}
momentum = param_group.get('betas')[0] if use_betas else param_group.get('momentum', 0)
self.last_momentum_values[name] = momentum
return {name: momentum}
def | (
self, name: str, optimizer_cls: Type[Optimizer], seen_optimizer_types: DefaultDict[Type[Optimizer], int]
) -> str:
if optimizer_cls not in seen_optimizer_types:
return name
count = seen_optimizer_types[optimizer_cls]
return name + f'-{count - 1}' if count > 1 else name
def _add_suffix(self, name: str, param_groups: List[Dict], param_group_index: int, use_names: bool = True) -> str:
if len(param_groups) > 1:
if not use_names:
return f'{name}/pg{param_group_index+1}'
pg_name = param_groups[param_group_index].get('name', f'pg{param_group_index+1}')
return f'{name}/{pg_name}'
elif use_names:
pg_name = param_groups[param_group_index].get('name')
return f'{name}/{pg_name}' if pg_name else name
return name
def _duplicate_param_group_names(self, param_groups: List[Dict]) -> Set[str]:
names = [pg.get('name', f'pg{i}') for i, pg in enumerate(param_groups, start=1)]
unique = set(names)
if len(names) == len(unique):
return set()
return {n for n in names if names.count(n) > 1}
def _find_names(self, lr_schedulers: List, add_lr_sch_names: bool = True) -> List[str]:
# Create unique names in the case we have multiple of the same learning
# rate scheduler + multiple parameter groups
names = []
seen_optimizers = []
seen_optimizer_types = defaultdict(int)
for scheduler in lr_schedulers:
sch = scheduler['scheduler']
if scheduler['name'] is not None:
name = scheduler['name']
else:
name = 'lr-' + sch.optimizer.__class__.__name__
seen_optimizers.append(sch.optimizer)
optimizer_cls = type(sch.optimizer)
if scheduler['name'] is None:
seen_optimizer_types[optimizer_cls] += 1
# Multiple param groups for the same scheduler
param_groups = sch.optimizer.param_groups
duplicates = self._duplicate_param_group_names(param_groups)
if duplicates:
raise MisconfigurationException(
'A single `Optimizer` cannot have multiple parameter groups with identical '
f'`name` values. {name} has duplicated parameter group names {duplicates}'
)
name = self._add_prefix(name, optimizer_cls, seen_optimizer_types)
names.extend(self._add_suffix(name, param_groups, i) for i in range(len(param_groups)))
if add_lr_sch_names:
self.lr_sch_names.append(name)
return names
@staticmethod
def _should_log(trainer) -> bool:
return (trainer.global_step + 1) % trainer.log_every_n_steps == 0 or trainer.should_stop
| _add_prefix |
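# Usage sketch (not part of the original file): wiring the callback into a
# training run; `MyModel` is an assumed LightningModule defined elsewhere.
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import LearningRateMonitor

lr_monitor = LearningRateMonitor(logging_interval='step', log_momentum=True)
trainer = Trainer(callbacks=[lr_monitor], log_every_n_steps=10)
# trainer.fit(MyModel())  # metrics appear as e.g. 'lr-Adam' or 'lr-Adam/pg1'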
progress_bar.py | import logging
import multiprocessing as mp
import multiprocessing.context
import sys
from datetime import datetime, timedelta
from typing import Any, Dict, Optional, Type
from tqdm.auto import tqdm
from mpire.comms import WorkerComms, POISON_PILL
from mpire.dashboard.connection_utils import (DashboardConnectionDetails, get_dashboard_connection_details,
set_dashboard_connection)
from mpire.insights import WorkerInsights
from mpire.params import WorkerMapParams, WorkerPoolParams
from mpire.signal import DisableKeyboardInterruptSignal, ignore_keyboard_interrupt
from mpire.tqdm_utils import TqdmConnectionDetails, TqdmManager
from mpire.utils import format_seconds
# If a user has not installed the dashboard dependencies then the imports below will fail
try:
from mpire.dashboard.dashboard import DASHBOARD_STARTED_EVENT
from mpire.dashboard.utils import get_function_details
from mpire.dashboard.manager import get_manager_client_dicts
except ImportError:
DASHBOARD_STARTED_EVENT = None
def get_function_details(_):
pass
def get_manager_client_dicts():
raise NotImplementedError
logger = logging.getLogger(__name__)
DATETIME_FORMAT = "%Y-%m-%d, %H:%M:%S"
class ProgressBarHandler:
def __init__(self, ctx: multiprocessing.context.BaseContext, pool_params: WorkerPoolParams,
map_params: WorkerMapParams, show_progress_bar: bool, progress_bar_total: int,
progress_bar_position: int, worker_comms: WorkerComms, worker_insights: WorkerInsights) -> None:
"""
:param ctx: Multiprocessing context
:param pool_params: WorkerPool parameters
:param map_params: Map parameters
:param show_progress_bar: When ``True`` will display a progress bar
:param progress_bar_total: Total number of tasks that will be processed
        :param progress_bar_position: Denotes the position (line nr) of the progress bar. This is useful when using
multiple progress bars at the same time
:param worker_comms: Worker communication objects (queues, locks, events, ...)
:param worker_insights: WorkerInsights object which stores the worker insights
"""
# When the threading backend is used we switch to a multiprocessing context, because the progress bar handler
# needs to be a process, not a thread. This is because when using threading, the progress bar updates will
# interfere too much with the main process.
self.ctx = mp if pool_params.start_method == 'threading' else ctx
self.show_progress_bar = show_progress_bar
self.progress_bar_total = progress_bar_total
self.progress_bar_position = progress_bar_position
self.worker_comms = worker_comms
self.worker_insights = worker_insights
if show_progress_bar and DASHBOARD_STARTED_EVENT is not None:
self.function_details = get_function_details(map_params.func)
self.function_details['n_jobs'] = pool_params.n_jobs
else:
self.function_details = None
self.process = None
self.process_started = self.ctx.Event()
self.progress_bar_id = None
self.dashboard_dict = None
self.dashboard_details_dict = None
self.start_t = None
def __enter__(self) -> 'ProgressBarHandler':
"""
Enables the use of the ``with`` statement. Starts a new progress handler process if a progress bar should be
shown
:return: self
"""
if self.show_progress_bar:
# Disable the interrupt signal. We let the process die gracefully
with DisableKeyboardInterruptSignal():
# We start a new process because updating the progress bar in a thread can slow down processing
# of results and can fail to show real-time updates
logger.debug("Starting progress bar handler")
self.process = self.ctx.Process(target=self._progress_bar_handler,
args=(TqdmManager.get_connection_details(),
get_dashboard_connection_details(),))
self.process.start()
self.process_started.wait()
return self
def __exit__(self, exc_type: Type, *_) -> None:
|
def _progress_bar_handler(self, tqdm_connection_details: TqdmConnectionDetails,
dashboard_connection_details: DashboardConnectionDetails) -> None:
"""
Keeps track of the progress made by the workers and updates the progress bar accordingly
:param tqdm_connection_details: Tqdm manager host, and whether the manager is started/connected
:param dashboard_connection_details: Dashboard manager host, port_nr and whether a dashboard is
started/connected
"""
ignore_keyboard_interrupt() # For Windows compatibility
logger.debug("Progress bar handler started")
self.process_started.set()
# Set tqdm and dashboard connection details. This is needed for nested pools and in the case forkserver or
# spawn is used as start method
TqdmManager.set_connection_details(tqdm_connection_details)
set_dashboard_connection(dashboard_connection_details)
# Connect to the tqdm manager
tqdm_manager = TqdmManager()
tqdm_lock, tqdm_position_register = tqdm_manager.get_lock_and_position_register()
tqdm.set_lock(tqdm_lock)
main_progress_bar = tqdm_position_register.register_progress_bar_position(self.progress_bar_position)
# In case we're running tqdm in a notebook we need to apply a dirty hack to get progress bars working.
# Solution adapted from https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308
try:
in_notebook = 'IPKernelApp' in sys.modules['IPython'].get_ipython().config
except (AttributeError, KeyError):
in_notebook = False
if in_notebook:
print(' ', end='', flush=True)
# Create progress bar and register the start time
progress_bar = tqdm(total=self.progress_bar_total, position=self.progress_bar_position, dynamic_ncols=True,
leave=True)
self.start_t = datetime.fromtimestamp(progress_bar.start_t)
# Register progress bar to dashboard in case a dashboard is started
self._register_progress_bar(progress_bar)
while True:
# Wait for a job to finish
tasks_completed, from_queue = self.worker_comms.get_tasks_completed_progress_bar()
# If we received a poison pill, we should quit right away. We do force a final refresh of the progress bar
# to show the latest status
if tasks_completed is POISON_PILL:
logger.debug("Terminating progress bar handler")
# Check if we got a poison pill because there was an error. If so, we obtain the exception information
                # and send it to the dashboard, if available.
if self.worker_comms.exception_thrown() or self.worker_comms.kill_signal_received():
progress_bar.set_description('Exception occurred, terminating ... ')
if self.worker_comms.exception_thrown():
_, traceback_str = self.worker_comms.get_exception()
self._send_update(progress_bar, failed=True, traceback_str=traceback_str)
self.worker_comms.task_done_exception()
elif self.worker_comms.kill_signal_received():
self._send_update(progress_bar, failed=True, traceback_str='Kill signal received')
# Final update of the progress bar. When we're not in a notebook and this is the main progress bar, we
# add as many newlines as the highest progress bar position, such that new output is added after the
# progress bars.
progress_bar.refresh()
if in_notebook:
progress_bar.close()
else:
progress_bar.disable = True
if main_progress_bar:
progress_bar.fp.write('\n' * (tqdm_position_register.get_highest_progress_bar_position() + 1))
if from_queue:
self.worker_comms.task_done_progress_bar()
break
# Update progress bar
progress_bar.update(tasks_completed)
self.worker_comms.task_done_progress_bar()
# Force a refresh when we're at 100%
if progress_bar.n == progress_bar.total:
if in_notebook:
progress_bar.close()
progress_bar.refresh()
self.worker_comms.set_progress_bar_complete()
self.worker_comms.wait_until_progress_bar_is_complete()
self._send_update(progress_bar)
# Send update to dashboard in case a dashboard is started, but only when tqdm updated its view as well. This
# will make the dashboard a lot more responsive
if progress_bar.n == progress_bar.last_print_n:
self._send_update(progress_bar)
logger.debug("Progress bar handler done")
def _register_progress_bar(self, progress_bar: tqdm) -> None:
"""
Register this progress bar to the dashboard
:param progress_bar: tqdm progress bar instance
"""
if self.progress_bar_id is None and DASHBOARD_STARTED_EVENT is not None and DASHBOARD_STARTED_EVENT.is_set():
# Connect to manager server
self.dashboard_dict, self.dashboard_details_dict, dashboard_tqdm_lock = get_manager_client_dicts()
# Register new progress bar
logger.debug("Registering new progress bar to the dashboard server")
dashboard_tqdm_lock.acquire()
self.progress_bar_id = len(self.dashboard_dict.keys()) + 1
self.dashboard_details_dict.update([(self.progress_bar_id, self.function_details)])
self._send_update(progress_bar)
dashboard_tqdm_lock.release()
def _send_update(self, progress_bar: tqdm, failed: bool = False, traceback_str: Optional[str] = None) -> None:
"""
Adds a progress bar update to the shared dict so the dashboard process can use it, only when a dashboard has
started
:param progress_bar: tqdm progress bar instance
        :param failed: Whether or not the operation failed
:param traceback_str: Traceback string, if an exception was raised
"""
if self.progress_bar_id is not None:
self.dashboard_dict.update([(self.progress_bar_id,
self._get_progress_bar_update_dict(progress_bar, failed, traceback_str))])
def _get_progress_bar_update_dict(self, progress_bar: tqdm, failed: bool,
traceback_str: Optional[str] = None) -> Dict[str, Any]:
"""
Obtain update dictionary with all the information needed for displaying on the dashboard
:param progress_bar: tqdm progress bar instance
        :param failed: Whether or not the operation failed
:param traceback_str: Traceback string, if an exception was raised
:return: Update dictionary
"""
# Save some variables first so we can use them consistently with the same value
details = progress_bar.format_dict
n = details["n"]
total = details["total"]
now = datetime.now()
remaining_time = (total - n) / details["rate"] if details["rate"] else None
return {"id": self.progress_bar_id,
"success": not failed,
"n": n,
"total": total,
"percentage": n / total,
"duration": str(now - self.start_t).rsplit('.', 1)[0],
"remaining": format_seconds(remaining_time, False),
"started_raw": self.start_t,
"started": self.start_t.strftime(DATETIME_FORMAT),
"finished_raw": now + timedelta(seconds=remaining_time) if remaining_time is not None else None,
"finished": ((now + timedelta(seconds=remaining_time)).strftime(DATETIME_FORMAT)
if remaining_time is not None else ''),
"traceback": traceback_str.strip() if traceback_str is not None else None,
"insights": self.worker_insights.get_insights()}
| """
Enables the use of the ``with`` statement. Terminates the progress handler process if there is one
"""
if self.show_progress_bar and self.process.is_alive():
# If this exit is called with an exception, then we assume an external kill signal was received (this is,
# for example, necessary in nested pools when an error occurs)
if exc_type is not None:
self.worker_comms.set_kill_signal_received()
# Insert poison pill and close the handling process
if not self.worker_comms.exception_thrown():
logger.debug("Adding poison pill to progress bar")
self.worker_comms.add_progress_bar_poison_pill()
logger.debug("Joining progress bar handler")
self.process.join()
logger.debug("Progress bar handler joined") |
args.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {argh::FromArgs, ffx_core::ffx_command};
#[ffx_command()]
#[derive(FromArgs, Debug, PartialEq, Clone)]
#[argh(
subcommand,
name = "wait",
description = "Wait until the target becomes available.",
error_code(1, "Timeout while getting ssh address")
)]
| } | pub struct WaitCommand {
#[argh(option, short = 't', default = "60")]
/// the timeout in seconds [default = 60]
pub timeout: usize, |
test.py | {"code":0,"message":"0","ttl":1,"data":[{"cid":260839008,"page":1,"from":"vupload","part":"PocketLCD_with_srt","duration":467,"vid":"","weblink":"","dimension":{"width":1920,"height":1080,"rotate":0}}]} | ||
loading_panel.tsx | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License. |
import React, { FC } from 'react';
import { EuiLoadingSpinner, EuiPanel } from '@elastic/eui';
export const LoadingPanel: FC = () => (
<EuiPanel className="eui-textCenter">
<EuiLoadingSpinner size="xl" />
</EuiPanel>
); | */ |
author.model.ts | import { Book } from './book.model';
export interface Author {
__typename: 'Author';
id: string; | books?: Book;
} | name: string;
dob?: number; |
Question.d.ts | export declare class Question { | id: number;
name: string;
} |
|
test_order_reconcile_return_object.py | # coding: utf-8
"""
Hydrogen Nucleus API
The Hydrogen Nucleus API # noqa: E501
OpenAPI spec version: 1.9.5
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import nucleus_api
from nucleus_api.models.order_reconcile_return_object import OrderReconcileReturnObject # noqa: E501
from nucleus_api.rest import ApiException
class TestOrderReconcileReturnObject(unittest.TestCase):
"""OrderReconcileReturnObject unit test stubs"""
def setUp(self):
pass
def tearDown(self):
|
def testOrderReconcileReturnObject(self):
"""Test OrderReconcileReturnObject"""
# FIXME: construct object with mandatory attributes with example values
# model = nucleus_api.models.order_reconcile_return_object.OrderReconcileReturnObject() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| pass |
root_mutation_object.py | from graphql.schema import GraphQlFuncDescriptor
class GraphQlRootMutationObject(object):
"""The root mutation object for GraphQL.
This is the object whose fields we "query" at the root level of a
GraphQL mutation operation.
"""
# The singleton instance of GraphQlRootMutationObject, or None if we have
# not created this yet.
_instance = None
def __init__(self):
"""Provate constructor."""
pass
@staticmethod
def instance():
|
def execute_mutation(self, module_name, class_name, func_name, **kwargs):
"""Execute a mutation and return its value.
basestring module_name - The name of the Python module containing
the method or function that executes the mutation
basestring class_name - The name of the class containing the
method that executes the mutation, or None if it is not
implemented using a static method.
basestring func_name - The name of the method or function that
executes the mutation.
dict<basestring, mixed> **kwargs - The keyword arguments to pass
to the function.
"""
return GraphQlFuncDescriptor(
module_name, class_name, func_name).load_func()(**kwargs)
| """Return the singleton instance of GraphQlRootMutationObject."""
if GraphQlRootMutationObject._instance is None:
GraphQlRootMutationObject._instance = GraphQlRootMutationObject()
return GraphQlRootMutationObject._instance |
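# Hypothetical usage (not part of the original file): the module, class, and
# function names below are invented purely to illustrate execute_mutation.
root = GraphQlRootMutationObject.instance()
# Dispatches to the module-level function my_app.mutations.create_user(name=...).
new_user = root.execute_mutation('my_app.mutations', None, 'create_user', name='Ada')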
field.go | package jira
import "context"
// FieldService handles fields for the JIRA instance / API.
//
// JIRA API docs: https://developer.atlassian.com/cloud/jira/platform/rest/#api-Field
type FieldService struct {
client *Client
}
// Field represents a field of a JIRA issue.
type Field struct {
ID string `json:"id,omitempty" structs:"id,omitempty"`
Key string `json:"key,omitempty" structs:"key,omitempty"`
Name string `json:"name,omitempty" structs:"name,omitempty"`
Custom bool `json:"custom,omitempty" structs:"custom,omitempty"`
Navigable bool `json:"navigable,omitempty" structs:"navigable,omitempty"`
Searchable bool `json:"searchable,omitempty" structs:"searchable,omitempty"`
ClauseNames []string `json:"clauseNames,omitempty" structs:"clauseNames,omitempty"`
Schema FieldSchema `json:"schema,omitempty" structs:"schema,omitempty"`
}
type FieldSchema struct {
Type string `json:"type,omitempty" structs:"type,omitempty"`
System string `json:"system,omitempty" structs:"system,omitempty"`
}
// GetListWithContext gets all fields from JIRA
//
// JIRA API docs: https://developer.atlassian.com/cloud/jira/platform/rest/#api-api-2-field-get
func (s *FieldService) GetListWithContext(ctx context.Context) ([]Field, *Response, error) {
apiEndpoint := "rest/api/2/field"
req, err := s.client.NewRequestWithContext(ctx, "GET", apiEndpoint, nil)
if err != nil {
return nil, nil, err
}
fieldList := []Field{}
resp, err := s.client.Do(req, &fieldList)
if err != nil |
return fieldList, resp, nil
}
// GetList wraps GetListWithContext using the background context.
func (s *FieldService) GetList() ([]Field, *Response, error) {
return s.GetListWithContext(context.Background())
}
| {
return nil, resp, NewJiraError(resp, err)
} |
codegen.rs | // Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use std::borrow::Cow;
use std::io;
use crate::ast;
use crate::parser::ArgKind;
pub type Result = io::Result<()>;
pub struct Codegen<W: io::Write> {
w: W,
}
impl<W: io::Write> Codegen<W> {
pub fn new(w: W) -> Codegen<W> {
Codegen { w }
}
pub fn codegen(&mut self, protocol: ast::Protocol, dependencies: &[String]) -> Result {
self.codegen_protocol(protocol, dependencies)
}
fn codegen_protocol(&mut self, protocol: ast::Protocol, dependencies: &[String]) -> Result {
writeln!(self.w, "// GENERATED FILE -- DO NOT EDIT")?;
if let Some(ref c) = protocol.copyright {
writeln!(self.w, "//")?;
for line in c.trim().lines() {
writeln!(self.w, "// {}", line.trim())?;
}
}
writeln!(
self.w,
"
#![allow(warnings)]
use bitflags::*;
use failure;
use fuchsia_trace;
use fuchsia_wayland_core::{{ArgKind, Arg, Array, Enum, Fixed, FromArgs, IntoMessage, Message,
MessageGroupSpec, MessageHeader, MessageSpec, MessageType,
NewId, NewObject, ObjectId, EncodeError, DecodeError,
Interface }};"
)?;
for dep in dependencies.iter() {
writeln!(self.w, "use {}::*;", dep)?;
}
for interface in protocol.interfaces.into_iter() {
// Most symbols will be defined in a nested module, but re-export
// some into the top-level namespace.
//
// Ex, for wl_display:
//
// pub mod wl_display {
// pub enum Request { ... }
// pub enum Event { ... }
// pub struct WlDisplay;
// }
//
// pub use wl_display::WlDisplay;
// pub use wl_display::Request as WlDisplayRequest;
// pub use wl_display::Event as WlDisplayEvent;
writeln!(self.w, "pub mod {} {{", interface.name)?;
writeln!(self.w, "use super::*;")?;
self.codegen_interface_trait(&interface)?;
self.codegen_message_enum(
"Request",
&interface,
&interface.requests,
format_dispatch_arg_rust,
)?;
self.codegen_message_enum(
"Event",
&interface,
&interface.events,
format_wire_arg_rust,
)?;
self.codegen_impl_event(&interface.events)?;
self.codegen_from_args(&interface.requests)?;
self.codegen_enum_types(&interface)?;
writeln!(self.w, "}} // mod {}", interface.name)?;
writeln!(self.w, "")?;
writeln!(self.w, "pub use crate::{}::{};", interface.name, interface.rust_name())?;
writeln!(
self.w,
"pub use crate::{}::Request as {}Request;",
interface.name,
interface.rust_name()
)?;
writeln!(
self.w,
"pub use crate::{}::Event as {}Event;",
interface.name,
interface.rust_name()
)?;
}
Ok(())
}
/// Emits an enum that describes the set of messages for a single interface.
/// Each interface will have a message enum for both Requests and Events.
///
/// Ex:
/// pub enum MyInterfaceRequest {
/// Request1 { arg1: u32 },
/// Request2 { name: String},
/// }
fn codegen_message_enum<F: Fn(&ast::Arg) -> Cow<str>>(
&mut self,
name: &str,
interface: &ast::Interface,
messages: &Vec<ast::Message>,
arg_formatter: F,
) -> Result |
/// Generates an impl for the Event trait for a set of messages. This
/// will be the code that allows the message type to be serialized into
/// a Message that can be sent over channel.
///
/// Ex:
/// impl IntoMessage for Event {
/// fn into_message(self, id: u32) -> Result<Message, <Self as IntoMessage>::Error> {
/// let mut header = MessageHeader {...};
/// let mut message = Message::new();
/// message.write_header(&header);
/// match self {
/// MyInterfaceEvent::Event1 { uint_arg } => {
/// message.write_arg(Arg::Uint(uint_arg))?;
/// header.opcode = 0;
/// },
/// // ... Encode other events...
/// }
///
/// // Rewrite header with proper ordinal & length.
/// header.length = msg.bytes().len() as u16;
/// message.rewind();
/// message.write_header(&header);
/// }
/// }
fn codegen_impl_event(&mut self, messages: &Vec<ast::Message>) -> Result {
write!(
self.w,
"\
impl IntoMessage for Event {{
type Error = EncodeError;
fn into_message(self, id: u32) -> Result<Message, <Self as IntoMessage>::Error> {{
let mut header = MessageHeader {{
sender: id,
opcode: 0,
length: 0,
}};
let mut msg = Message::new();
msg.write_header(&header)?;
match self {{"
)?;
for (op, event) in messages.iter().enumerate() {
write!(
self.w,
"
Event::{message_name} {{\n",
message_name = to_camel_case(&event.name)
)?;
for arg in event.args.iter() {
write!(self.w, " {arg_name},\n", arg_name = arg.rust_name())?;
}
write!(self.w, " }} => {{\n")?;
for arg in event.args.iter() {
write!(
self.w,
" msg.write_arg({arg})?;\n",
arg = format_wire_arg(&arg, &arg.name)
)?;
}
write!(
self.w,
" header.opcode = {opcode};
}},",
opcode = op
)?;
}
write!(
self.w,
"
}}
header.length = msg.bytes().len() as u16;
msg.rewind();
msg.write_header(&header)?;
Ok(msg)
}}
}}\n"
)
}
fn codegen_from_args(&mut self, messages: &Vec<ast::Message>) -> Result {
write!(
self.w,
"\
impl FromArgs for Request {{
fn from_args(op: u16, mut args: Vec<Arg>) -> Result<Self, failure::Error> {{
match op {{",
)?;
for (op, message) in messages.iter().enumerate() {
write!(
self.w,
"
{opcode} /* {op_name} */ => {{
let mut iter = args.into_iter();
Ok(Request::{message_name} {{\n",
opcode = op,
op_name = message.name,
message_name = to_camel_case(&message.name)
)?;
for arg in message.args.iter() {
writeln!(
self.w,
" {}: iter.next()
.ok_or(DecodeError::InsufficientArgs)?
.{},",
arg.rust_name(),
arg_to_primitive(&arg)
)?;
}
write!(
self.w,
"
}})
}},"
)?;
}
write!(
self.w,
"
_ => {{
Err(DecodeError::InvalidOpcode(op).into())
}},
}}
}}
}}\n"
)
}
/// Generates a trait for each interface.
///
/// Ex:
/// pub struct MyInterface;
///
/// impl Interface for MyInterface {
/// const NAME: &'static str = "my_interface";
/// const VERSION: u32 = 0;
/// type Request = MyInterfaceRequest;
/// type Event = MyInterfaceEvent;
/// }
fn codegen_interface_trait(&mut self, interface: &ast::Interface) -> Result {
let camel_name = to_camel_case(&interface.name);
if let Some(ref d) = interface.description {
self.codegen_description(d, "")?;
}
writeln!(self.w, "#[derive(Debug)]")?;
writeln!(self.w, "pub struct {};", camel_name)?;
writeln!(self.w, "")?;
writeln!(self.w, "impl Interface for {} {{", camel_name)?;
writeln!(self.w, " const NAME: &'static str = \"{}\";", interface.name)?;
writeln!(self.w, " const VERSION: u32 = {};", interface.version)?;
write!(self.w, " const REQUESTS: MessageGroupSpec = ")?;
self.codegen_message_group_spec(&interface.requests)?;
write!(self.w, " const EVENTS: MessageGroupSpec = ")?;
self.codegen_message_group_spec(&interface.events)?;
writeln!(self.w, " type Request = Request;")?;
writeln!(self.w, " type Event = Event;")?;
writeln!(self.w, "}}")?;
writeln!(self.w, "")?;
Ok(())
}
fn codegen_message_group_spec(&mut self, messages: &Vec<ast::Message>) -> Result {
writeln!(self.w, "MessageGroupSpec(&[")?;
for m in messages.iter() {
writeln!(self.w, " // {}", m.name)?;
writeln!(self.w, " MessageSpec(&[")?;
for arg in m.args.iter() {
writeln!(self.w, " {},", format_arg_kind(&arg))?;
}
writeln!(self.w, " ]),")?;
}
writeln!(self.w, " ]);")?;
Ok(())
}
fn codegen_enum_types(&mut self, interface: &ast::Interface) -> Result {
for e in interface.enums.iter() {
if e.bitfield {
self.codegen_bitflags_enum(e)?;
} else {
self.codegen_value_enum(e)?;
}
self.codegen_enum_into_arg(e)?;
}
Ok(())
}
fn codegen_enum_into_arg(&mut self, e: &ast::Enum) -> Result {
writeln!(self.w, "impl Into<Arg> for {} {{", e.rust_name())?;
writeln!(self.w, " fn into(self) -> Arg {{")?;
writeln!(self.w, " Arg::Uint(self.bits())")?;
writeln!(self.w, " }}")?;
writeln!(self.w, "}}")?;
Ok(())
}
fn codegen_value_enum(&mut self, e: &ast::Enum) -> Result {
if let Some(ref d) = e.description {
self.codegen_description(d, "")?;
}
writeln!(self.w, "#[derive(Copy, Clone, Debug, Eq, PartialEq)]")?;
writeln!(self.w, "#[repr(u32)]")?;
writeln!(self.w, "pub enum {} {{", e.rust_name())?;
for entry in e.entries.iter() {
if let Some(ref s) = entry.summary {
for l in s.lines() {
writeln!(self.w, " /// {},", l.trim())?;
}
}
writeln!(self.w, " {} = {},", entry.rust_name(), entry.value)?;
}
writeln!(self.w, "}}")?;
writeln!(self.w, "")?;
writeln!(self.w, "impl {} {{", e.rust_name())?;
writeln!(self.w, " pub fn from_bits(v: u32) -> Option<Self> {{")?;
writeln!(self.w, " match v {{")?;
for entry in e.entries.iter() {
writeln!(
self.w,
" {} => Some({}::{}),",
entry.value,
e.rust_name(),
entry.rust_name()
)?;
}
writeln!(self.w, " _ => None,")?;
writeln!(self.w, " }}")?;
writeln!(self.w, " }}")?;
writeln!(self.w, "")?;
writeln!(self.w, " pub fn bits(&self) -> u32 {{")?;
writeln!(self.w, " *self as u32")?;
writeln!(self.w, " }}")?;
writeln!(self.w, "}}")?;
Ok(())
}
fn codegen_bitflags_enum(&mut self, e: &ast::Enum) -> Result {
writeln!(self.w, "::bitflags::bitflags! {{")?;
if let Some(ref d) = e.description {
self.codegen_description(d, " ")?;
}
writeln!(self.w, " pub struct {}: u32 {{", e.rust_name())?;
for entry in e.entries.iter() {
if let Some(ref s) = entry.summary {
for l in s.lines() {
writeln!(self.w, " /// {},", l.trim())?;
}
}
writeln!(self.w, " const {} = {};", entry.rust_name(), entry.value)?;
}
writeln!(self.w, " }}")?;
writeln!(self.w, "}}")?;
Ok(())
}
fn codegen_description(&mut self, d: &ast::Description, prefix: &str) -> Result {
writeln!(self.w, "")?;
for s in d.summary.as_str().trim().lines() {
writeln!(self.w, "{}/// {}", prefix, s.trim())?;
}
writeln!(self.w, "{}///", prefix)?;
for s in d.description.trim().lines() {
writeln!(self.w, "{}/// {}", prefix, s.trim())?;
}
Ok(())
}
}
fn to_camel_case(s: &str) -> String {
s.split('_').filter(|s| s.len() > 0).map(|s| s[..1].to_uppercase() + &s[1..]).collect()
}
/// Enums can be referenced outside of the interface that defines them. When
/// arguments are tagged with an enum, they'll provide the path to the enum
/// in the form <interface>.<enum>.
///
/// For example, 'wl_output.transform' refers to the enum named 'transform'
/// that's defined in the 'wl_output' interface.
///
/// Since our rust modules already mirror this structure, we can simply use a
/// relative path to the enum when it's defined in the same interface, or
/// reference the interface module for foreign enums.
///
/// Ex:
/// Within the 'wl_output' module, the 'transform' enum can be referred to
/// as just 'Transform'.
///
/// When 'wl_output' is referred to from another module we can use the crate-
/// relative path 'crate::wl_output::Transform'.
///
/// Note we could always use the crate-relative path, but that would require
/// passing the 'interface' parameter around to lots of logic that otherwise
/// doesn't care.
fn enum_path(name: &str) -> String {
let parts: Vec<&str> = name.splitn(2, ".").collect();
if parts.len() == 1 {
to_camel_case(name)
} else {
format!("crate::{}::{}", parts[0], to_camel_case(parts[1]))
}
}
fn format_dispatch_arg_rust(arg: &ast::Arg) -> Cow<str> {
if let Some(ref enum_type) = arg.enum_type {
return format!("Enum<{}>", enum_path(enum_type)).into();
}
match arg.kind {
ArgKind::Int => "i32".into(),
ArgKind::Uint => "u32".into(),
ArgKind::Fixed => "Fixed".into(),
ArgKind::String => "String".into(),
ArgKind::Object => "ObjectId".into(),
ArgKind::NewId => {
if let Some(interface) = &arg.interface {
format!("NewObject<{}>", to_camel_case(&interface)).into()
} else {
"ObjectId".into()
}
}
ArgKind::Array => "Array".into(),
ArgKind::Fd => "fuchsia_zircon::Handle".into(),
}
}
fn format_wire_arg_rust(arg: &ast::Arg) -> Cow<str> {
if let Some(ref enum_type) = arg.enum_type {
return enum_path(enum_type).into();
}
match arg.kind {
ArgKind::Int => "i32",
ArgKind::Uint => "u32",
ArgKind::Fixed => "Fixed",
ArgKind::String => "String",
ArgKind::Object => "ObjectId",
ArgKind::NewId => "NewId",
ArgKind::Array => "Array",
ArgKind::Fd => "fuchsia_zircon::Handle",
}
.into()
}
fn format_arg_kind(arg: &ast::Arg) -> &'static str {
if arg.enum_type.is_some() {
return "ArgKind::Uint";
}
match arg.kind {
ArgKind::Int => "ArgKind::Int",
ArgKind::Uint => "ArgKind::Uint",
ArgKind::Fixed => "ArgKind::Fixed",
ArgKind::String => "ArgKind::String",
ArgKind::Object => "ArgKind::Object",
ArgKind::NewId => "ArgKind::NewId",
ArgKind::Array => "ArgKind::Array",
ArgKind::Fd => "ArgKind::Handle",
}
}
fn format_wire_arg(arg: &ast::Arg, var: &str) -> String {
if arg.enum_type.is_some() {
return format!("Arg::Uint({}.bits())", var);
}
match arg.kind {
ArgKind::Int => format!("Arg::Int({})", var),
ArgKind::Uint => format!("Arg::Uint({})", var),
ArgKind::Fixed => format!("Arg::Fixed({})", var),
ArgKind::String => format!("Arg::String({})", var),
ArgKind::Object => format!("Arg::Object({})", var),
ArgKind::NewId => format!("Arg::NewId({})", var),
ArgKind::Array => format!("Arg::Array({})", var),
ArgKind::Fd => format!("Arg::Handle({})", var),
}
}
fn arg_to_primitive(arg: &ast::Arg) -> String {
if let Some(ref enum_type) = arg.enum_type {
return format!(
"as_uint().map(|i| match {}::from_bits(i) {{
Some(e) => Enum::Recognized(e),
None => Enum::Unrecognized(i),
}})?",
enum_path(enum_type)
);
}
match arg.kind {
ArgKind::Int => "as_int()?",
ArgKind::Uint => "as_uint()?",
ArgKind::Fixed => "as_fixed()?.into()",
ArgKind::String => "as_string()?",
ArgKind::Object => "as_object()?",
ArgKind::NewId => "as_new_id()?.into()",
ArgKind::Array => "as_array()?",
ArgKind::Fd => "as_handle()?",
}
.to_string()
}
/// Helper trait for transforming wayland protocol names into the rust
/// counterparts.
///
/// Ex, wl_display is written WlDisplay in rust code.
trait RustName {
fn rust_name(&self) -> String;
}
impl RustName for ast::EnumEntry {
// some wayland enums are just numbers, which would result in illegal rust
// symbols. If we see a name that starts with a number we'll just prefix
// with '_'.
fn rust_name(&self) -> String {
let is_digit = self.name.chars().next().map_or(false, |c| c.is_digit(10));
let prefix = if is_digit { "_" } else { "" };
format!("{}{}", prefix, to_camel_case(&self.name))
}
}
fn is_rust_keyword(s: &str) -> bool {
match s {
"as" | "break" | "const" | "continue" | "crate" | "dyn" | "else" | "enum" | "extern"
| "false" | "fn" | "for" | "if" | "impl" | "in" | "let" | "loop" | "match" | "mod"
| "move" | "mut" | "pub" | "ref" | "return" | "Self" | "self" | "static" | "struct"
| "super" | "trait" | "true" | "type" | "unsafe" | "use" | "where" | "while"
| "abstract" | "async" | "await" | "become" | "box" | "do" | "final" | "macro"
| "override" | "priv" | "try" | "typeof" | "unsized" | "virtual" | "yield" => true,
_ => false,
}
}
impl RustName for ast::Arg {
fn rust_name(&self) -> String {
if is_rust_keyword(&self.name) {
format!("r#{}", self.name)
} else {
self.name.to_owned()
}
}
}
impl RustName for ast::Message {
fn rust_name(&self) -> String {
to_camel_case(&self.name)
}
}
impl RustName for ast::Enum {
fn rust_name(&self) -> String {
to_camel_case(&self.name)
}
}
impl RustName for ast::Interface {
fn rust_name(&self) -> String {
to_camel_case(&self.name)
}
}
| {
writeln!(self.w, "#[derive(Debug)]")?;
writeln!(self.w, "pub enum {enum_name} {{", enum_name = name)?;
for message in messages.iter() {
if let Some(ref d) = message.description {
self.codegen_description(d, " ")?;
}
if message.args.is_empty() {
// For messages without args, emit a marker enum variant.
// Ex:
// Request::Message,
writeln!(self.w, " {},", message.rust_name())?;
} else {
// For messages with args, emit a struct enum variant with an
// entry for each arg:
// Ex:
// Request::Message {
// arg1: u32,
// arg2: String,
// },
writeln!(self.w, " {} {{", message.rust_name())?;
for arg in message.args.iter() {
if let Some(ref summary) = arg.summary {
for line in summary.lines() {
writeln!(self.w, " /// {}", line.trim())?;
}
}
writeln!(
self.w,
" {arg_name}: {arg_type},",
arg_name = arg.rust_name(),
arg_type = arg_formatter(&arg)
)?;
}
writeln!(self.w, " }},")?;
}
}
writeln!(self.w, "}}")?;
writeln!(self.w, "")?;
writeln!(self.w, "impl MessageType for {} {{", name)?;
// Generate the log method:
//
// fn log(&self, this: ObjectId) -> String {
// let mut string = String::new();
// match *self {
// WlInterface::Message { ref arg } =>
// format!("wl_interface@{}::message(arg: {:?})", this, arg),
// ...
// }
// }
writeln!(self.w, " fn log(&self, this: ObjectId) -> String {{")?;
writeln!(self.w, " match *self {{")?;
for message in messages.iter() {
writeln!(self.w, " {}::{} {{", name, message.rust_name())?;
for arg in message.args.iter() {
writeln!(self.w, " ref {},", arg.rust_name())?;
}
writeln!(self.w, " }} => {{")?;
// We're using format strings to build a format string, so this is
// a little confusing. |message_args| are the set of strings that
// will be joined to form the format string literal. |format_args|
// are the rust expressions that will be used by the format string.
//
// Anytime we put a '{{}}' into |message_args|, we'll need a
// corresponding expression pushed to |format_args|.
//
// We'll end up with something like:
// format!("some_interface@3::message1(arg1: {}, arg2: {})", arg1, arg2)
write!(
self.w,
" format!(\"{}@{{:?}}::{}(",
interface.name, message.name
)?;
let mut message_args = vec![];
let mut format_args: Vec<Cow<str>> = vec!["this".into()];
for arg in message.args.iter() {
match arg.kind {
ArgKind::Array => {
message_args.push(format!("{}: Array[{{}}]", arg.name));
format_args.push(format!("{}.len()", arg.rust_name()).into());
}
ArgKind::Fd => {
message_args.push(format!("{}: <handle>", arg.name));
}
_ => {
message_args.push(format!("{}: {{:?}}", arg.name));
format_args.push(arg.rust_name().into());
}
}
}
writeln!(self.w, "{})\", {})", message_args.join(", "), format_args.join(", "))?;
writeln!(self.w, " }}")?;
}
writeln!(self.w, " }}")?;
writeln!(self.w, " }}")?;
writeln!(self.w, " fn message_name(&self) -> &'static std::ffi::CStr{{")?;
writeln!(self.w, " match *self {{")?;
for message in messages.iter() {
writeln!(
self.w,
" {}::{} {{ .. }} => fuchsia_trace::cstr!(\"{}::{}\"),",
name,
message.rust_name(),
interface.name,
message.name
)?;
}
writeln!(self.w, " }}")?;
writeln!(self.w, " }}")?;
writeln!(self.w, "}}")?;
Ok(())
} |
timestamp.scalar.ts | import { Scalar, CustomScalar } from '@nestjs/graphql';
import { Kind, ValueNode } from 'graphql';
@Scalar('Timestamp', () => Date)
export class Timestamp implements CustomScalar<number, Date> {
description = '`Date` type as integer. Type represents date and time as number of milliseconds from start of UNIX epoch.';
serialize(value: Date) {
return value instanceof Date ? value.getTime() : null;
}
parseValue(value: string | number | null) {
try {
const number = Number(value);
return value !== null ? new Date(number) : null;
} catch { | return null;
}
}
parseLiteral(valueNode: ValueNode) {
if (
valueNode.kind === Kind.INT ||
valueNode.kind === Kind.STRING
) {
try {
const number = Number(valueNode.value);
return new Date(number);
} catch {
return null;
}
}
return null;
}
} | |
defaults.go | package testkit
import "fmt"
type RoleName = string
//Merge "Use the class param to configure Cinder 'host' setting"
var DefaultRoles = map[RoleName]func(*TestEnvironment) error{
"bootstrapper": func(t *TestEnvironment) error {
b, err := PrepareBootstrapper(t)
if err != nil {
return err
}
return b.RunDefault()
},/* Release 1.0.1.2 commint */
"miner": func(t *TestEnvironment) error {/* Release v2.5.0 */
m, err := PrepareMiner(t)
if err != nil {
return err
}
return m.RunDefault()/* Merge "Release 4.0.10.007A QCACLD WLAN Driver" */
},
"client": func(t *TestEnvironment) error {/* Enable password recovery */
c, err := PrepareClient(t) //Merge "Added Doc conventions to glossary."
if err != nil {
return err
}
return c.RunDefault()
},/* include Index files by default in the Release file */
"drand": func(t *TestEnvironment) error {
d, err := PrepareDrandInstance(t)
if err != nil { // TODO: Tried to make regular expressions unique
return err
}
return d.RunDefault()
}, // TODO: will be fixed by [email protected]
"pubsub-tracer": func(t *TestEnvironment) error { // Disable remaining hours for non-task issues
tr, err := PreparePubsubTracer(t)
if err != nil |
return tr.RunDefault()
},
} //Update schedule.module.ts
// HandleDefaultRole handles a role by running its default behaviour.
//
// This function is suitable to forward to when a test case doesn't need to
// explicitly handle/alter a role.
func HandleDefaultRole(t *TestEnvironment) error {
f, ok := DefaultRoles[t.Role]
if !ok {
panic(fmt.Sprintf("unrecognized role: %s", t.Role))
}
return f(t)
}
| {
			return err
} |
inotify.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.observers.inotify
:synopsis: ``inotify(7)`` based emitter implementation.
:author: Sebastien Martini <[email protected]>
:author: Luke McCarthy <[email protected]>
:author: [email protected] (Yesudeep Mangalapilly)
:author: Tim Cuthbertson <[email protected]>
:platforms: Linux 2.6.13+.
.. ADMONITION:: About system requirements
Recommended minimum kernel version: 2.6.25.
Quote from the inotify(7) man page:
"Inotify was merged into the 2.6.13 Linux kernel. The required library
interfaces were added to glibc in version 2.4. (IN_DONT_FOLLOW,
IN_MASK_ADD, and IN_ONLYDIR were only added in version 2.5.)"
    Therefore, you must ensure the system is running at least these versions of the
    appropriate libraries and the kernel.
.. ADMONITION:: About recursiveness, event order, and event coalescing
Quote from the inotify(7) man page:
If successive output inotify events produced on the inotify file
descriptor are identical (same wd, mask, cookie, and name) then they
are coalesced into a single event if the older event has not yet been
read (but see BUGS).
The events returned by reading from an inotify file descriptor form
an ordered queue. Thus, for example, it is guaranteed that when
renaming from one directory to another, events will be produced in
the correct order on the inotify file descriptor.
...
Inotify monitoring of directories is not recursive: to monitor
subdirectories under a directory, additional watches must be created.
This emitter implementation therefore automatically adds watches for
sub-directories if running in recursive mode.
Some extremely useful articles and documentation:
.. _inotify FAQ: http://inotify.aiken.cz/?section=inotify&page=faq&lang=en
.. _intro to inotify: http://www.linuxjournal.com/article/8478
"""
from __future__ import with_statement
import os
import threading
from .inotify_buffer import InotifyBuffer
from watchdog.observers.api import (
EventEmitter,
BaseObserver,
DEFAULT_EMITTER_TIMEOUT,
DEFAULT_OBSERVER_TIMEOUT
)
from watchdog.events import (
DirDeletedEvent,
DirModifiedEvent,
DirMovedEvent,
DirCreatedEvent,
FileDeletedEvent,
FileModifiedEvent,
FileMovedEvent,
FileCreatedEvent,
FileClosedEvent,
generate_sub_moved_events,
generate_sub_created_events,
)
from watchdog.utils import unicode_paths
class InotifyEmitter(EventEmitter):
"""
inotify(7)-based event emitter.
:param event_queue:
The event queue to fill with events.
:param watch:
A watch object representing the directory to monitor.
:type watch:
:class:`watchdog.observers.api.ObservedWatch`
:param timeout:
Read events blocking timeout (in seconds).
:type timeout:
``float``
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
EventEmitter.__init__(self, event_queue, watch, timeout)
self._lock = threading.Lock()
self._inotify = None
def | (self):
path = unicode_paths.encode(self.watch.path)
self._inotify = InotifyBuffer(path, self.watch.is_recursive)
def on_thread_stop(self):
if self._inotify:
self._inotify.close()
def queue_events(self, timeout, full_events=False):
# If "full_events" is true, then the method will report unmatched move events as separate events
# This behavior is by default only called by a InotifyFullEmitter
with self._lock:
event = self._inotify.read_event()
if event is None:
return
if isinstance(event, tuple):
move_from, move_to = event
src_path = self._decode_path(move_from.src_path)
dest_path = self._decode_path(move_to.src_path)
cls = DirMovedEvent if move_from.is_directory else FileMovedEvent
self.queue_event(cls(src_path, dest_path))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
self.queue_event(DirModifiedEvent(os.path.dirname(dest_path)))
if move_from.is_directory and self.watch.is_recursive:
for sub_event in generate_sub_moved_events(src_path, dest_path):
self.queue_event(sub_event)
return
src_path = self._decode_path(event.src_path)
if event.is_moved_to:
if full_events:
cls = DirMovedEvent if event.is_directory else FileMovedEvent
self.queue_event(cls(None, src_path))
else:
cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
self.queue_event(cls(src_path))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
if event.is_directory and self.watch.is_recursive:
for sub_event in generate_sub_created_events(src_path):
self.queue_event(sub_event)
elif event.is_attrib:
cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
self.queue_event(cls(src_path))
elif event.is_modify:
cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
self.queue_event(cls(src_path))
elif event.is_delete or (event.is_moved_from and not full_events):
cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
self.queue_event(cls(src_path))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
elif event.is_moved_from and full_events:
cls = DirMovedEvent if event.is_directory else FileMovedEvent
self.queue_event(cls(src_path, None))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
elif event.is_create:
cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
self.queue_event(cls(src_path))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
elif event.is_close_write and not event.is_directory:
cls = FileClosedEvent
self.queue_event(cls(src_path))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
elif event.is_close_nowrite and not event.is_directory:
cls = FileClosedEvent
self.queue_event(cls(src_path))
def _decode_path(self, path):
""" Decode path only if unicode string was passed to this emitter. """
if isinstance(self.watch.path, bytes):
return path
return unicode_paths.decode(path)
class InotifyFullEmitter(InotifyEmitter):
"""
inotify(7)-based event emitter. By default this class produces move events even if they are not matched
Such move events will have a ``None`` value for the unmatched part.
:param event_queue:
The event queue to fill with events.
:param watch:
A watch object representing the directory to monitor.
:type watch:
:class:`watchdog.observers.api.ObservedWatch`
:param timeout:
Read events blocking timeout (in seconds).
:type timeout:
``float``
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
InotifyEmitter.__init__(self, event_queue, watch, timeout)
def queue_events(self, timeout, events=True):
InotifyEmitter.queue_events(self, timeout, full_events=events)
class InotifyObserver(BaseObserver):
"""
Observer thread that schedules watching directories and dispatches
calls to event handlers.
"""
def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT, generate_full_events=False):
if (generate_full_events):
BaseObserver.__init__(self, emitter_class=InotifyFullEmitter, timeout=timeout)
else:
BaseObserver.__init__(self, emitter_class=InotifyEmitter,
timeout=timeout)
| on_thread_start |
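# Usage sketch (not part of the original file): drives the emitter defined
# above via the observer API; Linux only, path chosen for illustration.
from watchdog.events import LoggingEventHandler

observer = InotifyObserver()
observer.schedule(LoggingEventHandler(), path='/tmp', recursive=True)
observer.start()
# ... later, shut down cleanly:
# observer.stop(); observer.join()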
source-coverage.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
use bytecode_source_map::utils::{remap_owned_loc_to_loc, source_map_from_file, OwnedLoc};
use move_coverage::{coverage_map::CoverageMap, source_coverage::SourceCoverageBuilder};
use starcoin_vm_types::file_format::CompiledModule;
use std::{
fs,
fs::File,
io::{self, Write},
path::Path,
};
use structopt::StructOpt;
#[derive(Debug, StructOpt)]
#[structopt(
name = "Move Source Coverage",
about = "Annotate Move Source Code with Coverage Information"
)]
struct Args {
/// The path to the coverage map or trace file
#[structopt(long = "input-trace-path", short = "t")]
pub input_trace_path: String,
/// Whether the passed-in file is a raw trace file or a serialized coverage map
#[structopt(long = "is-raw-trace", short = "r")]
pub is_raw_trace_file: bool,
/// The path to the module binary
#[structopt(long = "module-path", short = "b")]
pub module_binary_path: String,
/// The path to the source file
#[structopt(long = "source-path", short = "s")]
pub source_file_path: String,
/// Optional path to save coverage. Printed to stdout if not present.
#[structopt(long = "coverage-path", short = "o")]
pub coverage_path: Option<String>,
}
fn main() | {
let args = Args::from_args();
let source_map_extension = "mvsm";
let coverage_map = if args.is_raw_trace_file {
CoverageMap::from_trace_file(&args.input_trace_path)
} else {
CoverageMap::from_binary_file(&args.input_trace_path)
};
let bytecode_bytes = fs::read(&args.module_binary_path).expect("Unable to read bytecode file");
let compiled_module =
CompiledModule::deserialize(&bytecode_bytes).expect("Module blob can't be deserialized");
let source_map = source_map_from_file::<OwnedLoc>(
&Path::new(&args.module_binary_path).with_extension(source_map_extension),
)
.map(remap_owned_loc_to_loc)
.unwrap();
let source_path = Path::new(&args.source_file_path);
let source_cov = SourceCoverageBuilder::new(&compiled_module, &coverage_map, &source_map);
let mut coverage_writer: Box<dyn Write> = match &args.coverage_path {
Some(x) => {
let path = Path::new(x);
Box::new(File::create(&path).unwrap())
}
None => Box::new(io::stdout()),
};
source_cov
.compute_source_coverage(&source_path)
.output_source_coverage(&mut coverage_writer)
.unwrap();
} |
|
sodium.rs | extern crate sodiumoxide;
use domain::wallet::KeyDerivationMethod;
use errors::prelude::*;
use self::sodiumoxide::crypto::aead::chacha20poly1305_ietf;
use self::sodiumoxide::utils;
use std::cmp;
use std::io;
use std::io::{Read, Write};
use utils::crypto::pwhash_argon2i13;
pub const KEYBYTES: usize = chacha20poly1305_ietf::KEYBYTES;
pub const NONCEBYTES: usize = chacha20poly1305_ietf::NONCEBYTES;
pub const TAGBYTES: usize = chacha20poly1305_ietf::TAGBYTES;
sodium_type!(Key, chacha20poly1305_ietf::Key, KEYBYTES);
sodium_type!(Nonce, chacha20poly1305_ietf::Nonce, NONCEBYTES);
sodium_type!(Tag, chacha20poly1305_ietf::Tag, TAGBYTES);
impl Nonce {
pub fn increment(&mut self) {
utils::increment_le(&mut (self.0).0);
}
}
pub fn gen_key() -> Key {
Key(chacha20poly1305_ietf::gen_key())
}
pub fn derive_key(passphrase: &str, salt: &pwhash_argon2i13::Salt, key_derivation_method: &KeyDerivationMethod) -> Result<Key, IndyError> {
let mut key_bytes = [0u8; chacha20poly1305_ietf::KEYBYTES];
pwhash_argon2i13::pwhash(&mut key_bytes, passphrase.as_bytes(), salt, key_derivation_method)
.map_err(|err| err.extend("Can't derive key"))?;
Ok(Key::new(key_bytes))
}
pub fn gen_nonce() -> Nonce {
Nonce(chacha20poly1305_ietf::gen_nonce())
}
pub fn gen_nonce_and_encrypt(data: &[u8], key: &Key) -> (Vec<u8>, Nonce) {
let nonce = gen_nonce();
let encrypted_data = chacha20poly1305_ietf::seal(
data,
None,
&nonce.0,
&key.0,
);
(encrypted_data, nonce)
}
pub fn gen_nonce_and_encrypt_detached(data: &[u8], aad: &[u8], key: &Key) -> (Vec<u8>, Nonce, Tag) {
let nonce = gen_nonce();
let mut plain = data.to_vec();
let tag = chacha20poly1305_ietf::seal_detached(
plain.as_mut_slice(),
Some(aad),
&nonce.0,
&key.0
);
(plain.to_vec(), nonce, Tag(tag))
}
pub fn decrypt_detached(data: &[u8], key: &Key, nonce: &Nonce, tag: &Tag, ad: Option<&[u8]>) -> Result<Vec<u8>, IndyError> {
let mut plain = data.to_vec();
chacha20poly1305_ietf::open_detached(plain.as_mut_slice(),
ad,
&tag.0,
&nonce.0,
&key.0,
)
        .map_err(|_| IndyError::from_msg(IndyErrorKind::InvalidStructure, "Unable to decrypt data"))
.map(|()| plain)
}
pub fn encrypt(data: &[u8], key: &Key, nonce: &Nonce) -> Vec<u8> {
chacha20poly1305_ietf::seal(
data,
None,
&nonce.0,
&key.0,
)
}
pub fn decrypt(data: &[u8], key: &Key, nonce: &Nonce) -> Result<Vec<u8>, IndyError> {
chacha20poly1305_ietf::open(
&data,
None,
&nonce.0,
&key.0,
)
.map_err(|_| IndyError::from_msg(IndyErrorKind::InvalidStructure, "Unable to open sodium chacha20poly1305_ietf"))
}
pub struct Writer<W: Write> {
buffer: Vec<u8>,
chunk_size: usize,
key: Key,
nonce: Nonce,
inner: W,
}
impl<W: Write> Writer<W> {
pub fn new(inner: W, key: Key, nonce: Nonce, chunk_size: usize) -> Self {
Writer {
buffer: Vec::new(),
chunk_size,
key,
nonce,
inner,
}
}
#[allow(unused)]
pub fn into_inner(self) -> W {
self.inner
}
}
impl<W: Write> Write for Writer<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.buffer.write_all(buf)?; // TODO: Small optimizations are possible
let mut chunk_start = 0;
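        // Encrypt and write out every complete chunk; a trailing partial chunk
        // stays buffered for the next write or flush. Each chunk is sealed
        // under its own nonce, incremented once per chunk.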
while self.buffer.len() >= chunk_start + self.chunk_size {
let chunk = &self.buffer[chunk_start..chunk_start + self.chunk_size];
self.inner.write_all(&encrypt(chunk, &self.key, &self.nonce))?;
self.nonce.increment();
chunk_start += self.chunk_size;
}
if chunk_start > 0 {
self.buffer.drain(..chunk_start);
}
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
        if !self.buffer.is_empty() {
            self.inner.write_all(&encrypt(&self.buffer, &self.key, &self.nonce))?;
            self.nonce.increment();
            self.buffer.clear(); // don't re-encrypt the same bytes on a later flush
        }
        self.inner.flush()
}
}
pub struct Reader<R: Read> {
rest_buffer: Vec<u8>,
chunk_buffer: Vec<u8>,
key: Key,
nonce: Nonce,
inner: R,
}
impl<R: Read> Reader<R> {
pub fn new(inner: R, key: Key, nonce: Nonce, chunk_size: usize) -> Self {
Reader {
rest_buffer: Vec::new(),
chunk_buffer: vec![0; chunk_size + TAGBYTES],
key,
nonce,
inner,
}
}
#[allow(unused)]
pub fn into_inner(self) -> R {
self.inner
}
fn _read_chunk(&mut self) -> io::Result<usize> {
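        // Fill the chunk buffer (ciphertext plus tag) from the inner reader,
        // tolerating short reads and retrying on EINTR; a short final chunk is
        // returned with its actual length.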
let mut read = 0;
while read < self.chunk_buffer.len() {
match self.inner.read(&mut self.chunk_buffer[read..]) {
Ok(0) => break,
Ok(n) => read += n,
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => continue,
Err(e) => Err(e)?
}
}
if read == 0 {
            Err(io::Error::new(io::ErrorKind::UnexpectedEof, "No more crypto chunks to consume"))
} else {
Ok(read)
}
}
}
impl<R: Read> Read for Reader<R> {
fn | (&mut self, buf: &mut [u8]) -> io::Result<usize> {
let mut pos = 0;
// Consume from rest buffer
        if !self.rest_buffer.is_empty() {
let to_copy = cmp::min(self.rest_buffer.len(), buf.len() - pos);
buf[pos..pos + to_copy].copy_from_slice(&self.rest_buffer[..to_copy]);
pos += to_copy;
self.rest_buffer.drain(..to_copy);
}
// Consume from chunks
while pos < buf.len() {
let chunk_size = self._read_chunk()?;
let chunk = decrypt(&self.chunk_buffer[..chunk_size], &self.key, &self.nonce)
.map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "Invalid data in crypto chunk"))?;
self.nonce.increment();
let to_copy = cmp::min(chunk.len(), buf.len() - pos);
buf[pos..pos + to_copy].copy_from_slice(&chunk[..to_copy]);
pos += to_copy;
// Save rest in rest buffer
if pos == buf.len() && to_copy < chunk.len() {
self.rest_buffer.extend(&chunk[to_copy..]);
}
}
Ok(buf.len())
}
}
#[cfg(test)]
mod tests {
extern crate rmp_serde;
use super::*;
use utils::crypto::randombytes::randombytes;
#[test]
fn derivation_argon2i_mod_produces_expected_result() {
let passphrase = "passphrase";
let salt_bytes: [u8; 32] = [
24, 62, 35, 31, 123, 241, 94, 24, 192, 110, 199, 143, 173, 20, 23, 102,
184, 99, 221, 64, 247, 230, 11, 253, 10, 7, 80, 236, 185, 249, 110, 187
];
let key_bytes: [u8; 32] = [
148, 89, 76, 239, 127, 103, 13, 86, 84, 217, 216, 13, 223, 141, 225, 41,
223, 126, 145, 138, 174, 31, 142, 199, 81, 12, 40, 201, 67, 8, 6, 251
];
let res = derive_key(
passphrase,
&pwhash_argon2i13::Salt::from_slice(&salt_bytes).unwrap(),
&KeyDerivationMethod::ARGON2I_MOD,
).unwrap();
assert_eq!(res, Key::new(key_bytes))
}
#[test]
fn derivation_argon2i_int_produces_expected_result() {
let passphrase = "passphrase";
let salt_bytes: [u8; 32] = [
24, 62, 35, 31, 123, 241, 94, 24, 192, 110, 199, 143, 173, 20, 23, 102,
184, 99, 221, 64, 247, 230, 11, 253, 10, 7, 80, 236, 185, 249, 110, 187
];
let key_bytes: [u8; 32] = [
247, 55, 177, 252, 244, 130, 218, 129, 113, 206, 72, 44, 29, 68, 134, 215,
249, 233, 131, 199, 38, 87, 69, 217, 156, 217, 10, 160, 30, 148, 80, 160
];
let res = derive_key(
passphrase,
&pwhash_argon2i13::Salt::from_slice(&salt_bytes).unwrap(),
&KeyDerivationMethod::ARGON2I_INT,
).unwrap();
assert_eq!(res, Key::new(key_bytes))
}
#[test]
fn gen_nonce_and_encrypt_decrypt_works() {
let data = randombytes(100);
let key = gen_key();
let (c, nonce) = gen_nonce_and_encrypt(&data, &key);
let u = decrypt(&c, &key, &nonce).unwrap();
assert_eq!(data, u);
}
#[test]
pub fn gen_nonce_and_encrypt_detached_decrypt_detached_works() {
let data = randombytes(100);
let key = gen_key();
        let aad = randombytes(100);
let (c, nonce, tag) = gen_nonce_and_encrypt_detached(&data, aad.as_slice(), &key);
let u = decrypt_detached(&c, &key, &nonce, &tag, Some(aad.as_slice())).unwrap();
assert_eq!(data, u);
}
#[test]
fn encrypt_decrypt_works_for_nonce() {
let data = randombytes(16);
let key = gen_key();
let nonce = gen_nonce();
let c = encrypt(&data, &key, &nonce);
let u = decrypt(&c, &key, &nonce).unwrap();
assert_eq!(data, u)
}
#[test]
fn nonce_serialize_deserialize_works() {
let nonce = gen_nonce();
let serialized = rmp_serde::to_vec(&nonce).unwrap();
let deserialized: Nonce = rmp_serde::from_slice(&serialized).unwrap();
assert_eq!(serialized.len(), NONCEBYTES + 2);
assert_eq!(nonce, deserialized)
}
#[test]
fn key_serialize_deserialize_works() {
let key = gen_key();
let serialized = rmp_serde::to_vec(&key).unwrap();
let deserialized: Key = rmp_serde::from_slice(&serialized).unwrap();
assert_eq!(serialized.len(), KEYBYTES + 2);
assert_eq!(key, deserialized)
}
#[test]
fn writer_reader_works_for_less_than_one_chunk() {
let plain = randombytes(7);
let key = gen_key();
let nonce = gen_nonce();
let mut writer = Writer::new(Vec::<u8>::new(), key.clone(), nonce.clone(), 10);
writer.write_all(&plain).unwrap();
writer.flush().unwrap();
let encrypted = writer.into_inner();
assert_eq!(encrypted.len(), 7 + TAGBYTES);
let mut decrypted = vec![0u8; 7];
let mut reader = Reader::new(&encrypted[..], key, nonce, 10);
reader.read_exact(&mut decrypted).unwrap();
assert_eq!(plain, decrypted);
}
#[test]
fn writer_reader_works_for_exact_one_chunk() {
let plain = randombytes(10);
let key = gen_key();
let nonce = gen_nonce();
let mut writer = Writer::new(Vec::<u8>::new(), key.clone(), nonce.clone(), 10);
writer.write_all(&plain).unwrap();
writer.flush().unwrap();
let encrypted = writer.into_inner();
assert_eq!(encrypted.len(), 10 + TAGBYTES);
let mut decrypted = vec![0u8; 10];
let mut reader = Reader::new(&encrypted[..], key, nonce, 10);
reader.read_exact(&mut decrypted).unwrap();
assert_eq!(plain, decrypted);
}
#[test]
fn writer_reader_works_for_one_to_two_chunks() {
let plain = randombytes(13);
let key = gen_key();
let nonce = gen_nonce();
let mut writer = Writer::new(Vec::<u8>::new(), key.clone(), nonce.clone(), 10);
writer.write_all(&plain).unwrap();
writer.flush().unwrap();
let encrypted = writer.into_inner();
assert_eq!(encrypted.len(), 13 + 2 * TAGBYTES);
let mut decrypted = vec![0u8; 13];
let mut reader = Reader::new(&encrypted[..], key, nonce, 10);
reader.read_exact(&mut decrypted).unwrap();
assert_eq!(plain, decrypted);
}
#[test]
fn writer_reader_works_for_exact_two_chunks() {
let plain = randombytes(20);
let key = gen_key();
let nonce = gen_nonce();
let mut writer = Writer::new(Vec::<u8>::new(), key.clone(), nonce.clone(), 10);
writer.write_all(&plain).unwrap();
writer.flush().unwrap();
let encrypted = writer.into_inner();
assert_eq!(encrypted.len(), 20 + 2 * TAGBYTES);
let mut decrypted = vec![0u8; 20];
let mut reader = Reader::new(&encrypted[..], key, nonce, 10);
reader.read_exact(&mut decrypted).unwrap();
assert_eq!(plain, decrypted);
}
}
| read |
test.py | # import gevent.monkey
# gevent.monkey.patch_socket()
from pyEtherCAT import MasterEtherCAT
import time
import os
#============================================================================#
# A simple EtherCAT package for the C95.
# Ideally this would be built up after developing a deeper understanding of the
# individual packets, but the explanations and experiments have not caught up yet,
# so for now only the part that can toggle GPIO on/off at high speed is bundled.
# It runs with Python 3 on Linux (including Raspberry Pi).
# sudo python3 test03.py
#============================================================================#
# Simple library starts here
#============================================================================#
def EtherCAT_Init(nic):
    cat = MasterEtherCAT.MasterEtherCAT(nic)  # specify the address of the network card
return cat
def EtherCAT_SetUp(cat):
    cat.EEPROM_SetUp(cat.ADP)  # EEPROM setup; normally no changes needed
    cat.EEPROM_Stasus(enable=0x00, command=0x04)  # EEPROM setup; normally no | ADO=ADDR, DATA=[
data & 0xFF, (data >> 8) & 0xFF])
(DATA, WKC) = cat.socket_read()
print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))
    ADDR = 0x0120  # AL control register
    data = 0x0002  # 2h: request the Pre-Operational state
cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[
data & 0xFF, (data >> 8) & 0xFF])
(DATA, WKC) = cat.socket_read()
print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))
    ADDR = 0x0120  # AL control register
    data = 0x0004  # 4h: request the Safe-Operational state
cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[
data & 0xFF, (data >> 8) & 0xFF])
(DATA, WKC) = cat.socket_read()
print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))
    ADDR = 0x0120  # AL control register
    data = 0x0008  # 8h: request the Operational state
cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[
data & 0xFF, (data >> 8) & 0xFF])
(DATA, WKC) = cat.socket_read()
print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))
def EtherCAT_GPIOMode(cat, data):
    ADDR = 0x0F00  # digital I/O output data register
    # data = 0x00FF  # output data
cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[
data & 0xFF, (data >> 8) & 0xFF])
(DATA, WKC) = cat.socket_read()
print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))
def EtherCAT_GPIO_Out(cat, data):
ADDR = 0x0F10
cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[data & 0xFF, (data >> 8) & 0xFF])
#(DATA,WKC) = cat.socket_read()
#============================================================================#
# End of the simple library
#============================================================================#
def main():
cat = EtherCAT_Init("eth0") # EtherCATのネットワーク初期設定
#-- EtherCATのステートマシンを実行に移す処理
cat.ADP = 0x0000 # PCから1台目は0、2台目以降は-1していく
EtherCAT_SetUp(cat) # EtherCATスレーブの初期設定
EtherCAT_GPIOMode(cat, 0xFFFF) # EtherCATスレーブのGPIO方向設定 0:入力 1:出力
#-- EtherCATのステートマシンを実行に移す処理
cat.ADP = 0x0000 - 1 # 例 これは2台目 繋がってなければ必要ない
EtherCAT_SetUp(cat) # EtherCATスレーブの初期設定
EtherCAT_GPIOMode(cat, 0xFFFF) # EtherCATスレーブのGPIO方向設定 0:入力 1:出力
#-- EtherCATのステートマシンを実行に移す処理
cat.ADP = 0x0000 - 2 # 例 これは3台目 繋がってなければ必要ない
EtherCAT_SetUp(cat) # EtherCATスレーブの初期設定
EtherCAT_GPIOMode(cat, 0xFFFF) # EtherCATスレーブのGPIO方向設定 0:入力 1:出力
# -- 1台目のLEDをシフトする
TIME = 0.1
cat.ADP = 0x0000
flag = 0
CNT = 0
try:
while 1:
# time.sleep(TIME)
cat.ADP = 0x0000 - 0
EtherCAT_GPIO_Out(cat, 0xFFFF)
time.sleep(TIME)
cat.ADP = 0x0000 - 1
EtherCAT_GPIO_Out(cat, 0xFFFF)
time.sleep(TIME)
cat.ADP = 0x0000 - 2
EtherCAT_GPIO_Out(cat, 0xFFFF)
time.sleep(TIME)
# for i in range(16):
# time.sleep(TIME)
# EtherCAT_GPIO_Out(cat,0x0001<<i);
# for i in range(3):
cat.ADP = 0x0000 - 0
EtherCAT_GPIO_Out(cat, 0x0000)
time.sleep(TIME)
cat.ADP = 0x0000 - 1
EtherCAT_GPIO_Out(cat, 0x0000)
time.sleep(TIME)
cat.ADP = 0x0000 - 2
EtherCAT_GPIO_Out(cat, 0x0000)
time.sleep(TIME)
# EtherCAT_GPIO_Out(cat,0x0000);
# for i in range(0xFFFF):
# EtherCAT_GPIO_Out(cat,i);
except KeyboardInterrupt:
EtherCAT_GPIO_Out(cat, 0x0000)
print("")
print("End.")
if __name__ == "__main__":
main()
| changes needed
    ADDR = 0x0120  # AL control register
    data = 0x0002  # 2h: request the Pre-Operational state
cat.APWR(IDX=0x00, ADP=cat.ADP, |
index.ts | import { BigNumberish } from '@ethersproject/bignumber';
import { formatUnits } from '@ethersproject/units';
import { Multicaller } from '../../utils';
export const author = 'gamiumworld';
export const version = '0.1.0';
const tokenAbi = [
'function balanceOf(address _owner) view returns (uint256 balance)'
];
const stakingAbi = [
'function totalStakeTokenDeposited(address user) view returns (uint256)'
];
const liquidityPoolAbi = [
'function getReserves() view returns (uint112 _reserve0, uint112 _reserve1, uint32 _blockTimestampLast)',
'function totalSupply() view returns (uint256)'
];
export async function strategy(
space,
network,
provider,
addresses,
options,
snapshot
) {
const blockTag = typeof snapshot === 'number' ? snapshot : 'latest';
options.token = options.token || '0x5B6bf0c7f989dE824677cFBD507D9635965e9cD3';
options.lp_token =
options.lp_token || '0xEdeec0ED10Abee9b5616bE220540CAb40C9d991E';
options.staking_token =
options.staking_token || '0x8a3FB54dE0df64915FD66B55e1594141C1A880AB';
options.staking_pair =
options.staking_pair || '0xaD0916e7Ba7100629EAe9143e035F98ab5EA4ABd';
options.symbol = options.symbol || 'GMM';
options.decimals = options.decimals || 18;
const liquidityPoolMulticaller = new Multicaller(
network,
provider,
liquidityPoolAbi,
{ blockTag }
);
liquidityPoolMulticaller.call(
'lpTotalSupply',
options.lp_token,
'totalSupply'
);
liquidityPoolMulticaller.call('lpReserves', options.lp_token, 'getReserves');
const {
lpTotalSupply,
lpReserves
} = await liquidityPoolMulticaller.execute();
const liquidityPoolTokenRatio =
parseFloat(formatUnits(lpReserves[0], options.decimals)) /
parseFloat(formatUnits(lpTotalSupply, options.decimals));
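  // GMM reserve per LP token; used below to value staked LP positions in
  // underlying GMM terms (this assumes reserve0 is the GMM side of the pair).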
const tokenMulticaller = new Multicaller(network, provider, tokenAbi, {
blockTag
});
const stakingTokenMulticaller = new Multicaller(
network,
provider,
stakingAbi,
{ blockTag }
);
const stakingPairMulticaller = new Multicaller(
network,
provider,
stakingAbi,
{ blockTag }
); | address,
options.staking_pair,
'totalStakeTokenDeposited',
[address]
);
stakingTokenMulticaller.call(
address,
options.staking_token,
'totalStakeTokenDeposited',
[address]
);
tokenMulticaller.call(address, options.token, 'balanceOf', [address]);
});
const [stakingPairResponse, stakingTokenResponse, tokenResponse]: [
Record<string, BigNumberish>,
Record<string, BigNumberish>,
Record<string, BigNumberish>
] = await Promise.all([
stakingPairMulticaller.execute(),
stakingTokenMulticaller.execute(),
tokenMulticaller.execute()
]);
return Object.fromEntries(
addresses.map((address) => {
const tokenBalance = parseFloat(
formatUnits(tokenResponse[address], options.decimals)
);
const stakingTokenBalance = parseFloat(
formatUnits(stakingTokenResponse[address], options.decimals)
);
const stakingPairBalance =
parseFloat(
formatUnits(stakingPairResponse[address], options.decimals)
) *
(1 + liquidityPoolTokenRatio);
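      // Voting power: plain balance plus staked GMM and staked LP positions,
      // each counted at double weight.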
return [
address,
tokenBalance + 2 * stakingTokenBalance + 2 * stakingPairBalance
];
})
);
} |
addresses.forEach((address) => {
stakingPairMulticaller.call( |
org-chart.js | sap.ui.define(['sap/ui/webc/common/thirdparty/base/asset-registries/Icons'], function (Icons) { 'use strict';
const name = "org-chart";
const pathData = "M484 341q28 0 28 28v113q0 13-7.5 21t-20.5 8H313q-13 0-20.5-8t-7.5-21V369q0-28 28-28h57v-57H143v57h57q28 0 28 28v113q0 13-7.5 21t-20.5 8H29q-13 0-20.5-8T1 482V369q0-28 28-28h57v-86q0-28 29-28h113v-57h-85q-28 0-28-28V28q0-28 28-28h227q28 0 28 28v114q0 28-28 28h-85v57h113q13 0 21 7.5t8 20.5v86h57zM171 113h171V57H171v56zm0 284H58v57h113v-57zm284 0H342v57h113v-57z";
const ltr = false;
const collection = "SAP-icons-v5";
const packageName = "@ui5/webcomponents-icons";
Icons.registerIcon(name, { pathData, ltr, collection, packageName });
var pathDataV4 = { pathData };
|
}); | return pathDataV4; |
index.tsx | import { useEffect, useState } from 'react';
import { useHistory, useParams } from 'react-router-dom';
import toast from 'react-hot-toast';
import logoImgLight from '../../assets/images/logo-light.svg';
import logoImgDark from '../../assets/images/logo-dark.svg';
import { RoomCode } from '../../components/RoomCode';
import { useAuth } from '../../hooks/useAuth';
import {
SvgLoginRequiredDark
} from '../../components/iconComponents/Login/LoginRequiredDark';
import {
SvgLoginRequiredLight
} from '../../components/iconComponents/Login/LoginRequiredLight';
import {
EmptyQuestions
} from '../../components/iconComponents/EmptyQuestions/EmptyQuestions';
import './styles.scss';
import { useTheme } from '../../hooks/useTheme';
import { ButtonToggleTheme } from '../../components/ButtonToggleTheme';
import { Question } from '../../components/Question';
import { useRoom } from '../../hooks/useRoom';
import { Button } from '../../components/Button';
import { Modal } from '../../components/Modal';
import { database } from '../../services/firebase';
import { Header } from '../../components/Header';
type RoomParams = {
id: string;
}
export function AdminRoom() {
const history = useHistory();
const params = useParams<RoomParams>();
const roomId = params.id;
const {
questions,
title,
handleUpdatedQuestion,
handleEndRoom
} = useRoom(roomId);
const [questionIdModalOpen, setQuestionIdModalOpen] = useState<string | undefined>();
const { user, signInWithGoogle } = useAuth();
const { theme } = useTheme();
useEffect(() => {
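    // Guard the admin page: anyone other than the room author is redirected
    // to the regular room view.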
if (user) {
const roomRef = database.ref(`rooms/${roomId}`);
roomRef
.once('value', room => {
const valueRoom = room.val();
const isAuthor = valueRoom.authorId === user?.id;
if (!isAuthor) {
history.push(`/rooms/${roomId}`);
}
});
}
}, [history, roomId, user]);
async function handleEndRoomAdmin() {
await handleEndRoom();
history.push('/');
toast.success('Sala encerrada com sucesso');
}
async function handleLogin() { |
async function handleDeleteQuestion(questionId?: string) {
if (questionId) {
await database.ref(`rooms/${roomId}/questions/${questionId}`)
.remove();
setQuestionIdModalOpen(undefined);
}
}
async function handleHighlightQuestion(questionId: string) {
await handleUpdatedQuestion(questionId, 'highlighted');
}
async function handleCheckQuestionAsAnswered(questionId: string) {
await handleUpdatedQuestion(questionId, 'answered');
}
return (
<div id='page-admin-room' className={theme}>
<ButtonToggleTheme />
<Header>
<div className="content">
<img src={theme === 'light' ? logoImgLight : logoImgDark} alt='Letmeask' />
<div>
{user && <RoomCode code={roomId} />}
{user && <Button isOutlined onClick={handleEndRoomAdmin}>Encerrar</Button>}
</div>
</div>
</Header>
<main className="content">
{user && (
<div className="room-title">
<h1>Sala {title}</h1>
{questions.length > 0 && <span>{questions.length} pergunta(s)</span>}
</div>
)}
{!user && (
<div className="login-required">
{theme === 'dark' ?
<SvgLoginRequiredDark aria-label='Faça login para continuar' /> :
<SvgLoginRequiredLight aria-label='Faça login para continuar' />
}
<span>Faça login para acessar essa página</span>
<Button onClick={handleLogin}>Fazer login</Button>
</div>
)}
<div className="question-list">
{user && (
questions.length > 0 ? (
questions.map(question => (
<Question key={question.id}
isAnswered={question.isAnswered}
isHighlighted={question.isHighlighted}
content={question.content}
author={question.author}
>
<button
type="button"
onClick={async () => await handleCheckQuestionAsAnswered(question.id)}
>
<svg className='icon-answered' width="24" height="24" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<circle cx="12.0003" cy="11.9998" r="9.00375" stroke="#737380" strokeWidth="1.5" strokeLinecap="round" strokeLinejoin="round" />
<path d="M8.44287 12.3391L10.6108 14.507L10.5968 14.493L15.4878 9.60193" stroke="#737380" strokeWidth="1.5" strokeLinecap="round" strokeLinejoin="round" />
</svg>
</button>
{!question.isAnswered && (
<button
type='button'
onClick={async () => await handleHighlightQuestion(question.id)}
>
<svg className='icon-select' width="24" height="24" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fillRule="evenodd" clipRule="evenodd" d="M12 17.9999H18C19.657 17.9999 21 16.6569 21 14.9999V6.99988C21 5.34288 19.657 3.99988 18 3.99988H6C4.343 3.99988 3 5.34288 3 6.99988V14.9999C3 16.6569 4.343 17.9999 6 17.9999H7.5V20.9999L12 17.9999Z" stroke="#737380" strokeWidth="1.5" strokeLinecap="round" strokeLinejoin="round" />
</svg>
</button>
)}
<button
type='button'
onClick={() => setQuestionIdModalOpen(question.id)}
>
<svg className='icon-delete' width="24" height="24" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M3 5.99988H5H21" stroke="#737380" strokeWidth="1.5" strokeLinecap="round" strokeLinejoin="round" />
<path d="M8 5.99988V3.99988C8 3.46944 8.21071 2.96074 8.58579 2.58566C8.96086 2.21059 9.46957 1.99988 10 1.99988H14C14.5304 1.99988 15.0391 2.21059 15.4142 2.58566C15.7893 2.96074 16 3.46944 16 3.99988V5.99988M19 5.99988V19.9999C19 20.5303 18.7893 21.039 18.4142 21.4141C18.0391 21.7892 17.5304 21.9999 17 21.9999H7C6.46957 21.9999 5.96086 21.7892 5.58579 21.4141C5.21071 21.039 5 20.5303 5 19.9999V5.99988H19Z" stroke="#737380" strokeWidth="1.5" strokeLinecap="round" strokeLinejoin="round" />
</svg>
</button>
</Question>
))
) : (
<div className="no-questions">
<EmptyQuestions />
<span>Nenhuma pergunta por aqui...</span>
<p>Envie o código da sala para seus amigos e comece a responder perguntas!</p>
</div>
)
)}
</div>
</main>
<Modal
theme={theme}
isOpen={questionIdModalOpen !== undefined}
onRequestClose={() => setQuestionIdModalOpen(undefined)}
>
<div className='modal-content'>
<svg width="48" height="48" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M3 5.99988H5H21" stroke="#e74b5d" strokeWidth="1.5" strokeLinecap="round" strokeLinejoin="round" />
<path d="M8 5.99988V3.99988C8 3.46944 8.21071 2.96074 8.58579 2.58566C8.96086 2.21059 9.46957 1.99988 10 1.99988H14C14.5304 1.99988 15.0391 2.21059 15.4142 2.58566C15.7893 2.96074 16 3.46944 16 3.99988V5.99988M19 5.99988V19.9999C19 20.5303 18.7893 21.039 18.4142 21.4141C18.0391 21.7892 17.5304 21.9999 17 21.9999H7C6.46957 21.9999 5.96086 21.7892 5.58579 21.4141C5.21071 21.039 5 20.5303 5 19.9999V5.99988H19Z" stroke="#e74b5d" strokeWidth="1.5" strokeLinecap="round" strokeLinejoin="round" />
</svg>
<strong>Excluir pergunta</strong>
<span>Tem certeza que você deseja excluir esta pergunta?</span>
<div className="buttons">
<button className='button-cancel' onClick={() => setQuestionIdModalOpen(undefined)}>Cancelar</button>
<button className='button-delete' onClick={() => handleDeleteQuestion(questionIdModalOpen)}>Sim, excluir</button>
</div>
</div>
</Modal>
</div>
)
}
// Reconciliation Algorithm | if (!user) {
await signInWithGoogle();
}
} |
DefaultFontStyles.js | "use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var glamorExports_1 = require("../glamorExports");
var language_1 = require("@uifabric/utilities/lib/language");
// Default urls.
var DefaultBaseUrl = 'https://static2.sharepointonline.com/files/fabric/assets';
// Fallback fonts, if specified system or web fonts are unavailable.
var FontFamilyFallbacks = "'Segoe UI', -apple-system, BlinkMacSystemFont, 'Roboto', 'Helvetica Neue', sans-serif";
// Font face names to be registered.
var FontNameArabic = 'Segoe UI Web (Arabic)';
var FontNameCyrillic = 'Segoe UI Web (Cyrillic)';
var FontNameEastEuropean = 'Segoe UI Web (East European)';
var FontNameGreek = 'Segoe UI Web (Greek)';
var FontNameHebrew = 'Segoe UI Web (Hebrew)';
var FontNameThai = 'Leelawadee UI Web';
var FontNameVietnamese = 'Segoe UI Web (Vietnamese)';
var FontNameWestEuropean = 'Segoe UI Web (West European)';
var FontNameSelawik = 'Selawik Web';
// Font families with fallbacks, for the general regions.
var FontFamilyArabic = "'" + FontNameArabic + "'";
var FontFamilyChineseSimplified = "'Microsoft Yahei', Verdana, Simsun";
var FontFamilyChineseTraditional = "'Microsoft Jhenghei', Pmingliu";
var FontFamilyCyrillic = "'" + FontNameCyrillic + "'";
var FontFamilyEastEuropean = "'" + FontNameEastEuropean + "'";
var FontFamilyGreek = "'" + FontNameGreek + "'";
var FontFamilyHebrew = "'" + FontNameHebrew + "'";
var FontFamilyHindi = "'Nirmala UI'";
var FontFamilyJapanese = "'Yu Gothic', 'Meiryo UI', Meiryo, 'MS Pgothic', Osaka";
var FontFamilyKorean = "'Malgun Gothic', Gulim";
var FontFamilySelawik = "'" + FontNameSelawik + "'";
var FontFamilyThai = "'Leelawadee UI Web', 'Kmer UI'";
var FontFamilyVietnamese = "'" + FontNameVietnamese + "'";
var FontFamilyWestEuropean = "'" + FontNameWestEuropean + "'";
// Mapping of language prefix to font family.
var LanguageToFontMap = {
'ar': FontFamilyArabic,
'bg': FontFamilyCyrillic,
'cs': FontFamilyEastEuropean,
'el': FontFamilyGreek,
'et': FontFamilyEastEuropean,
'he': FontFamilyHebrew,
'hi': FontFamilyHindi,
'hr': FontFamilyEastEuropean,
'hu': FontFamilyEastEuropean,
'ja': FontFamilyJapanese,
'kk': FontFamilyEastEuropean,
'ko': FontFamilyKorean,
'lt': FontFamilyEastEuropean,
'lv': FontFamilyEastEuropean,
'pl': FontFamilyEastEuropean,
'ru': FontFamilyCyrillic,
'sk': FontFamilyEastEuropean,
'sr-latn': FontFamilyEastEuropean,
'th': FontFamilyThai,
'tr': FontFamilyEastEuropean,
'uk': FontFamilyCyrillic,
'vi': FontFamilyVietnamese,
'zh-hans': FontFamilyChineseSimplified,
'zh-hant': FontFamilyChineseTraditional,
};
var FontFileVersion = 2.38;
// Standard font sizes.
var FontSizes;
(function (FontSizes) {
FontSizes.mini = '10px';
FontSizes.xSmall = '11px';
FontSizes.small = '12px';
FontSizes.smallPlus = '13px';
FontSizes.medium = '14px';
FontSizes.mediumPlus = '15px';
FontSizes.icon = '16px';
FontSizes.large = '17px';
FontSizes.xLarge = '21px';
FontSizes.xxLarge = '28px';
FontSizes.superLarge = '42px';
FontSizes.mega = '72px';
})(FontSizes = exports.FontSizes || (exports.FontSizes = {}));
// Standard font weights.
var FontWeights;
(function (FontWeights) {
FontWeights.light = 100;
FontWeights.semilight = 300;
FontWeights.regular = 400;
FontWeights.semibold = 600;
FontWeights.bold = 700;
})(FontWeights = exports.FontWeights || (exports.FontWeights = {}));
// Standard font styling.
exports.DefaultFontStyles = {
tiny: _createFont(FontSizes.mini, FontWeights.semibold),
xSmall: _createFont(FontSizes.xSmall, FontWeights.regular),
small: _createFont(FontSizes.small, FontWeights.regular),
smallPlus: _createFont(FontSizes.smallPlus, FontWeights.regular),
medium: _createFont(FontSizes.medium, FontWeights.regular),
mediumPlus: _createFont(FontSizes.mediumPlus, FontWeights.regular),
large: _createFont(FontSizes.large, FontWeights.semilight),
xLarge: _createFont(FontSizes.xLarge, FontWeights.light),
xxLarge: _createFont(FontSizes.xxLarge, FontWeights.light),
superLarge: _createFont(FontSizes.superLarge, FontWeights.light),
mega: _createFont(FontSizes.mega, FontWeights.light),
icon: {
fontFamily: '"FabricMDL2Icons"',
fontWeight: FontWeights.regular,
fontStyle: 'normal'
}
};
function _getFontFamily() {
var language = language_1.getLanguage();
var fontFamily = FontFamilyWestEuropean;
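    // A map key matches when it starts with the detected language code
    // (e.g. language 'sr' selects the 'sr-latn' entry); otherwise the West
    // European font stack is used.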
for (var lang in LanguageToFontMap) {
if (LanguageToFontMap.hasOwnProperty(lang) && language && lang.indexOf(language) === 0) {
// tslint:disable-next-line:no-any
fontFamily = LanguageToFontMap[lang];
break;
}
}
return fontFamily + ", " + FontFamilyFallbacks;
}
function _createFont(size, weight) {
return {
fontFamily: _getFontFamily(),
MozOsxFontSmoothing: 'grayscale',
WebkitFontSmoothing: 'antialiased',
fontSize: size,
fontWeight: weight
};
}
function _registerFontFace(fontFamily, url, fontWeight) {
fontFamily = "'" + fontFamily + "'";
glamorExports_1.fontFace({
fontFamily: fontFamily,
src: "url('" + url + ".woff2') format('woff2')," +
("url('" + url + ".woff') format('woff')"),
fontWeight: fontWeight,
fontStyle: 'normal'
});
}
function _registerFontFaceSet(baseUrl, fontFamily, cdnFolder, cdnFontName) {
if (cdnFontName === void 0) { cdnFontName = 'segoeui'; }
var urlBase = baseUrl + "/" + cdnFolder + "/" + cdnFontName;
_registerFontFace(fontFamily, urlBase + '-light', FontWeights.light);
_registerFontFace(fontFamily, urlBase + '-semilight', FontWeights.semilight);
_registerFontFace(fontFamily, urlBase + '-regular', FontWeights.regular);
_registerFontFace(fontFamily, urlBase + '-semibold', FontWeights.semibold);
}
function _registerDefaultFontFaces() {
var baseUrl = _getFontBaseUrl();
if (baseUrl) {
var fontUrl = baseUrl + "/fonts";
var iconUrl = baseUrl + "/icons";
// Produce @font-face definitions for all supported web fonts.
_registerFontFaceSet(fontUrl, FontNameThai, 'leelawadeeui-thai', 'leelawadeeui');
_registerFontFaceSet(fontUrl, FontNameArabic, 'segoeui-arabic');
_registerFontFaceSet(fontUrl, FontNameCyrillic, 'segoeui-cyrillic');
_registerFontFaceSet(fontUrl, FontNameEastEuropean, 'segoeui-easteuropean');
_registerFontFaceSet(fontUrl, FontNameGreek, 'segoeui-greek');
_registerFontFaceSet(fontUrl, FontNameHebrew, 'segoeui-hebrew');
_registerFontFaceSet(fontUrl, FontNameVietnamese, 'segoeui-vietnamese');
_registerFontFaceSet(fontUrl, FontNameWestEuropean, 'segoeui-westeuropean');
_registerFontFaceSet(fontUrl, FontFamilySelawik, 'selawik', 'selawik');
// Leelawadee UI (Thai) does not have a 'light' weight, so we override
// the font-face generated above to use the 'semilight' weight instead.
_registerFontFace('Leelawadee UI Web', fontUrl + "/leelawadeeui-thai/leelawadeeui-semilight", FontWeights.light); | _registerFontFace('Leelawadee UI Web', fontUrl + "/leelawadeeui-thai/leelawadeeui-bold", FontWeights.semibold);
// Register icon urls.
_registerFontFace('FabricMDL2Icons', iconUrl + "/fabricmdl2icons-" + FontFileVersion, FontWeights.regular);
}
}
/**
* Reads the fontBaseUrl from window.FabricConfig.fontBaseUrl or falls back to a default.
*/
function _getFontBaseUrl() {
var win = typeof window !== 'undefined' ? window : undefined;
// tslint:disable-next-line:no-string-literal no-any
var fabricConfig = win ? win['FabricConfig'] : undefined;
return (fabricConfig && fabricConfig.fontBaseUrl !== undefined) ? fabricConfig.fontBaseUrl : DefaultBaseUrl;
}
/**
* Register the font faces.
*/
_registerDefaultFontFaces();
//# sourceMappingURL=DefaultFontStyles.js.map | // Leelawadee UI (Thai) does not have a 'semibold' weight, so we override
// the font-face generated above to use the 'bold' weight instead. |
controls.js | function Controls() {
let _matrix;
let _stateManager;
const THEMES = ['red-leds',
'yellow-leds',
'green-leds',
'blue-leds',
'white-leds',
'black-leds'
];
function init(stateManager, matrix) {
_stateManager = stateManager;
_matrix = matrix;
$('#invert-button').click(_matrix.invert);
$('#clear-button').click(_matrix.clear);
$('#shift-up-button').click(_matrix.shiftUp);
$('#shift-down-button').click(_matrix.shiftDown);
$('#shift-left-button').click(_matrix.shiftLeft);
$('#shift-right-button').click(_matrix.shiftRight);
$('.leds-case').click(function () {
setColor($(this).attr('id'));
            const themeIndex = THEMES.indexOf($(this).attr('id'));
            updateState({color: themeIndex});
});
$('#width-input').change(function () {
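        // `| 0` truncates the input's string value to an integer.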
_matrix.setup($('#width-input').val() | 0, $('#height-input').val() | 0);
updateState({width: $('#width-input').val() | 0});
});
$('#height-input').change(function () {
_matrix.setup($('#width-input').val() | 0, $('#height-input').val() | 0);
updateState({height: $('#height-input').val() | 0});
});
}
function setColor(color) {
$('body').removeClass('red-leds yellow-leds green-leds blue-leds white-leds black-leds').addClass(color);
}
function stateChanged(state) {
setColor(THEMES[state.color]);
}
function updateState(state) {
_stateManager.updateState(state)
}
return {
init: init,
stateChanged: stateChanged,
} | } |
|
__init__.py | from django.contrib.sites.models import Site, get_current_site
from django.core import urlresolvers, paginator
from django.core.exceptions import ImproperlyConfigured
import urllib
PING_URL = "http://www.google.com/webmasters/tools/ping"
class SitemapNotFound(Exception):
|
def ping_google(sitemap_url=None, ping_url=PING_URL):
"""
Alerts Google that the sitemap for the current site has been updated.
If sitemap_url is provided, it should be an absolute path to the sitemap
for this site -- e.g., '/sitemap.xml'. If sitemap_url is not provided, this
function will attempt to deduce it by using urlresolvers.reverse().
"""
if sitemap_url is None:
try:
# First, try to get the "index" sitemap URL.
sitemap_url = urlresolvers.reverse('django.contrib.sitemaps.views.index')
except urlresolvers.NoReverseMatch:
try:
# Next, try for the "global" sitemap URL.
sitemap_url = urlresolvers.reverse('django.contrib.sitemaps.views.sitemap')
except urlresolvers.NoReverseMatch:
pass
if sitemap_url is None:
raise SitemapNotFound("You didn't provide a sitemap_url, and the sitemap URL couldn't be auto-detected.")
from django.contrib.sites.models import Site
current_site = Site.objects.get_current()
url = "http://%s%s" % (current_site.domain, sitemap_url)
params = urllib.urlencode({'sitemap':url})
urllib.urlopen("%s?%s" % (ping_url, params))
class Sitemap(object):
# This limit is defined by Google. See the index documentation at
# http://sitemaps.org/protocol.php#index.
limit = 50000
def __get(self, name, obj, default=None):
try:
attr = getattr(self, name)
except AttributeError:
return default
if callable(attr):
return attr(obj)
return attr
def items(self):
return []
def location(self, obj):
return obj.get_absolute_url()
def _get_paginator(self):
if not hasattr(self, "_paginator"):
self._paginator = paginator.Paginator(self.items(), self.limit)
return self._paginator
paginator = property(_get_paginator)
def get_urls(self, page=1, site=None):
if site is None:
if Site._meta.installed:
try:
site = Site.objects.get_current()
except Site.DoesNotExist:
pass
if site is None:
raise ImproperlyConfigured("In order to use Sitemaps you must either use the sites framework or pass in a Site or RequestSite object in your view code.")
urls = []
for item in self.paginator.page(page).object_list:
loc = "http://%s%s" % (site.domain, self.__get('location', item))
priority = self.__get('priority', item, None)
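            # Serialize priority only when it is set; None becomes the empty
            # string so the template can omit the element.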
url_info = {
'location': loc,
'lastmod': self.__get('lastmod', item, None),
'changefreq': self.__get('changefreq', item, None),
'priority': str(priority is not None and priority or '')
}
urls.append(url_info)
return urls
class FlatPageSitemap(Sitemap):
def items(self):
current_site = Site.objects.get_current()
return current_site.flatpage_set.filter(registration_required=False)
class GenericSitemap(Sitemap):
priority = None
changefreq = None
def __init__(self, info_dict, priority=None, changefreq=None):
self.queryset = info_dict['queryset']
self.date_field = info_dict.get('date_field', None)
self.priority = priority
self.changefreq = changefreq
def items(self):
# Make sure to return a clone; we don't want premature evaluation.
return self.queryset.filter()
def lastmod(self, item):
if self.date_field is not None:
return getattr(item, self.date_field)
return None
| pass |
rpc_client.rs | use crate::{
client_error::{ClientError, ClientErrorKind, Result as ClientResult},
http_sender::HttpSender,
mock_sender::{MockSender, Mocks},
rpc_config::RpcAccountInfoConfig,
rpc_config::{
RpcGetConfirmedSignaturesForAddress2Config, RpcLargestAccountsConfig,
RpcProgramAccountsConfig, RpcSendTransactionConfig, RpcSimulateTransactionConfig,
RpcTokenAccountsFilter,
},
rpc_request::{RpcError, RpcRequest, TokenAccountsFilter},
rpc_response::*,
rpc_sender::RpcSender,
};
use bincode::serialize;
use indicatif::{ProgressBar, ProgressStyle};
use log::*;
use serde_json::{json, Value};
use solana_account_decoder::{
parse_token::{TokenAccountType, UiTokenAccount, UiTokenAmount},
UiAccount, UiAccountData, UiAccountEncoding,
};
use solana_sdk::{
account::Account,
clock::{
Slot, UnixTimestamp, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT,
MAX_HASH_AGE_IN_SECONDS,
},
commitment_config::{CommitmentConfig, CommitmentLevel},
epoch_info::EpochInfo,
epoch_schedule::EpochSchedule,
fee_calculator::{FeeCalculator, FeeRateGovernor},
hash::Hash,
pubkey::Pubkey,
signature::Signature,
transaction::{self, uses_durable_nonce, Transaction},
};
use solana_transaction_status::{
EncodedConfirmedBlock, EncodedConfirmedTransaction, TransactionStatus, UiTransactionEncoding,
};
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
use std::{
net::SocketAddr,
sync::RwLock,
thread::sleep,
time::{Duration, Instant},
};
pub struct RpcClient {
sender: Box<dyn RpcSender + Send + Sync + 'static>,
default_cluster_transaction_encoding: RwLock<Option<UiTransactionEncoding>>,
}
fn | (
transaction: &Transaction,
encoding: UiTransactionEncoding,
) -> ClientResult<String> {
let serialized = serialize(transaction)
.map_err(|e| ClientErrorKind::Custom(format!("transaction serialization failed: {}", e)))?;
let encoded = match encoding {
UiTransactionEncoding::Base58 => bs58::encode(serialized).into_string(),
UiTransactionEncoding::Base64 => base64::encode(serialized),
_ => {
return Err(ClientErrorKind::Custom(format!(
"unsupported transaction encoding: {}. Supported encodings: base58, base64",
encoding
))
.into())
}
};
Ok(encoded)
}
impl RpcClient {
pub fn new_sender<T: RpcSender + Send + Sync + 'static>(sender: T) -> Self {
Self {
sender: Box::new(sender),
default_cluster_transaction_encoding: RwLock::new(None),
}
}
pub fn new(url: String) -> Self {
Self::new_sender(HttpSender::new(url))
}
pub fn new_with_timeout(url: String, timeout: Duration) -> Self {
Self::new_sender(HttpSender::new_with_timeout(url, timeout))
}
pub fn new_mock(url: String) -> Self {
Self::new_sender(MockSender::new(url))
}
pub fn new_mock_with_mocks(url: String, mocks: Mocks) -> Self {
Self::new_sender(MockSender::new_with_mocks(url, mocks))
}
pub fn new_socket(addr: SocketAddr) -> Self {
Self::new(get_rpc_request_str(addr, false))
}
pub fn new_socket_with_timeout(addr: SocketAddr, timeout: Duration) -> Self {
let url = get_rpc_request_str(addr, false);
Self::new_with_timeout(url, timeout)
}
pub fn confirm_transaction(&self, signature: &Signature) -> ClientResult<bool> {
Ok(self
.confirm_transaction_with_commitment(signature, CommitmentConfig::default())?
.value)
}
pub fn confirm_transaction_with_commitment(
&self,
signature: &Signature,
commitment_config: CommitmentConfig,
) -> RpcResult<bool> {
let Response { context, value } = self.get_signature_statuses(&[*signature])?;
Ok(Response {
context,
value: value[0]
.as_ref()
.filter(|result| result.satisfies_commitment(commitment_config))
.map(|result| result.status.is_ok())
.unwrap_or_default(),
})
}
pub fn send_transaction(&self, transaction: &Transaction) -> ClientResult<Signature> {
self.send_transaction_with_config(transaction, RpcSendTransactionConfig::default())
}
fn default_cluster_transaction_encoding(&self) -> Result<UiTransactionEncoding, RpcError> {
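        // Determine the wire encoding once per client and cache it: clusters
        // older than 1.3.16 only accept base58, newer ones prefer base64.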
let default_cluster_transaction_encoding =
self.default_cluster_transaction_encoding.read().unwrap();
if let Some(encoding) = *default_cluster_transaction_encoding {
Ok(encoding)
} else {
drop(default_cluster_transaction_encoding);
let cluster_version = self.get_version().map_err(|e| {
RpcError::RpcRequestError(format!("cluster version query failed: {}", e))
})?;
let cluster_version =
semver::Version::parse(&cluster_version.solana_core).map_err(|e| {
RpcError::RpcRequestError(format!("failed to parse cluster version: {}", e))
})?;
// Prefer base64 since 1.3.16
let encoding = if cluster_version < semver::Version::new(1, 3, 16) {
UiTransactionEncoding::Base58
} else {
UiTransactionEncoding::Base64
};
*self.default_cluster_transaction_encoding.write().unwrap() = Some(encoding);
Ok(encoding)
}
}
pub fn send_transaction_with_config(
&self,
transaction: &Transaction,
config: RpcSendTransactionConfig,
) -> ClientResult<Signature> {
let encoding = if let Some(encoding) = config.encoding {
encoding
} else {
self.default_cluster_transaction_encoding()?
};
let config = RpcSendTransactionConfig {
encoding: Some(encoding),
..config
};
let serialized_encoded = serialize_encode_transaction(transaction, encoding)?;
let signature_base58_str: String = self.send(
RpcRequest::SendTransaction,
json!([serialized_encoded, config]),
)?;
let signature = signature_base58_str
.parse::<Signature>()
.map_err(|err| Into::<ClientError>::into(RpcError::ParseError(err.to_string())))?;
// A mismatching RPC response signature indicates an issue with the RPC node, and
// should not be passed along to confirmation methods. The transaction may or may
// not have been submitted to the cluster, so callers should verify the success of
// the correct transaction signature independently.
if signature != transaction.signatures[0] {
Err(RpcError::RpcRequestError(format!(
"RPC node returned mismatched signature {:?}, expected {:?}",
signature, transaction.signatures[0]
))
.into())
} else {
Ok(transaction.signatures[0])
}
}
pub fn simulate_transaction(
&self,
transaction: &Transaction,
) -> RpcResult<RpcSimulateTransactionResult> {
self.simulate_transaction_with_config(transaction, RpcSimulateTransactionConfig::default())
}
pub fn simulate_transaction_with_config(
&self,
transaction: &Transaction,
config: RpcSimulateTransactionConfig,
) -> RpcResult<RpcSimulateTransactionResult> {
let encoding = if let Some(encoding) = config.encoding {
encoding
} else {
self.default_cluster_transaction_encoding()?
};
let config = RpcSimulateTransactionConfig {
encoding: Some(encoding),
..config
};
let serialized_encoded = serialize_encode_transaction(transaction, encoding)?;
self.send(
RpcRequest::SimulateTransaction,
json!([serialized_encoded, config]),
)
}
pub fn get_signature_status(
&self,
signature: &Signature,
) -> ClientResult<Option<transaction::Result<()>>> {
self.get_signature_status_with_commitment(signature, CommitmentConfig::default())
}
pub fn get_signature_statuses(
&self,
signatures: &[Signature],
) -> RpcResult<Vec<Option<TransactionStatus>>> {
let signatures: Vec<_> = signatures.iter().map(|s| s.to_string()).collect();
self.send(RpcRequest::GetSignatureStatuses, json!([signatures]))
}
pub fn get_signature_statuses_with_history(
&self,
signatures: &[Signature],
) -> RpcResult<Vec<Option<TransactionStatus>>> {
let signatures: Vec<_> = signatures.iter().map(|s| s.to_string()).collect();
self.send(
RpcRequest::GetSignatureStatuses,
json!([signatures, {
"searchTransactionHistory": true
}]),
)
}
pub fn get_signature_status_with_commitment(
&self,
signature: &Signature,
commitment_config: CommitmentConfig,
) -> ClientResult<Option<transaction::Result<()>>> {
let result: Response<Vec<Option<TransactionStatus>>> = self.send(
RpcRequest::GetSignatureStatuses,
json!([[signature.to_string()]]),
)?;
Ok(result.value[0]
.clone()
.filter(|result| result.satisfies_commitment(commitment_config))
.map(|status_meta| status_meta.status))
}
pub fn get_signature_status_with_commitment_and_history(
&self,
signature: &Signature,
commitment_config: CommitmentConfig,
search_transaction_history: bool,
) -> ClientResult<Option<transaction::Result<()>>> {
let result: Response<Vec<Option<TransactionStatus>>> = self.send(
RpcRequest::GetSignatureStatuses,
json!([[signature.to_string()], {
"searchTransactionHistory": search_transaction_history
}]),
)?;
Ok(result.value[0]
.clone()
.filter(|result| result.satisfies_commitment(commitment_config))
.map(|status_meta| status_meta.status))
}
pub fn get_slot(&self) -> ClientResult<Slot> {
self.get_slot_with_commitment(CommitmentConfig::default())
}
pub fn get_slot_with_commitment(
&self,
commitment_config: CommitmentConfig,
) -> ClientResult<Slot> {
self.send(RpcRequest::GetSlot, json!([commitment_config]))
}
pub fn supply_with_commitment(
&self,
commitment_config: CommitmentConfig,
) -> RpcResult<RpcSupply> {
self.send(RpcRequest::GetSupply, json!([commitment_config]))
}
pub fn total_supply(&self) -> ClientResult<u64> {
self.total_supply_with_commitment(CommitmentConfig::default())
}
pub fn total_supply_with_commitment(
&self,
commitment_config: CommitmentConfig,
) -> ClientResult<u64> {
self.send(RpcRequest::GetTotalSupply, json!([commitment_config]))
}
pub fn get_largest_accounts_with_config(
&self,
config: RpcLargestAccountsConfig,
) -> RpcResult<Vec<RpcAccountBalance>> {
self.send(RpcRequest::GetLargestAccounts, json!([config]))
}
pub fn get_vote_accounts(&self) -> ClientResult<RpcVoteAccountStatus> {
self.get_vote_accounts_with_commitment(CommitmentConfig::default())
}
pub fn get_vote_accounts_with_commitment(
&self,
commitment_config: CommitmentConfig,
) -> ClientResult<RpcVoteAccountStatus> {
self.send(RpcRequest::GetVoteAccounts, json!([commitment_config]))
}
pub fn get_cluster_nodes(&self) -> ClientResult<Vec<RpcContactInfo>> {
self.send(RpcRequest::GetClusterNodes, Value::Null)
}
pub fn get_confirmed_block(&self, slot: Slot) -> ClientResult<EncodedConfirmedBlock> {
self.get_confirmed_block_with_encoding(slot, UiTransactionEncoding::Json)
}
pub fn get_confirmed_block_with_encoding(
&self,
slot: Slot,
encoding: UiTransactionEncoding,
) -> ClientResult<EncodedConfirmedBlock> {
self.send(RpcRequest::GetConfirmedBlock, json!([slot, encoding]))
}
pub fn get_confirmed_blocks(
&self,
start_slot: Slot,
end_slot: Option<Slot>,
) -> ClientResult<Vec<Slot>> {
self.send(
RpcRequest::GetConfirmedBlocks,
json!([start_slot, end_slot]),
)
}
pub fn get_confirmed_blocks_with_limit(
&self,
start_slot: Slot,
limit: usize,
) -> ClientResult<Vec<Slot>> {
self.send(
RpcRequest::GetConfirmedBlocksWithLimit,
json!([start_slot, limit]),
)
}
pub fn get_confirmed_signatures_for_address(
&self,
address: &Pubkey,
start_slot: Slot,
end_slot: Slot,
) -> ClientResult<Vec<Signature>> {
let signatures_base58_str: Vec<String> = self.send(
RpcRequest::GetConfirmedSignaturesForAddress,
json!([address.to_string(), start_slot, end_slot]),
)?;
let mut signatures = vec![];
for signature_base58_str in signatures_base58_str {
signatures.push(
signature_base58_str.parse::<Signature>().map_err(|err| {
Into::<ClientError>::into(RpcError::ParseError(err.to_string()))
})?,
);
}
Ok(signatures)
}
pub fn get_confirmed_signatures_for_address2(
&self,
address: &Pubkey,
) -> ClientResult<Vec<RpcConfirmedTransactionStatusWithSignature>> {
self.get_confirmed_signatures_for_address2_with_config(
address,
GetConfirmedSignaturesForAddress2Config::default(),
)
}
pub fn get_confirmed_signatures_for_address2_with_config(
&self,
address: &Pubkey,
config: GetConfirmedSignaturesForAddress2Config,
) -> ClientResult<Vec<RpcConfirmedTransactionStatusWithSignature>> {
let config = RpcGetConfirmedSignaturesForAddress2Config {
before: config.before.map(|signature| signature.to_string()),
until: config.until.map(|signature| signature.to_string()),
limit: config.limit,
};
let result: Vec<RpcConfirmedTransactionStatusWithSignature> = self.send(
RpcRequest::GetConfirmedSignaturesForAddress2,
json!([address.to_string(), config]),
)?;
Ok(result)
}
pub fn get_confirmed_transaction(
&self,
signature: &Signature,
encoding: UiTransactionEncoding,
) -> ClientResult<EncodedConfirmedTransaction> {
self.send(
RpcRequest::GetConfirmedTransaction,
json!([signature.to_string(), encoding]),
)
}
pub fn get_block_time(&self, slot: Slot) -> ClientResult<UnixTimestamp> {
let request = RpcRequest::GetBlockTime;
let response = self.sender.send(request, json!([slot]));
response
.map(|result_json| {
if result_json.is_null() {
return Err(RpcError::ForUser(format!("Block Not Found: slot={}", slot)).into());
}
let result = serde_json::from_value(result_json)
.map_err(|err| ClientError::new_with_request(err.into(), request))?;
trace!("Response block timestamp {:?} {:?}", slot, result);
Ok(result)
})
.map_err(|err| err.into_with_request(request))?
}
pub fn get_epoch_info(&self) -> ClientResult<EpochInfo> {
self.get_epoch_info_with_commitment(CommitmentConfig::default())
}
pub fn get_epoch_info_with_commitment(
&self,
commitment_config: CommitmentConfig,
) -> ClientResult<EpochInfo> {
self.send(RpcRequest::GetEpochInfo, json!([commitment_config]))
}
pub fn get_leader_schedule(
&self,
slot: Option<Slot>,
) -> ClientResult<Option<RpcLeaderSchedule>> {
self.get_leader_schedule_with_commitment(slot, CommitmentConfig::default())
}
pub fn get_leader_schedule_with_commitment(
&self,
slot: Option<Slot>,
commitment_config: CommitmentConfig,
) -> ClientResult<Option<RpcLeaderSchedule>> {
self.send(
RpcRequest::GetLeaderSchedule,
json!([slot, commitment_config]),
)
}
pub fn get_epoch_schedule(&self) -> ClientResult<EpochSchedule> {
self.send(RpcRequest::GetEpochSchedule, Value::Null)
}
pub fn get_identity(&self) -> ClientResult<Pubkey> {
let rpc_identity: RpcIdentity = self.send(RpcRequest::GetIdentity, Value::Null)?;
rpc_identity.identity.parse::<Pubkey>().map_err(|_| {
ClientError::new_with_request(
RpcError::ParseError("Pubkey".to_string()).into(),
RpcRequest::GetIdentity,
)
})
}
pub fn get_inflation_governor(&self) -> ClientResult<RpcInflationGovernor> {
self.send(RpcRequest::GetInflationGovernor, Value::Null)
}
pub fn get_inflation_rate(&self) -> ClientResult<RpcInflationRate> {
self.send(RpcRequest::GetInflationRate, Value::Null)
}
pub fn get_version(&self) -> ClientResult<RpcVersionInfo> {
self.send(RpcRequest::GetVersion, Value::Null)
}
pub fn minimum_ledger_slot(&self) -> ClientResult<Slot> {
self.send(RpcRequest::MinimumLedgerSlot, Value::Null)
}
pub fn send_and_confirm_transaction(
&self,
transaction: &Transaction,
) -> ClientResult<Signature> {
let signature = self.send_transaction(transaction)?;
let recent_blockhash = if uses_durable_nonce(transaction).is_some() {
self.get_recent_blockhash_with_commitment(CommitmentConfig::recent())?
.value
.0
} else {
transaction.message.recent_blockhash
};
let status = loop {
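            // Poll until the signature is known, or until the transaction's
            // blockhash has expired (its fee calculator is no longer known),
            // whichever comes first.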
let status = self.get_signature_status(&signature)?;
if status.is_none() {
if self
.get_fee_calculator_for_blockhash_with_commitment(
&recent_blockhash,
CommitmentConfig::recent(),
)?
.value
.is_none()
{
break status;
}
} else {
break status;
}
if cfg!(not(test)) {
// Retry twice a second
sleep(Duration::from_millis(500));
}
};
if let Some(result) = status {
match result {
Ok(_) => Ok(signature),
Err(err) => Err(err.into()),
}
} else {
Err(RpcError::ForUser(
"unable to confirm transaction. \
This can happen in situations such as transaction expiration \
and insufficient fee-payer funds"
.to_string(),
)
.into())
}
}
pub fn get_account(&self, pubkey: &Pubkey) -> ClientResult<Account> {
self.get_account_with_commitment(pubkey, CommitmentConfig::default())?
.value
.ok_or_else(|| RpcError::ForUser(format!("AccountNotFound: pubkey={}", pubkey)).into())
}
pub fn get_account_with_commitment(
&self,
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> RpcResult<Option<Account>> {
let config = RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::Base64),
commitment: Some(commitment_config),
data_slice: None,
};
let response = self.sender.send(
RpcRequest::GetAccountInfo,
json!([pubkey.to_string(), config]),
);
response
.map(|result_json| {
if result_json.is_null() {
return Err(
RpcError::ForUser(format!("AccountNotFound: pubkey={}", pubkey)).into(),
);
}
let Response {
context,
value: rpc_account,
} = serde_json::from_value::<Response<Option<UiAccount>>>(result_json)?;
trace!("Response account {:?} {:?}", pubkey, rpc_account);
let account = rpc_account.and_then(|rpc_account| rpc_account.decode());
Ok(Response {
context,
value: account,
})
})
.map_err(|err| {
Into::<ClientError>::into(RpcError::ForUser(format!(
"AccountNotFound: pubkey={}: {}",
pubkey, err
)))
})?
}
pub fn get_multiple_accounts(&self, pubkeys: &[Pubkey]) -> ClientResult<Vec<Option<Account>>> {
Ok(self
.get_multiple_accounts_with_commitment(pubkeys, CommitmentConfig::default())?
.value)
}
pub fn get_multiple_accounts_with_commitment(
&self,
pubkeys: &[Pubkey],
commitment_config: CommitmentConfig,
) -> RpcResult<Vec<Option<Account>>> {
let config = RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::Base64),
commitment: Some(commitment_config),
data_slice: None,
};
let pubkeys: Vec<_> = pubkeys.iter().map(|pubkey| pubkey.to_string()).collect();
let response = self.send(RpcRequest::GetMultipleAccounts, json!([pubkeys, config]))?;
let Response {
context,
value: accounts,
} = serde_json::from_value::<Response<Vec<Option<UiAccount>>>>(response)?;
let accounts: Vec<Option<Account>> = accounts
.into_iter()
.map(|rpc_account| rpc_account.map(|a| a.decode()).flatten())
.collect();
Ok(Response {
context,
value: accounts,
})
}
pub fn get_account_data(&self, pubkey: &Pubkey) -> ClientResult<Vec<u8>> {
Ok(self.get_account(pubkey)?.data)
}
pub fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> ClientResult<u64> {
let request = RpcRequest::GetMinimumBalanceForRentExemption;
let minimum_balance_json = self
.sender
.send(request, json!([data_len]))
.map_err(|err| err.into_with_request(request))?;
let minimum_balance: u64 = serde_json::from_value(minimum_balance_json)
.map_err(|err| ClientError::new_with_request(err.into(), request))?;
trace!(
"Response minimum balance {:?} {:?}",
data_len,
minimum_balance
);
Ok(minimum_balance)
}
/// Request the balance of the account `pubkey`.
pub fn get_balance(&self, pubkey: &Pubkey) -> ClientResult<u64> {
Ok(self
.get_balance_with_commitment(pubkey, CommitmentConfig::default())?
.value)
}
pub fn get_balance_with_commitment(
&self,
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> RpcResult<u64> {
self.send(
RpcRequest::GetBalance,
json!([pubkey.to_string(), commitment_config]),
)
}
pub fn get_program_accounts(&self, pubkey: &Pubkey) -> ClientResult<Vec<(Pubkey, Account)>> {
self.get_program_accounts_with_config(
pubkey,
RpcProgramAccountsConfig {
filters: None,
account_config: RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::Base64),
..RpcAccountInfoConfig::default()
},
},
)
}
pub fn get_program_accounts_with_config(
&self,
pubkey: &Pubkey,
config: RpcProgramAccountsConfig,
) -> ClientResult<Vec<(Pubkey, Account)>> {
let accounts: Vec<RpcKeyedAccount> = self.send(
RpcRequest::GetProgramAccounts,
json!([pubkey.to_string(), config]),
)?;
parse_keyed_accounts(accounts, RpcRequest::GetProgramAccounts)
}
/// Request the transaction count.
pub fn get_transaction_count(&self) -> ClientResult<u64> {
self.get_transaction_count_with_commitment(CommitmentConfig::default())
}
pub fn get_transaction_count_with_commitment(
&self,
commitment_config: CommitmentConfig,
) -> ClientResult<u64> {
self.send(RpcRequest::GetTransactionCount, json!([commitment_config]))
}
pub fn get_recent_blockhash(&self) -> ClientResult<(Hash, FeeCalculator)> {
let (blockhash, fee_calculator, _last_valid_slot) = self
.get_recent_blockhash_with_commitment(CommitmentConfig::default())?
.value;
Ok((blockhash, fee_calculator))
}
pub fn get_recent_blockhash_with_commitment(
&self,
commitment_config: CommitmentConfig,
) -> RpcResult<(Hash, FeeCalculator, Slot)> {
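        // Try the newer GetFees request first (it also returns
        // last_valid_slot); fall back to GetRecentBlockhash for older nodes,
        // reporting last_valid_slot as 0 in that case.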
let (context, blockhash, fee_calculator, last_valid_slot) = if let Ok(Response {
context,
value:
RpcFees {
blockhash,
fee_calculator,
last_valid_slot,
},
}) =
self.send::<Response<RpcFees>>(RpcRequest::GetFees, json!([commitment_config]))
{
(context, blockhash, fee_calculator, last_valid_slot)
} else if let Ok(Response {
context,
value:
RpcBlockhashFeeCalculator {
blockhash,
fee_calculator,
},
}) = self.send::<Response<RpcBlockhashFeeCalculator>>(
RpcRequest::GetRecentBlockhash,
json!([commitment_config]),
) {
(context, blockhash, fee_calculator, 0)
} else {
return Err(ClientError::new_with_request(
RpcError::ParseError("RpcBlockhashFeeCalculator or RpcFees".to_string()).into(),
RpcRequest::GetRecentBlockhash,
));
};
let blockhash = blockhash.parse().map_err(|_| {
ClientError::new_with_request(
RpcError::ParseError("Hash".to_string()).into(),
RpcRequest::GetRecentBlockhash,
)
})?;
Ok(Response {
context,
value: (blockhash, fee_calculator, last_valid_slot),
})
}
pub fn get_fee_calculator_for_blockhash(
&self,
blockhash: &Hash,
) -> ClientResult<Option<FeeCalculator>> {
Ok(self
.get_fee_calculator_for_blockhash_with_commitment(
blockhash,
CommitmentConfig::default(),
)?
.value)
}
pub fn get_fee_calculator_for_blockhash_with_commitment(
&self,
blockhash: &Hash,
commitment_config: CommitmentConfig,
) -> RpcResult<Option<FeeCalculator>> {
let Response { context, value } = self.send::<Response<Option<RpcFeeCalculator>>>(
RpcRequest::GetFeeCalculatorForBlockhash,
json!([blockhash.to_string(), commitment_config]),
)?;
Ok(Response {
context,
value: value.map(|rf| rf.fee_calculator),
})
}
pub fn get_fee_rate_governor(&self) -> RpcResult<FeeRateGovernor> {
let Response {
context,
value: RpcFeeRateGovernor { fee_rate_governor },
} =
self.send::<Response<RpcFeeRateGovernor>>(RpcRequest::GetFeeRateGovernor, Value::Null)?;
Ok(Response {
context,
value: fee_rate_governor,
})
}
pub fn get_new_blockhash(&self, blockhash: &Hash) -> ClientResult<(Hash, FeeCalculator)> {
let mut num_retries = 0;
let start = Instant::now();
while start.elapsed().as_secs() < 5 {
if let Ok((new_blockhash, fee_calculator)) = self.get_recent_blockhash() {
if new_blockhash != *blockhash {
return Ok((new_blockhash, fee_calculator));
}
}
debug!("Got same blockhash ({:?}), will retry...", blockhash);
// Retry ~twice during a slot
sleep(Duration::from_millis(
500 * DEFAULT_TICKS_PER_SLOT / DEFAULT_TICKS_PER_SECOND,
));
num_retries += 1;
}
Err(RpcError::ForUser(format!(
"Unable to get new blockhash after {}ms (retried {} times), stuck at {}",
start.elapsed().as_millis(),
num_retries,
blockhash
))
.into())
}
pub fn get_first_available_block(&self) -> ClientResult<Slot> {
self.send(RpcRequest::GetFirstAvailableBlock, Value::Null)
}
pub fn get_genesis_hash(&self) -> ClientResult<Hash> {
let hash_str: String = self.send(RpcRequest::GetGenesisHash, Value::Null)?;
let hash = hash_str.parse().map_err(|_| {
ClientError::new_with_request(
RpcError::ParseError("Hash".to_string()).into(),
RpcRequest::GetGenesisHash,
)
})?;
Ok(hash)
}
pub fn get_token_account(&self, pubkey: &Pubkey) -> ClientResult<Option<UiTokenAccount>> {
Ok(self
.get_token_account_with_commitment(pubkey, CommitmentConfig::default())?
.value)
}
pub fn get_token_account_with_commitment(
&self,
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> RpcResult<Option<UiTokenAccount>> {
let config = RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::JsonParsed),
commitment: Some(commitment_config),
data_slice: None,
};
let response = self.sender.send(
RpcRequest::GetAccountInfo,
json!([pubkey.to_string(), config]),
);
response
.map(|result_json| {
if result_json.is_null() {
return Err(
RpcError::ForUser(format!("AccountNotFound: pubkey={}", pubkey)).into(),
);
}
let Response {
context,
value: rpc_account,
} = serde_json::from_value::<Response<Option<UiAccount>>>(result_json)?;
trace!("Response account {:?} {:?}", pubkey, rpc_account);
let response = {
if let Some(rpc_account) = rpc_account {
if let UiAccountData::Json(account_data) = rpc_account.data {
let token_account_type: TokenAccountType =
serde_json::from_value(account_data.parsed)?;
if let TokenAccountType::Account(token_account) = token_account_type {
return Ok(Response {
context,
value: Some(token_account),
});
}
}
}
Err(Into::<ClientError>::into(RpcError::ForUser(format!(
"Account could not be parsed as token account: pubkey={}",
pubkey
))))
};
response?
})
.map_err(|err| {
Into::<ClientError>::into(RpcError::ForUser(format!(
"AccountNotFound: pubkey={}: {}",
pubkey, err
)))
})?
}
pub fn get_token_account_balance(&self, pubkey: &Pubkey) -> ClientResult<UiTokenAmount> {
Ok(self
.get_token_account_balance_with_commitment(pubkey, CommitmentConfig::default())?
.value)
}
pub fn get_token_account_balance_with_commitment(
&self,
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> RpcResult<UiTokenAmount> {
self.send(
RpcRequest::GetTokenAccountBalance,
json!([pubkey.to_string(), commitment_config]),
)
}
pub fn get_token_accounts_by_delegate(
&self,
delegate: &Pubkey,
token_account_filter: TokenAccountsFilter,
) -> ClientResult<Vec<RpcKeyedAccount>> {
Ok(self
.get_token_accounts_by_delegate_with_commitment(
delegate,
token_account_filter,
CommitmentConfig::default(),
)?
.value)
}
pub fn get_token_accounts_by_delegate_with_commitment(
&self,
delegate: &Pubkey,
token_account_filter: TokenAccountsFilter,
commitment_config: CommitmentConfig,
) -> RpcResult<Vec<RpcKeyedAccount>> {
let token_account_filter = match token_account_filter {
TokenAccountsFilter::Mint(mint) => RpcTokenAccountsFilter::Mint(mint.to_string()),
TokenAccountsFilter::ProgramId(program_id) => {
RpcTokenAccountsFilter::ProgramId(program_id.to_string())
}
};
let config = RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::JsonParsed),
commitment: Some(commitment_config),
data_slice: None,
};
        self.send(
            RpcRequest::GetTokenAccountsByDelegate,
            json!([delegate.to_string(), token_account_filter, config]),
        )
}
pub fn get_token_accounts_by_owner(
&self,
owner: &Pubkey,
token_account_filter: TokenAccountsFilter,
) -> ClientResult<Vec<RpcKeyedAccount>> {
Ok(self
.get_token_accounts_by_owner_with_commitment(
owner,
token_account_filter,
CommitmentConfig::default(),
)?
.value)
}
pub fn get_token_accounts_by_owner_with_commitment(
&self,
owner: &Pubkey,
token_account_filter: TokenAccountsFilter,
commitment_config: CommitmentConfig,
) -> RpcResult<Vec<RpcKeyedAccount>> {
let token_account_filter = match token_account_filter {
TokenAccountsFilter::Mint(mint) => RpcTokenAccountsFilter::Mint(mint.to_string()),
TokenAccountsFilter::ProgramId(program_id) => {
RpcTokenAccountsFilter::ProgramId(program_id.to_string())
}
};
let config = RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::JsonParsed),
commitment: Some(commitment_config),
data_slice: None,
};
self.send(
RpcRequest::GetTokenAccountsByOwner,
json!([owner.to_string(), token_account_filter, config]),
)
}
pub fn get_token_supply(&self, mint: &Pubkey) -> ClientResult<UiTokenAmount> {
Ok(self
.get_token_supply_with_commitment(mint, CommitmentConfig::default())?
.value)
}
pub fn get_token_supply_with_commitment(
&self,
mint: &Pubkey,
commitment_config: CommitmentConfig,
) -> RpcResult<UiTokenAmount> {
self.send(
RpcRequest::GetTokenSupply,
json!([mint.to_string(), commitment_config]),
)
}
fn poll_balance_with_timeout_and_commitment(
&self,
pubkey: &Pubkey,
polling_frequency: &Duration,
timeout: &Duration,
commitment_config: CommitmentConfig,
) -> ClientResult<u64> {
let now = Instant::now();
loop {
            match self.get_balance_with_commitment(pubkey, commitment_config) {
Ok(bal) => {
return Ok(bal.value);
}
Err(e) => {
sleep(*polling_frequency);
if now.elapsed() > *timeout {
return Err(e);
}
}
};
}
}
pub fn poll_get_balance_with_commitment(
&self,
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> ClientResult<u64> {
self.poll_balance_with_timeout_and_commitment(
pubkey,
&Duration::from_millis(100),
&Duration::from_secs(1),
commitment_config,
)
}
pub fn wait_for_balance_with_commitment(
&self,
pubkey: &Pubkey,
expected_balance: Option<u64>,
commitment_config: CommitmentConfig,
) -> Option<u64> {
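        // Poll a bounded number of times; return early on the first
        // successful read when no expected balance is given, or as soon as
        // the observed balance matches the expectation.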
const LAST: usize = 30;
for run in 0..LAST {
let balance_result = self.poll_get_balance_with_commitment(pubkey, commitment_config);
if expected_balance.is_none() {
return balance_result.ok();
}
trace!(
"wait_for_balance_with_commitment [{}] {:?} {:?}",
run,
balance_result,
expected_balance
);
if let (Some(expected_balance), Ok(balance_result)) = (expected_balance, balance_result)
{
if expected_balance == balance_result {
return Some(balance_result);
}
}
}
None
}
/// Poll the server to confirm a transaction.
pub fn poll_for_signature(&self, signature: &Signature) -> ClientResult<()> {
self.poll_for_signature_with_commitment(signature, CommitmentConfig::default())
}
/// Poll the server to confirm a transaction.
pub fn poll_for_signature_with_commitment(
&self,
signature: &Signature,
commitment_config: CommitmentConfig,
) -> ClientResult<()> {
let now = Instant::now();
loop {
if let Ok(Some(_)) =
                self.get_signature_status_with_commitment(signature, commitment_config)
{
break;
}
if now.elapsed().as_secs() > 15 {
return Err(RpcError::ForUser(format!(
"signature not found after {} seconds",
now.elapsed().as_secs()
))
.into());
}
sleep(Duration::from_millis(250));
}
Ok(())
}
/// Poll the server to confirm a transaction.
pub fn poll_for_signature_confirmation(
&self,
signature: &Signature,
min_confirmed_blocks: usize,
) -> ClientResult<usize> {
let mut now = Instant::now();
let mut confirmed_blocks = 0;
loop {
let response = self.get_num_blocks_since_signature_confirmation(signature);
match response {
Ok(count) => {
if confirmed_blocks != count {
info!(
"signature {} confirmed {} out of {} after {} ms",
signature,
count,
min_confirmed_blocks,
now.elapsed().as_millis()
);
now = Instant::now();
confirmed_blocks = count;
}
if count >= min_confirmed_blocks {
break;
}
}
Err(err) => {
debug!("check_confirmations request failed: {:?}", err);
}
};
if now.elapsed().as_secs() > 20 {
info!(
"signature {} confirmed {} out of {} failed after {} ms",
signature,
confirmed_blocks,
min_confirmed_blocks,
now.elapsed().as_millis()
);
if confirmed_blocks > 0 {
return Ok(confirmed_blocks);
} else {
return Err(RpcError::ForUser(format!(
"signature not found after {} seconds",
now.elapsed().as_secs()
))
.into());
}
}
sleep(Duration::from_millis(250));
}
Ok(confirmed_blocks)
}
pub fn get_num_blocks_since_signature_confirmation(
&self,
signature: &Signature,
) -> ClientResult<usize> {
let result: Response<Vec<Option<TransactionStatus>>> = self.send(
RpcRequest::GetSignatureStatuses,
json!([[signature.to_string()]]),
)?;
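        // A signature that has been rooted no longer reports a confirmation
        // count, so treat a missing count as beyond the maximum lockout.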
let confirmations = result.value[0]
.clone()
.ok_or_else(|| {
ClientError::new_with_request(
ClientErrorKind::Custom("signature not found".to_string()),
RpcRequest::GetSignatureStatuses,
)
})?
.confirmations
.unwrap_or(MAX_LOCKOUT_HISTORY + 1);
Ok(confirmations)
}
pub fn send_and_confirm_transaction_with_spinner(
&self,
transaction: &Transaction,
) -> ClientResult<Signature> {
self.send_and_confirm_transaction_with_spinner_and_config(
transaction,
CommitmentConfig::default(),
RpcSendTransactionConfig::default(),
)
}
pub fn send_and_confirm_transaction_with_spinner_and_commitment(
&self,
transaction: &Transaction,
commitment: CommitmentConfig,
) -> ClientResult<Signature> {
self.send_and_confirm_transaction_with_spinner_and_config(
transaction,
commitment,
RpcSendTransactionConfig::default(),
)
}
pub fn send_and_confirm_transaction_with_spinner_and_config(
&self,
transaction: &Transaction,
commitment: CommitmentConfig,
config: RpcSendTransactionConfig,
) -> ClientResult<Signature> {
let desired_confirmations = match commitment.commitment {
CommitmentLevel::Max | CommitmentLevel::Root => MAX_LOCKOUT_HISTORY + 1,
_ => 1,
};
let mut confirmations = 0;
let progress_bar = new_spinner_progress_bar();
progress_bar.set_message(&format!(
"[{}/{}] Finalizing transaction {}",
confirmations, desired_confirmations, transaction.signatures[0],
));
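        // A durable-nonce transaction's blockhash never expires, so fetch a
        // current blockhash to use for the expiration check below instead.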
let recent_blockhash = if uses_durable_nonce(transaction).is_some() {
self.get_recent_blockhash_with_commitment(CommitmentConfig::recent())?
.value
.0
} else {
transaction.message.recent_blockhash
};
let signature = self.send_transaction_with_config(transaction, config)?;
let (signature, status) = loop {
// Get recent commitment in order to count confirmations for successful transactions
let status =
self.get_signature_status_with_commitment(&signature, CommitmentConfig::recent())?;
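            // No status yet: if the fee calculator for the transaction's
            // blockhash can no longer be found, the blockhash has expired and
            // the transaction will never be processed.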
if status.is_none() {
if self
.get_fee_calculator_for_blockhash_with_commitment(
&recent_blockhash,
CommitmentConfig::recent(),
)?
.value
.is_none()
{
break (signature, status);
}
} else {
break (signature, status);
}
if cfg!(not(test)) {
sleep(Duration::from_millis(500));
}
};
if let Some(result) = status {
if let Err(err) = result {
return Err(err.into());
}
} else {
return Err(RpcError::ForUser(
"unable to confirm transaction. \
This can happen in situations such as transaction expiration \
and insufficient fee-payer funds"
.to_string(),
)
.into());
}
let now = Instant::now();
loop {
match commitment.commitment {
CommitmentLevel::Max | CommitmentLevel::Root =>
// Return when default (max) commitment is reached
// Failed transactions have already been eliminated, `is_some` check is sufficient
{
if self.get_signature_status(&signature)?.is_some() {
progress_bar.set_message("Transaction confirmed");
progress_bar.finish_and_clear();
return Ok(signature);
}
}
_ => {
// Return when one confirmation has been reached
if confirmations >= desired_confirmations {
progress_bar.set_message("Transaction reached commitment");
progress_bar.finish_and_clear();
return Ok(signature);
}
}
}
progress_bar.set_message(&format!(
"[{}/{}] Finalizing transaction {}",
confirmations + 1,
desired_confirmations,
signature,
));
sleep(Duration::from_millis(500));
confirmations = self
.get_num_blocks_since_signature_confirmation(&signature)
.unwrap_or(confirmations);
if now.elapsed().as_secs() >= MAX_HASH_AGE_IN_SECONDS as u64 {
                return Err(RpcError::ForUser(
                    "transaction not finalized. \
                     This can happen when a transaction lands in an abandoned fork. \
                     Please retry."
                        .to_string(),
                )
                .into());
}
}
}
pub fn validator_exit(&self) -> ClientResult<bool> {
self.send(RpcRequest::ValidatorExit, Value::Null)
}
pub fn send<T>(&self, request: RpcRequest, params: Value) -> ClientResult<T>
where
T: serde::de::DeserializeOwned,
{
assert!(params.is_array() || params.is_null());
let response = self
.sender
.send(request, params)
.map_err(|err| err.into_with_request(request))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_request(err.into(), request))
}
}
#[derive(Debug, Default)]
pub struct GetConfirmedSignaturesForAddress2Config {
pub before: Option<Signature>,
pub until: Option<Signature>,
pub limit: Option<usize>,
}
fn new_spinner_progress_bar() -> ProgressBar {
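    // The bar length is arbitrary: the spinner template renders only the
    // spinner glyph and the message, never a completed fraction.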
let progress_bar = ProgressBar::new(42);
progress_bar
.set_style(ProgressStyle::default_spinner().template("{spinner:.green} {wide_msg}"));
progress_bar.enable_steady_tick(100);
progress_bar
}
pub fn get_rpc_request_str(rpc_addr: SocketAddr, tls: bool) -> String {
if tls {
format!("https://{}", rpc_addr)
} else {
format!("http://{}", rpc_addr)
}
}
fn parse_keyed_accounts(
accounts: Vec<RpcKeyedAccount>,
request: RpcRequest,
) -> ClientResult<Vec<(Pubkey, Account)>> {
let mut pubkey_accounts: Vec<(Pubkey, Account)> = Vec::new();
for RpcKeyedAccount { pubkey, account } in accounts.into_iter() {
let pubkey = pubkey.parse().map_err(|_| {
ClientError::new_with_request(
RpcError::ParseError("Pubkey".to_string()).into(),
request,
)
})?;
pubkey_accounts.push((
pubkey,
account.decode().ok_or_else(|| {
ClientError::new_with_request(
RpcError::ParseError("Account from rpc".to_string()).into(),
request,
)
})?,
));
}
Ok(pubkey_accounts)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{client_error::ClientErrorKind, mock_sender::PUBKEY};
use assert_matches::assert_matches;
use jsonrpc_core::{Error, IoHandler, Params};
use jsonrpc_http_server::{AccessControlAllowOrigin, DomainsValidation, ServerBuilder};
use serde_json::Number;
use solana_sdk::{
instruction::InstructionError, signature::Keypair, system_transaction,
transaction::TransactionError,
};
use std::{io, sync::mpsc::channel, thread};
#[test]
fn test_send() {
let (sender, receiver) = channel();
thread::spawn(move || {
let rpc_addr = "0.0.0.0:0".parse().unwrap();
let mut io = IoHandler::default();
// Successful request
io.add_method("getBalance", |_params: Params| {
Ok(Value::Number(Number::from(50)))
});
// Failed request
io.add_method("getRecentBlockhash", |params: Params| {
if params != Params::None {
Err(Error::invalid_request())
} else {
Ok(Value::String(
"deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx".to_string(),
))
}
});
let server = ServerBuilder::new(io)
.threads(1)
.cors(DomainsValidation::AllowOnly(vec![
AccessControlAllowOrigin::Any,
]))
.start_http(&rpc_addr)
.expect("Unable to start RPC server");
sender.send(*server.address()).unwrap();
server.wait();
});
let rpc_addr = receiver.recv().unwrap();
let rpc_client = RpcClient::new_socket(rpc_addr);
let balance: u64 = rpc_client
.send(
RpcRequest::GetBalance,
json!(["deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx"]),
)
.unwrap();
assert_eq!(balance, 50);
let blockhash: String = rpc_client
.send(RpcRequest::GetRecentBlockhash, Value::Null)
.unwrap();
assert_eq!(blockhash, "deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx");
// Send erroneous parameter
let blockhash: ClientResult<String> =
rpc_client.send(RpcRequest::GetRecentBlockhash, json!(["parameter"]));
        assert!(blockhash.is_err());
}
#[test]
fn test_send_transaction() {
let rpc_client = RpcClient::new_mock("succeeds".to_string());
let key = Keypair::new();
let to = solana_sdk::pubkey::new_rand();
let blockhash = Hash::default();
let tx = system_transaction::transfer(&key, &to, 50, blockhash);
let signature = rpc_client.send_transaction(&tx);
assert_eq!(signature.unwrap(), tx.signatures[0]);
let rpc_client = RpcClient::new_mock("fails".to_string());
let signature = rpc_client.send_transaction(&tx);
assert!(signature.is_err());
// Test bad signature returned from rpc node
let rpc_client = RpcClient::new_mock("malicious".to_string());
let signature = rpc_client.send_transaction(&tx);
assert!(signature.is_err());
}
#[test]
fn test_get_recent_blockhash() {
let rpc_client = RpcClient::new_mock("succeeds".to_string());
let expected_blockhash: Hash = PUBKEY.parse().unwrap();
let (blockhash, _fee_calculator) = rpc_client.get_recent_blockhash().expect("blockhash ok");
assert_eq!(blockhash, expected_blockhash);
let rpc_client = RpcClient::new_mock("fails".to_string());
assert!(rpc_client.get_recent_blockhash().is_err());
}
#[test]
fn test_get_signature_status() {
let signature = Signature::default();
let rpc_client = RpcClient::new_mock("succeeds".to_string());
let status = rpc_client.get_signature_status(&signature).unwrap();
assert_eq!(status, Some(Ok(())));
let rpc_client = RpcClient::new_mock("sig_not_found".to_string());
let status = rpc_client.get_signature_status(&signature).unwrap();
assert_eq!(status, None);
let rpc_client = RpcClient::new_mock("account_in_use".to_string());
let status = rpc_client.get_signature_status(&signature).unwrap();
assert_eq!(status, Some(Err(TransactionError::AccountInUse)));
}
#[test]
fn test_send_and_confirm_transaction() {
let rpc_client = RpcClient::new_mock("succeeds".to_string());
let key = Keypair::new();
let to = solana_sdk::pubkey::new_rand();
let blockhash = Hash::default();
let tx = system_transaction::transfer(&key, &to, 50, blockhash);
let result = rpc_client.send_and_confirm_transaction(&tx);
result.unwrap();
let rpc_client = RpcClient::new_mock("account_in_use".to_string());
let result = rpc_client.send_and_confirm_transaction(&tx);
assert!(result.is_err());
let rpc_client = RpcClient::new_mock("instruction_error".to_string());
let result = rpc_client.send_and_confirm_transaction(&tx);
assert_matches!(
result.unwrap_err().kind(),
ClientErrorKind::TransactionError(TransactionError::InstructionError(
0,
InstructionError::UninitializedAccount
))
);
let rpc_client = RpcClient::new_mock("sig_not_found".to_string());
let result = rpc_client.send_and_confirm_transaction(&tx);
if let ClientErrorKind::Io(err) = result.unwrap_err().kind() {
assert_eq!(err.kind(), io::ErrorKind::Other);
}
}
#[test]
fn test_rpc_client_thread() {
let rpc_client = RpcClient::new_mock("succeeds".to_string());
thread::spawn(move || rpc_client);
}
}
wrappers.py
# pylint: disable=W0622,E1101
"""
A basic object-oriented interface for Galaxy entities.
"""
import abc
import json
from collections.abc import (
Iterable,
Mapping,
Sequence,
)
from typing import Tuple
import bioblend
from bioblend.util import abstractclass
__all__ = (
'Wrapper',
'Step',
'Workflow',
'LibraryContentInfo',
'HistoryContentInfo',
'DatasetContainer',
'History',
'Library',
'Folder',
'Dataset',
'HistoryDatasetAssociation',
'DatasetCollection',
'HistoryDatasetCollectionAssociation',
'LibraryDatasetDatasetAssociation',
'LibraryDataset',
'Tool',
'Job',
'LibraryPreview',
'HistoryPreview',
'WorkflowPreview',
)
@abstractclass
class Wrapper:
"""
Abstract base class for Galaxy entity wrappers.
Wrapper instances wrap deserialized JSON dictionaries such as the
ones obtained by the Galaxy web API, converting key-based access to
attribute-based access (e.g., ``library['name'] -> library.name``).
Dict keys that are converted to attributes are listed in the
``BASE_ATTRS`` class variable: this is the 'stable' interface.
Note that the wrapped dictionary is accessible via the ``wrapped``
attribute.
"""
BASE_ATTRS: Tuple[str, ...] = ('id', )
def __init__(self, wrapped, parent=None, gi=None):
"""
:type wrapped: dict
:param wrapped: JSON-serializable dictionary
:type parent: :class:`Wrapper`
:param parent: the parent of this wrapper
:type gi: :class:`GalaxyInstance`
:param gi: the GalaxyInstance through which we can access this wrapper
"""
if not isinstance(wrapped, Mapping):
raise TypeError('wrapped object must be a mapping type')
# loads(dumps(x)) is a bit faster than deepcopy and allows type checks
        try:
            dumped = json.dumps(wrapped)
        except (TypeError, ValueError) as e:
            raise ValueError('wrapped object must be JSON-serializable') from e
object.__setattr__(self, 'wrapped', json.loads(dumped))
for k in self.BASE_ATTRS:
object.__setattr__(self, k, self.wrapped.get(k))
object.__setattr__(self, '_cached_parent', parent)
object.__setattr__(self, 'is_modified', False)
object.__setattr__(self, 'gi', gi)
@property
def parent(self):
"""
The parent of this wrapper.
"""
return self._cached_parent
@property
def is_mapped(self):
"""
``True`` if this wrapper is mapped to an actual Galaxy entity.
"""
return self.id is not None
def unmap(self):
"""
Disconnect this wrapper from Galaxy.
"""
object.__setattr__(self, 'id', None)
def clone(self):
"""
Return an independent copy of this wrapper.
"""
return self.__class__(self.wrapped)
def touch(self):
"""
Mark this wrapper as having been modified since its creation.
"""
object.__setattr__(self, 'is_modified', True)
if self.parent:
self.parent.touch()
def to_json(self):
"""
Return a JSON dump of this wrapper.
"""
return json.dumps(self.wrapped)
@classmethod
def from_json(cls, jdef):
"""
Build a new wrapper from a JSON dump.
"""
return cls(json.loads(jdef))
# FIXME: things like self.x[0] = 'y' do NOT call self.__setattr__
def __setattr__(self, name, value):
if name not in self.wrapped:
raise AttributeError("can't set attribute")
else:
self.wrapped[name] = value
object.__setattr__(self, name, value)
self.touch()
def __repr__(self):
return f"{self.__class__.__name__}({self.wrapped!r})"
class Step(Wrapper):
"""
Workflow step.
Steps are the main building blocks of a Galaxy workflow. A step can be: an
input (type ``data_collection_input``, ``data_input`` or
``parameter_input``), a computational tool (type ``tool``), a subworkflow
(type ``subworkflow``) or a pause (type ``pause``).
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'input_steps',
'name',
'tool_id',
'tool_inputs',
'tool_version',
'type',
)
def __init__(self, step_dict, parent):
super().__init__(step_dict, parent=parent, gi=parent.gi)
try:
stype = step_dict['type']
except KeyError:
raise ValueError('not a step dict')
if stype not in {'data_collection_input', 'data_input', 'parameter_input', 'pause', 'subworkflow', 'tool'}:
raise ValueError(f"Unknown step type: {stype!r}")
class InvocationStep(Wrapper):
"""
Invocation step.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'action',
'job_id',
'order_index',
'state',
'update_time',
'workflow_step_id',
'workflow_step_label',
'workflow_step_uuid',
)
class Workflow(Wrapper):
"""
Workflows represent ordered sequences of computations on Galaxy.
A workflow defines a sequence of steps that produce one or more
results from an input dataset.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'deleted',
'inputs',
'latest_workflow_uuid',
'name',
'owner',
'published',
'steps',
'tags',
)
POLLING_INTERVAL = 10 # for output state monitoring
def __init__(self, wf_dict, gi=None):
super().__init__(wf_dict, gi=gi)
missing_ids = []
if gi:
tools_list_by_id = [t.id for t in gi.tools.get_previews()]
else:
tools_list_by_id = []
tool_labels_to_ids = {}
for k, v in self.steps.items():
# convert step ids to str for consistency with outer keys
v['id'] = str(v['id'])
for i in v['input_steps'].values():
i['source_step'] = str(i['source_step'])
step = Step(v, self)
self.steps[k] = step
if step.type == 'tool':
if not step.tool_inputs or step.tool_id not in tools_list_by_id:
missing_ids.append(k)
tool_labels_to_ids.setdefault(step.tool_id, set()).add(step.id)
input_labels_to_ids = {}
for id_, d in self.inputs.items():
input_labels_to_ids.setdefault(d['label'], set()).add(id_)
object.__setattr__(self, 'input_labels_to_ids', input_labels_to_ids)
object.__setattr__(self, 'tool_labels_to_ids', tool_labels_to_ids)
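        # Build direct (step -> successors) and inverse (step -> predecessors)
        # views of the DAG; sources are steps with no predecessors, sinks are
        # steps with no successors.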
dag, inv_dag = self._get_dag()
heads, tails = set(dag), set(inv_dag)
object.__setattr__(self, 'dag', dag)
object.__setattr__(self, 'inv_dag', inv_dag)
object.__setattr__(self, 'source_ids', heads - tails)
assert set(self.inputs) == self.data_collection_input_ids | self.data_input_ids | self.parameter_input_ids, \
f"inputs is {self.inputs!r}, while data_collection_input_ids is {self.data_collection_input_ids!r}, data_input_ids is {self.data_input_ids!r} and parameter_input_ids is {self.parameter_input_ids!r}"
object.__setattr__(self, 'sink_ids', tails - heads)
object.__setattr__(self, 'missing_ids', missing_ids)
def _get_dag(self):
"""
Return the workflow's DAG.
For convenience, this method computes a 'direct' (step =>
successors) and an 'inverse' (step => predecessors)
representation of the same DAG.
For instance, a workflow with a single tool *c*, two inputs
*a, b* and three outputs *d, e, f* is represented by (direct)::
{'a': {'c'}, 'b': {'c'}, 'c': {'d', 'e', 'f'}}
and by (inverse)::
{'c': {'a', 'b'}, 'd': {'c'}, 'e': {'c'}, 'f': {'c'}}
"""
dag, inv_dag = {}, {}
for s in self.steps.values():
for i in s.input_steps.values():
head, tail = i['source_step'], s.id
dag.setdefault(head, set()).add(tail)
inv_dag.setdefault(tail, set()).add(head)
return dag, inv_dag
def sorted_step_ids(self):
"""
Return a topological sort of the workflow's DAG.
"""
ids = []
source_ids = self.source_ids.copy()
inv_dag = {k: v.copy() for k, v in self.inv_dag.items()}
while source_ids:
head = source_ids.pop()
ids.append(head)
for tail in self.dag.get(head, []):
incoming = inv_dag[tail]
incoming.remove(head)
if not incoming:
source_ids.add(tail)
return ids
@property
def data_input_ids(self):
"""
Return the ids of data input steps for this workflow.
"""
return {id_ for id_, s in self.steps.items() if s.type == 'data_input'}
@property
def data_collection_input_ids(self):
"""
Return the ids of data collection input steps for this workflow.
"""
return {id_ for id_, s in self.steps.items() if s.type == 'data_collection_input'}
@property
def parameter_input_ids(self):
"""
Return the ids of parameter input steps for this workflow.
"""
return {id_ for id_, s in self.steps.items() if s.type == 'parameter_input'}
@property
def tool_ids(self):
"""
Return the ids of tool steps for this workflow.
"""
return {id_ for id_, s in self.steps.items() if s.type == 'tool'}
@property
def input_labels(self):
"""
Return the labels of this workflow's input steps.
"""
return set(self.input_labels_to_ids)
@property
def is_runnable(self):
"""
Return True if the workflow can be run on Galaxy.
A workflow is considered runnable on a Galaxy instance if all
of the tools it uses are installed in that instance.
"""
return not self.missing_ids
def convert_input_map(self, input_map):
"""
Convert ``input_map`` to the format required by the Galaxy web API.
:type input_map: dict
:param input_map: a mapping from input labels to datasets
:rtype: dict
:return: a mapping from input slot ids to dataset ids in the
format required by the Galaxy web API.
"""
m = {}
for label, slot_ids in self.input_labels_to_ids.items():
datasets = input_map.get(label, [])
if not isinstance(datasets, Iterable):
datasets = [datasets]
if len(datasets) < len(slot_ids):
raise RuntimeError(f'not enough datasets for "{label}"')
for id_, ds in zip(slot_ids, datasets):
m[id_] = {'id': ds.id, 'src': ds.SRC}
return m
def preview(self):
getf = self.gi.workflows.get_previews
try:
p = [_ for _ in getf(published=True) if _.id == self.id][0]
except IndexError:
raise ValueError(f"no object for id {self.id}")
return p
def run(self, input_map=None, history='', params=None, import_inputs=False,
replacement_params=None, wait=False,
polling_interval=POLLING_INTERVAL, break_on_error=True):
"""
Run the workflow in the current Galaxy instance.
.. deprecated:: 0.16.0
Use :meth:`invoke` instead.
:type input_map: dict
:param input_map: a mapping from workflow input labels to
datasets, e.g.: ``dict(zip(workflow.input_labels,
library.get_datasets()))``
:type history: :class:`History` or str
:param history: either a valid history object (results will be
stored there) or a string (a new history will be created with
the given name).
:type params: dict
:param params: a mapping of non-datasets tool parameters (see below)
:type import_inputs: bool
:param import_inputs: If ``True``, workflow inputs will be imported into
the history; if ``False``, only workflow outputs will be visible in
the history.
:type replacement_params: dict
:param replacement_params: pattern-based replacements for
post-job actions (see the docs for
:meth:`~bioblend.galaxy.workflows.WorkflowClient.invoke_workflow`)
:type wait: bool
:param wait: whether to wait while the returned datasets are
in a pending state
:type polling_interval: float
:param polling_interval: polling interval in seconds
:type break_on_error: bool
:param break_on_error: whether to break as soon as at least one
of the returned datasets is in the 'error' state
:rtype: tuple
:return: list of output datasets, output history
The ``params`` dict should be specified as follows::
{STEP_ID: PARAM_DICT, ...}
where PARAM_DICT is::
{PARAM_NAME: VALUE, ...}
For backwards compatibility, the following (deprecated) format is
also supported for ``params``::
{TOOL_ID: PARAM_DICT, ...}
in which case PARAM_DICT affects all steps with the given tool id.
If both by-tool-id and by-step-id specifications are used, the
latter takes precedence.
Finally (again, for backwards compatibility), PARAM_DICT can also
be specified as::
{'param': PARAM_NAME, 'value': VALUE}
Note that this format allows only one parameter to be set per step.
Example: set 'a' to 1 for the third workflow step::
params = {workflow.steps[2].id: {'a': 1}}
.. warning::
This is a blocking operation that can take a very long time. If
``wait`` is set to ``False``, the method will return as soon as the
workflow has been *scheduled*, otherwise it will wait until the
workflow has been *run*. With a large number of steps, however, the
delay may not be negligible even in the former case (e.g. minutes for
100 steps).
"""
if not self.is_mapped:
raise RuntimeError('workflow is not mapped to a Galaxy object')
if not self.is_runnable:
missing_tools_str = ', '.join(f"{self.steps[step_id].tool_id}[{step_id}]" for step_id in self.missing_ids)
raise RuntimeError(f"workflow has missing tools: {missing_tools_str}")
kwargs = {
'dataset_map': self.convert_input_map(input_map or {}),
'params': params,
'import_inputs_to_history': import_inputs,
'replacement_params': replacement_params,
}
if isinstance(history, History):
try:
kwargs['history_id'] = history.id
except AttributeError:
raise RuntimeError('history does not have an id')
elif isinstance(history, str):
kwargs['history_name'] = history
else:
raise TypeError(
'history must be either a history wrapper or a string')
res = self.gi.gi.workflows.run_workflow(self.id, **kwargs)
# res structure: {'history': HIST_ID, 'outputs': [CI_ID, CI_ID, ...]}
out_hist = self.gi.histories.get(res['history'])
content_infos_dict = {ci.id: ci for ci in out_hist.content_infos}
outputs = []
for output_id in res['outputs']:
if content_infos_dict[output_id].type == 'file':
outputs.append(out_hist.get_dataset(output_id))
elif content_infos_dict[output_id].type == 'collection':
outputs.append(out_hist.get_dataset_collection(output_id))
if wait:
self.gi._wait_datasets(outputs, polling_interval=polling_interval,
break_on_error=break_on_error)
return outputs, out_hist
def export(self):
"""
Export a re-importable representation of the workflow.
:rtype: dict
:return: a JSON-serializable dump of the workflow
"""
return self.gi.gi.workflows.export_workflow_dict(self.id)
def delete(self):
"""
Delete this workflow.
.. warning::
Deleting a workflow is irreversible - all of the data from
the workflow will be permanently deleted.
"""
self.gi.workflows.delete(id_=self.id)
self.unmap()
def invoke(self, inputs=None, params=None, history=None,
import_inputs_to_history=None, replacement_params=None,
allow_tool_state_corrections=True, inputs_by=None,
parameters_normalized=False):
"""
Invoke the workflow. This will cause a workflow to be scheduled
and return an object describing the workflow invocation.
:type inputs: dict
:param inputs: A mapping of workflow inputs to datasets and dataset collections.
The datasets source can be a LibraryDatasetDatasetAssociation (``ldda``),
LibraryDataset (``ld``), HistoryDatasetAssociation (``hda``), or
HistoryDatasetCollectionAssociation (``hdca``).
The map must be in the following format:
``{'<input_index>': {'id': <encoded dataset ID>, 'src': '[ldda, ld, hda, hdca]'}}``
(e.g. ``{'2': {'id': '29beef4fadeed09f', 'src': 'hda'}}``)
This map may also be indexed by the UUIDs of the workflow steps,
as indicated by the ``uuid`` property of steps returned from the
Galaxy API. Alternatively workflow steps may be addressed by
the label that can be set in the workflow editor. If using
uuid or label you need to also set the ``inputs_by`` parameter
to ``step_uuid`` or ``name``.
:type params: dict
:param params: A mapping of non-datasets tool parameters (see below)
        :type history: :class:`History`
        :param history: The history in which to store the workflow
          output.
:type import_inputs_to_history: bool
:param import_inputs_to_history: If ``True``, used workflow inputs will
be imported into the history. If ``False``, only workflow outputs will
be visible in the given history.
:type allow_tool_state_corrections: bool
:param allow_tool_state_corrections: If True, allow Galaxy to fill in
missing tool state when running workflows. This may be useful for
workflows using tools that have changed over time or for workflows
built outside of Galaxy with only a subset of inputs defined.
:type replacement_params: dict
:param replacement_params: pattern-based replacements for post-job
actions (see below)
:type inputs_by: str
:param inputs_by: Determines how inputs are referenced. Can be
"step_index|step_uuid" (default), "step_index", "step_id", "step_uuid", or "name".
:type parameters_normalized: bool
:param parameters_normalized: Whether Galaxy should normalize ``params``
to ensure everything is referenced by a numeric step ID. Default is
``False``, but when setting ``params`` for a subworkflow, ``True`` is
required.
:rtype: Invocation
:return: the workflow invocation
The ``params`` dict should be specified as follows::
{STEP_ID: PARAM_DICT, ...}
where PARAM_DICT is::
{PARAM_NAME: VALUE, ...}
For backwards compatibility, the following (deprecated) format is
also supported for ``params``::
{TOOL_ID: PARAM_DICT, ...}
in which case PARAM_DICT affects all steps with the given tool id.
If both by-tool-id and by-step-id specifications are used, the
latter takes precedence.
Finally (again, for backwards compatibility), PARAM_DICT can also
be specified as::
{'param': PARAM_NAME, 'value': VALUE}
Note that this format allows only one parameter to be set per step.
For a ``repeat`` parameter, the names of the contained parameters needs
to be specified as ``<repeat name>_<repeat index>|<param name>``, with
the repeat index starting at 0. For example, if the tool XML contains::
<repeat name="cutoff" title="Parameters used to filter cells" min="1">
<param name="name" type="text" value="n_genes" label="Name of param...">
<option value="n_genes">n_genes</option>
<option value="n_counts">n_counts</option>
</param>
<param name="min" type="float" min="0" value="0" label="Min value"/>
</repeat>
then the PARAM_DICT should be something like::
{...
"cutoff_0|name": "n_genes",
"cutoff_0|min": "2",
"cutoff_1|name": "n_counts",
"cutoff_1|min": "4",
...}
At the time of this writing, it is not possible to change the number of
times the contained parameters are repeated. Therefore, the parameter
indexes can go from 0 to n-1, where n is the number of times the
repeated element was added when the workflow was saved in the Galaxy UI.
The ``replacement_params`` dict should map parameter names in
post-job actions (PJAs) to their runtime values. For
instance, if the final step has a PJA like the following::
{'RenameDatasetActionout_file1': {'action_arguments': {'newname': '${output}'},
'action_type': 'RenameDatasetAction',
'output_name': 'out_file1'}}
then the following renames the output dataset to 'foo'::
replacement_params = {'output': 'foo'}
see also `this email thread
<http://lists.bx.psu.edu/pipermail/galaxy-dev/2011-September/006875.html>`_.
.. warning::
Historically, the ``run_workflow`` method consumed a ``dataset_map``
data structure that was indexed by unencoded workflow step IDs. These
IDs would not be stable across Galaxy instances. The new ``inputs``
property is instead indexed by either the ``order_index`` property
(which is stable across workflow imports) or the step UUID which is
also stable.
"""
inv_dict = self.gi.gi.workflows.invoke_workflow(
workflow_id=self.id,
inputs=inputs,
params=params,
history_id=history.id,
import_inputs_to_history=import_inputs_to_history,
replacement_params=replacement_params,
allow_tool_state_corrections=allow_tool_state_corrections,
inputs_by=inputs_by,
parameters_normalized=parameters_normalized
)
return self.gi.invocations.get(inv_dict['id'])
class Invocation(Wrapper):
"""
Invocation of a workflow.
This causes the steps of a workflow to be executed in sequential order.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'history_id',
'inputs',
'state',
'steps',
'update_time',
'uuid',
'workflow_id',
)
def __init__(self, inv_dict, gi=None):
super().__init__(inv_dict, gi=gi)
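        # Promote raw step dicts to InvocationStep wrappers and flatten the
        # inputs mapping into a list, preserving each input's key as 'label'.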
self.steps = [InvocationStep(step, self) for step in self.steps]
self.inputs = [{**v, 'label': k} for k, v in self.inputs.items()]
def sorted_step_ids(self):
"""
        Get the step IDs sorted based on their order index.
        :rtype: list of str
        :return: sorted step IDs
"""
return [step.id for step in sorted(self.steps, key=lambda step: step.order_index)]
def step_states(self):
"""
Get the set of step states for this invocation.
:rtype: set
        :return: step states
"""
return {step.state for step in self.steps}
def number_of_steps(self):
"""
Get the number of steps for this invocation.
:rtype: int
        :return: number of steps
"""
return len(self.steps)
def sorted_steps_by(self, indices=None, states=None, step_ids=None):
"""
Get steps for this invocation, or get a subset by specifying
optional parameters for filtering.
:type indices: list of int
:param indices: return steps that have matching order_index
:type states: list of str
:param states: return steps that have matching states
:type step_ids: list of str
:param step_ids: return steps that have matching step_ids
:rtype: list of InvocationStep
        :return: invocation steps
"""
steps = self.steps
if indices is not None:
steps = filter(lambda step: step.order_index in indices, steps)
if states is not None:
steps = filter(lambda step: step.state in states, steps)
if step_ids is not None:
steps = filter(lambda step: step.id in step_ids, steps)
return sorted(steps, key=lambda step: step.order_index)
def cancel(self):
"""
Cancel this invocation.
.. note::
On success, this method updates the Invocation object's internal variables.
"""
inv_dict = self.gi.gi.invocations.cancel_invocation(self.id)
self.__init__(inv_dict, gi=self.gi)
def refresh(self):
"""
Update this invocation with the latest information from the server.
.. note::
On success, this method updates the Invocation object's internal variables.
"""
inv_dict = self.gi.gi.invocations.show_invocation(self.id)
self.__init__(inv_dict, gi=self.gi)
def run_step_actions(self, steps, actions):
"""
Run actions for active steps of this invocation.
:type steps: list of InvocationStep
:param steps: list of steps to run actions on
:type actions: list of str
:param actions: list of actions to run
.. note::
On success, this method updates the Invocation object's internal step variables.
"""
        if len(steps) != len(actions):
raise RuntimeError(f'Different number of ``steps`` ({len(steps)}) and ``actions`` ({len(actions)}) in ``{self}.run_step_actions()``')
step_dict_list = [self.gi.gi.invocations.run_invocation_step_action(self.id, step.id, action) for step, action in zip(steps, actions)]
for step, step_dict in zip(steps, step_dict_list):
step.__init__(step_dict, parent=self)
def summary(self):
"""
Get a summary for this invocation.
:rtype: dict
        :return: invocation summary
"""
return self.gi.gi.invocations.get_invocation_summary(self.id)
def step_jobs_summary(self):
"""
Get a summary for this invocation's step jobs.
:rtype: list of dicts
        :return: step job summaries
"""
return self.gi.gi.invocations.get_invocation_step_jobs_summary(self.id)
def report(self):
"""
Get a dictionary containing a Markdown report for this invocation.
:rtype: dict
        :return: invocation report
"""
return self.gi.gi.invocations.get_invocation_report(self.id)
def save_report_pdf(self, file_path, chunk_size=bioblend.CHUNK_SIZE):
"""
Download a PDF report for this invocation.
:type file_path: str
:param file_path: path to save the report
:type chunk_size: int
:param chunk_size: chunk size in bytes for reading remote data
"""
self.gi.gi.invocations.get_invocation_report_pdf(self.id, file_path, chunk_size)
def biocompute_object(self):
"""
Get a BioCompute object for this invocation.
:rtype: dict
        :return: BioCompute object
"""
return self.gi.gi.invocations.get_invocation_biocompute_object(self.id)
def wait(self, maxwait=12000, interval=3, check=True):
"""
Wait for this invocation to reach a terminal state.
:type maxwait: float
:param maxwait: upper limit on waiting time
:type interval: float
        :param interval: polling interval in seconds
        :type check: bool
        :param check: if ``True``, raise an error if the terminal state is not 'scheduled'
.. note::
On success, this method updates the Invocation object's internal variables.
"""
inv_dict = self.gi.gi.invocations.wait_for_invocation(self.id, maxwait=maxwait, interval=interval, check=check)
self.__init__(inv_dict, gi=self.gi)
class Dataset(Wrapper, metaclass=abc.ABCMeta):
"""
Abstract base class for Galaxy datasets.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'data_type',
'file_ext',
'file_name',
'file_size',
'genome_build',
'misc_info',
'name',
'state',
)
POLLING_INTERVAL = 1 # for state monitoring
def __init__(self, ds_dict, container, gi=None):
super().__init__(ds_dict, gi=gi)
object.__setattr__(self, 'container', container)
@property
@abc.abstractmethod
def _stream_url(self):
"""
Return the URL to stream this dataset.
"""
pass
def get_stream(self, chunk_size=bioblend.CHUNK_SIZE):
"""
Open dataset for reading and return an iterator over its contents.
:type chunk_size: int
:param chunk_size: read this amount of bytes at a time
"""
kwargs = {'stream': True}
if isinstance(self, LibraryDataset):
kwargs['params'] = {'ld_ids%5B%5D': self.id}
r = self.gi.gi.make_get_request(self._stream_url, **kwargs)
if isinstance(self, LibraryDataset) and r.status_code == 500:
# compatibility with older Galaxy releases
kwargs['params'] = {'ldda_ids%5B%5D': self.id}
r = self.gi.gi.make_get_request(self._stream_url, **kwargs)
r.raise_for_status()
return r.iter_content(chunk_size) # FIXME: client can't close r
def peek(self, chunk_size=bioblend.CHUNK_SIZE):
"""
Open dataset for reading and return the first chunk.
See :meth:`.get_stream` for param info.
"""
try:
return next(self.get_stream(chunk_size=chunk_size))
except StopIteration:
return b''
def download(self, file_object, chunk_size=bioblend.CHUNK_SIZE):
"""
Open dataset for reading and save its contents to ``file_object``.
:type file_object: file
:param file_object: output file object
See :meth:`.get_stream` for info on other params.
"""
for chunk in self.get_stream(chunk_size=chunk_size):
file_object.write(chunk)
def get_contents(self, chunk_size=bioblend.CHUNK_SIZE):
"""
Open dataset for reading and return its **full** contents.
See :meth:`.get_stream` for param info.
"""
return b''.join(self.get_stream(chunk_size=chunk_size))
def refresh(self):
"""
Re-fetch the attributes pertaining to this object.
Returns: self
"""
gi_client = getattr(self.gi.gi, self.container.API_MODULE)
ds_dict = gi_client.show_dataset(self.container.id, self.id)
self.__init__(ds_dict, self.container, self.gi)
return self
def wait(self, polling_interval=POLLING_INTERVAL, break_on_error=True):
"""
Wait for this dataset to come out of the pending states.
:type polling_interval: float
:param polling_interval: polling interval in seconds
:type break_on_error: bool
:param break_on_error: if ``True``, raise a RuntimeError exception if
the dataset ends in the 'error' state.
.. warning::
This is a blocking operation that can take a very long time. Also,
note that this method does not return anything; however, this dataset
is refreshed (possibly multiple times) during the execution.
"""
self.gi._wait_datasets([self], polling_interval=polling_interval,
break_on_error=break_on_error)
class HistoryDatasetAssociation(Dataset):
"""
Maps to a Galaxy ``HistoryDatasetAssociation``.
"""
BASE_ATTRS = Dataset.BASE_ATTRS + ('annotation', 'deleted', 'purged', 'tags', 'visible')
SRC = 'hda'
@property
def _stream_url(self):
base_url = self.gi.gi.histories._make_url(module_id=self.container.id, contents=True)
return f"{base_url}/{self.id}/display"
def update(self, **kwds):
"""
Update this history dataset metadata. Some of the attributes that can be
modified are documented below.
:type name: str
:param name: Replace history dataset name with the given string
:type genome_build: str
:param genome_build: Replace history dataset genome build (dbkey)
:type annotation: str
:param annotation: Replace history dataset annotation with given string
:type deleted: bool
:param deleted: Mark or unmark history dataset as deleted
:type visible: bool
:param visible: Mark or unmark history dataset as visible
"""
res = self.gi.gi.histories.update_dataset(self.container.id, self.id, **kwds)
# Refresh also the history because the dataset may have been (un)deleted
self.container.refresh()
self.__init__(res, self.container, gi=self.gi)
return self
def delete(self, purge=False):
"""
Delete this history dataset.
:type purge: bool
:param purge: if ``True``, also purge (permanently delete) the dataset
.. note::
For the purge option to work, the Galaxy instance must have the
``allow_user_dataset_purge`` option set to ``true`` in the
``config/galaxy.yml`` configuration file.
"""
self.gi.gi.histories.delete_dataset(self.container.id, self.id, purge=purge)
self.container.refresh()
self.refresh()
class DatasetCollection(Wrapper, metaclass=abc.ABCMeta):
"""
Abstract base class for Galaxy dataset collections.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'collection_type',
'deleted',
'name',
'state',
)
def __init__(self, dsc_dict, container, gi=None):
super().__init__(dsc_dict, gi=gi)
object.__setattr__(self, 'container', container)
def refresh(self):
"""
Re-fetch the attributes pertaining to this object.
Returns: self
"""
gi_client = getattr(self.gi.gi, self.container.API_MODULE)
dsc_dict = gi_client.show_dataset_collection(self.container.id, self.id)
self.__init__(dsc_dict, self.container, self.gi)
return self
@abc.abstractmethod
def delete(self):
pass
class HistoryDatasetCollectionAssociation(DatasetCollection):
"""
Maps to a Galaxy ``HistoryDatasetCollectionAssociation``.
"""
BASE_ATTRS = DatasetCollection.BASE_ATTRS + ('tags', 'visible', 'elements')
SRC = 'hdca'
def delete(self):
"""
Delete this dataset collection.
"""
self.gi.gi.histories.delete_dataset_collection(self.container.id, self.id)
self.container.refresh()
self.refresh()
@abstractclass
class LibRelatedDataset(Dataset):
"""
Base class for LibraryDatasetDatasetAssociation and LibraryDataset classes.
"""
@property
def _stream_url(self):
base_url = self.gi.gi.libraries._make_url()
return f"{base_url}/datasets/download/uncompressed"
class LibraryDatasetDatasetAssociation(LibRelatedDataset):
"""
Maps to a Galaxy ``LibraryDatasetDatasetAssociation``.
"""
BASE_ATTRS = LibRelatedDataset.BASE_ATTRS + ('deleted',)
SRC = 'ldda'
class LibraryDataset(LibRelatedDataset):
"""
Maps to a Galaxy ``LibraryDataset``.
"""
SRC = 'ld'
def delete(self, purged=False):
"""
Delete this library dataset.
:type purged: bool
:param purged: if ``True``, also purge (permanently delete) the dataset
"""
self.gi.gi.libraries.delete_library_dataset(
self.container.id, self.id, purged=purged)
self.container.refresh()
self.refresh()
def update(self, **kwds):
"""
Update this library dataset metadata. Some of the attributes that can be
modified are documented below.
:type name: str
:param name: Replace history dataset name with the given string
:type genome_build: str
:param genome_build: Replace history dataset genome build (dbkey)
"""
res = self.gi.gi.libraries.update_library_dataset(self.id, **kwds)
self.container.refresh()
self.__init__(res, self.container, gi=self.gi)
return self
@abstractclass
class ContentInfo(Wrapper):
"""
Instances of this class wrap dictionaries obtained by getting
``/api/{histories,libraries}/<ID>/contents`` from Galaxy.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'name',
'type',
)
class LibraryContentInfo(ContentInfo):
"""
Instances of this class wrap dictionaries obtained by getting
``/api/libraries/<ID>/contents`` from Galaxy.
"""
class HistoryContentInfo(ContentInfo):
"""
Instances of this class wrap dictionaries obtained by getting
``/api/histories/<ID>/contents`` from Galaxy.
"""
BASE_ATTRS = ContentInfo.BASE_ATTRS + ('deleted', 'state', 'visible')
class DatasetContainer(Wrapper, metaclass=abc.ABCMeta):
"""
Abstract base class for dataset containers (histories and libraries).
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'deleted',
'name',
)
def __init__(self, c_dict, content_infos=None, gi=None):
"""
:type content_infos: list of :class:`ContentInfo`
:param content_infos: info objects for the container's contents
"""
super().__init__(c_dict, gi=gi)
if content_infos is None:
content_infos = []
object.__setattr__(self, 'content_infos', content_infos)
object.__setattr__(self, 'obj_gi_client', getattr(self.gi, self.API_MODULE))
@property
@abc.abstractmethod
def API_MODULE(self):
pass
@property
def dataset_ids(self):
"""
Return the ids of the contained datasets.
"""
return [_.id for _ in self.content_infos if _.type == 'file']
def preview(self):
getf = self.obj_gi_client.get_previews
# self.state could be stale: check both regular and deleted containers
try:
p = [_ for _ in getf() if _.id == self.id][0]
except IndexError:
try:
p = [_ for _ in getf(deleted=True) if _.id == self.id][0]
except IndexError:
raise ValueError(f"no object for id {self.id}")
return p
def refresh(self):
"""
Re-fetch the attributes pertaining to this object.
Returns: self
"""
fresh = self.obj_gi_client.get(self.id)
self.__init__(
fresh.wrapped, content_infos=fresh.content_infos, gi=self.gi)
return self
def get_dataset(self, ds_id):
"""
Retrieve the dataset corresponding to the given id.
:type ds_id: str
:param ds_id: dataset id
:rtype: :class:`~.HistoryDatasetAssociation` or
:class:`~.LibraryDataset`
:return: the dataset corresponding to ``ds_id``
"""
gi_client = getattr(self.gi.gi, self.API_MODULE)
ds_dict = gi_client.show_dataset(self.id, ds_id)
return self.DS_TYPE(ds_dict, self, gi=self.gi)
def get_datasets(self, name=None):
"""
Get all datasets contained inside this dataset container.
:type name: str
:param name: return only datasets with this name
:rtype: list of :class:`~.HistoryDatasetAssociation` or list of
:class:`~.LibraryDataset`
:return: datasets with the given name contained inside this
container
.. note::
when filtering library datasets by name, specify their full
paths starting from the library's root folder, e.g.,
``/seqdata/reads.fastq``. Full paths are available through
the ``content_infos`` attribute of
:class:`~.Library` objects.
"""
if name is None:
ds_ids = self.dataset_ids
else:
ds_ids = [_.id for _ in self.content_infos if _.name == name]
return [self.get_dataset(_) for _ in ds_ids]
class History(DatasetContainer):
"""
Maps to a Galaxy history.
"""
BASE_ATTRS = DatasetContainer.BASE_ATTRS + ('annotation', 'published', 'state', 'state_ids', 'state_details', 'tags')
DS_TYPE = HistoryDatasetAssociation
DSC_TYPE = HistoryDatasetCollectionAssociation
CONTENT_INFO_TYPE = HistoryContentInfo
API_MODULE = 'histories'
def update(self, **kwds):
"""
Update history metadata information. Some of the attributes that can be
modified are documented below.
:type name: str
:param name: Replace history name with the given string
:type annotation: str
:param annotation: Replace history annotation with the given string
:type deleted: bool
:param deleted: Mark or unmark history as deleted
:type purged: bool
:param purged: If True, mark history as purged (permanently deleted).
:type published: bool
:param published: Mark or unmark history as published
:type importable: bool
:param importable: Mark or unmark history as importable
:type tags: list
:param tags: Replace history tags with the given list
"""
# TODO: wouldn't it be better if name and annotation were attributes?
self.gi.gi.histories.update_history(self.id, **kwds)
self.refresh()
return self
def delete(self, purge=False):
"""
Delete this history.
:type purge: bool
:param purge: if ``True``, also purge (permanently delete) the history
.. note::
For the purge option to work, the Galaxy instance must have the
``allow_user_dataset_purge`` option set to ``true`` in the
``config/galaxy.yml`` configuration file.
"""
self.gi.histories.delete(id_=self.id, purge=purge)
self.refresh()
self.unmap()
def import_dataset(self, lds):
"""
Import a dataset into the history from a library.
:type lds: :class:`~.LibraryDataset`
:param lds: the library dataset to import
:rtype: :class:`~.HistoryDatasetAssociation`
:return: the imported history dataset
"""
if not self.is_mapped:
raise RuntimeError('history is not mapped to a Galaxy object')
if not isinstance(lds, LibraryDataset):
raise TypeError('lds is not a LibraryDataset')
res = self.gi.gi.histories.upload_dataset_from_library(self.id, lds.id)
if not isinstance(res, Mapping):
raise RuntimeError(
f"upload_dataset_from_library: unexpected reply: {res!r}"
)
self.refresh()
return self.get_dataset(res['id'])
def upload_file(self, path, **kwargs):
"""
Upload the file specified by ``path`` to this history.
:type path: str
:param path: path of the file to upload
See :meth:`~bioblend.galaxy.tools.ToolClient.upload_file` for
the optional parameters.
:rtype: :class:`~.HistoryDatasetAssociation`
:return: the uploaded dataset
"""
out_dict = self.gi.gi.tools.upload_file(path, self.id, **kwargs)
self.refresh()
return self.get_dataset(out_dict['outputs'][0]['id'])
upload_dataset = upload_file
def upload_from_ftp(self, path, **kwargs):
"""
Upload the file specified by ``path`` from the user's FTP directory to
this history.
:type path: str
:param path: path of the file in the user's FTP directory
See :meth:`~bioblend.galaxy.tools.ToolClient.upload_file` for
the optional parameters.
:rtype: :class:`~.HistoryDatasetAssociation`
:return: the uploaded dataset
"""
out_dict = self.gi.gi.tools.upload_from_ftp(path, self.id, **kwargs)
self.refresh()
return self.get_dataset(out_dict['outputs'][0]['id'])
def paste_content(self, content, **kwargs):
        """
        Upload a string to a new dataset in this history.
        :type content: str
        :param content: content of the new dataset to upload
        See :meth:`upload_file` for the optional parameters (except file_name).
        :rtype: :class:`~.HistoryDatasetAssociation`
        :return: the uploaded dataset
        """
        out_dict = self.gi.gi.tools.paste_content(content, self.id, **kwargs)
        self.refresh()
        return self.get_dataset(out_dict['outputs'][0]['id'])
def export(self, gzip=True, include_hidden=False, include_deleted=False,
wait=False, maxwait=None):
"""
Start a job to create an export archive for this history. See
:meth:`~bioblend.galaxy.histories.HistoryClient.export_history`
for parameter and return value info.
"""
return self.gi.gi.histories.export_history(
self.id, gzip=gzip, include_hidden=include_hidden,
include_deleted=include_deleted, wait=wait, maxwait=maxwait)
def download(self, jeha_id, outf, chunk_size=bioblend.CHUNK_SIZE):
"""
Download an export archive for this history. Use :meth:`export`
to create an export and get the required ``jeha_id``. See
:meth:`~bioblend.galaxy.histories.HistoryClient.download_history`
for parameter and return value info.
"""
return self.gi.gi.histories.download_history(
self.id, jeha_id, outf, chunk_size=chunk_size)
def create_dataset_collection(self, collection_description):
"""
Create a new dataset collection in the history by providing a collection description.
:type collection_description: bioblend.galaxy.dataset_collections.CollectionDescription
:param collection_description: a description of the dataset collection
:rtype: :class:`~.HistoryDatasetCollectionAssociation`
:return: the new dataset collection
"""
dataset_collection = self.gi.gi.histories.create_dataset_collection(self.id, collection_description)
self.refresh()
return self.get_dataset_collection(dataset_collection['id'])
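    # Example (illustrative; assumes ``ds1`` and ``ds2`` are datasets already in
    # this history):
    #
    #   from bioblend.galaxy.dataset_collections import (
    #       CollectionDescription, HistoryDatasetElement)
    #   desc = CollectionDescription(
    #       name='pair', type='paired',
    #       elements=[HistoryDatasetElement(name='forward', id=ds1.id),
    #                 HistoryDatasetElement(name='reverse', id=ds2.id)])
    #   dsc = hist.create_dataset_collection(desc)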
def get_dataset_collection(self, dsc_id):
"""
Retrieve the dataset collection corresponding to the given id.
:type dsc_id: str
:param dsc_id: dataset collection id
:rtype: :class:`~.HistoryDatasetCollectionAssociation`
:return: the dataset collection corresponding to ``dsc_id``
"""
dsc_dict = self.gi.gi.histories.show_dataset_collection(self.id, dsc_id)
return self.DSC_TYPE(dsc_dict, self, gi=self.gi)
class Library(DatasetContainer):
"""
Maps to a Galaxy library.
"""
BASE_ATTRS = DatasetContainer.BASE_ATTRS + ('description', 'synopsis')
DS_TYPE = LibraryDataset
CONTENT_INFO_TYPE = LibraryContentInfo
API_MODULE = 'libraries'
@property
def folder_ids(self):
"""
Return the ids of the contained folders.
"""
return [_.id for _ in self.content_infos if _.type == 'folder']
def delete(self):
"""
Delete this library.
"""
self.gi.libraries.delete(id_=self.id)
self.refresh()
self.unmap()
def _pre_upload(self, folder):
"""
Return the id of the given folder, after sanity checking.
"""
if not self.is_mapped:
raise RuntimeError('library is not mapped to a Galaxy object')
return None if folder is None else folder.id
def upload_data(self, data, folder=None, **kwargs):
"""
Upload data to this library.
:type data: str
:param data: dataset contents
:type folder: :class:`~.Folder`
:param folder: a folder object, or ``None`` to upload to the root folder
:rtype: :class:`~.LibraryDataset`
:return: the dataset object that represents the uploaded content
Optional keyword arguments: ``file_type``, ``dbkey``.
"""
fid = self._pre_upload(folder)
res = self.gi.gi.libraries.upload_file_contents(
self.id, data, folder_id=fid, **kwargs)
self.refresh()
return self.get_dataset(res[0]['id'])
def upload_from_url(self, url, folder=None, **kwargs):
"""
Upload data to this library from the given URL.
:type url: str
:param url: URL from which data should be read
See :meth:`.upload_data` for info on other params.
"""
fid = self._pre_upload(folder)
res = self.gi.gi.libraries.upload_file_from_url(
self.id, url, folder_id=fid, **kwargs)
self.refresh()
return self.get_dataset(res[0]['id'])
def upload_from_local(self, path, folder=None, **kwargs):
"""
Upload data to this library from a local file.
:type path: str
:param path: local file path from which data should be read
See :meth:`.upload_data` for info on other params.
"""
fid = self._pre_upload(folder)
res = self.gi.gi.libraries.upload_file_from_local_path(
self.id, path, folder_id=fid, **kwargs)
self.refresh()
return self.get_dataset(res[0]['id'])
def upload_from_galaxy_fs(self, paths, folder=None, link_data_only=None, **kwargs):
"""
Upload data to this library from filesystem paths on the server.
.. note::
For this method to work, the Galaxy instance must have the
``allow_path_paste`` option set to ``true`` in the
``config/galaxy.yml`` configuration file.
:type paths: str or :class:`~collections.abc.Iterable` of str
:param paths: server-side file paths from which data should be read
:type link_data_only: str
:param link_data_only: either 'copy_files' (default) or
'link_to_files'. Setting to 'link_to_files' symlinks instead of
copying the files
:rtype: list of :class:`~.LibraryDataset`
:return: the dataset objects that represent the uploaded content
See :meth:`.upload_data` for info on other params.
"""
fid = self._pre_upload(folder)
if isinstance(paths, str):
paths = (paths,)
paths = '\n'.join(paths)
res = self.gi.gi.libraries.upload_from_galaxy_filesystem(
self.id, paths, folder_id=fid, link_data_only=link_data_only,
**kwargs)
if res is None:
raise RuntimeError('upload_from_galaxy_filesystem: no reply')
if not isinstance(res, Sequence):
raise RuntimeError(
f"upload_from_galaxy_filesystem: unexpected reply: {res!r}"
)
new_datasets = [
self.get_dataset(ds_info['id']) for ds_info in res
]
self.refresh()
return new_datasets
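    # Example (illustrative; the server-side paths are placeholders and
    # ``allow_path_paste`` must be enabled, as noted above):
    #
    #   datasets = lib.upload_from_galaxy_fs(
    #       ['/data/run1/a.fastq', '/data/run1/b.fastq'],
    #       link_data_only='link_to_files')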
def copy_from_dataset(self, hda, folder=None, message=''):
"""
Copy a history dataset into this library.
:type hda: :class:`~.HistoryDatasetAssociation`
:param hda: history dataset to copy into the library
See :meth:`.upload_data` for info on other params.
"""
fid = self._pre_upload(folder)
res = self.gi.gi.libraries.copy_from_dataset(
self.id, hda.id, folder_id=fid, message=message)
self.refresh()
return self.get_dataset(res['library_dataset_id'])
def create_folder(self, name, description=None, base_folder=None):
"""
Create a folder in this library.
:type name: str
:param name: folder name
:type description: str
:param description: optional folder description
:type base_folder: :class:`~.Folder`
:param base_folder: parent folder, or ``None`` to create in the root
folder
:rtype: :class:`~.Folder`
:return: the folder just created
"""
bfid = None if base_folder is None else base_folder.id
res = self.gi.gi.libraries.create_folder(
self.id, name, description=description, base_folder_id=bfid)
self.refresh()
return self.get_folder(res[0]['id'])
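    # Example (illustrative): build a nested folder layout, then upload into it.
    #
    #   top = lib.create_folder('raw_data')
    #   batch = lib.create_folder('batch1', base_folder=top)
    #   ds = lib.upload_from_local('/tmp/reads.fq', folder=batch)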
def get_folder(self, f_id):
"""
Retrieve the folder corresponding to the given id.
:rtype: :class:`~.Folder`
:return: the folder corresponding to ``f_id``
"""
f_dict = self.gi.gi.libraries.show_folder(self.id, f_id)
return Folder(f_dict, self, gi=self.gi)
@property
def root_folder(self):
"""
The root folder of this library.
:rtype: :class:`~.Folder`
:return: the root folder of this library
"""
return self.get_folder(self.gi.gi.libraries._get_root_folder_id(self.id))
class Folder(Wrapper):
"""
Maps to a folder in a Galaxy library.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'deleted',
'description',
'item_count',
'name',
)
    def __init__(self, f_dict, container, gi=None):
        super().__init__(f_dict, gi=gi)
        object.__setattr__(self, 'container', container)
        # initialize the cache consulted by the `parent` property below
        object.__setattr__(self, '_cached_parent', None)
@property
def parent(self):
"""
The parent folder of this folder. The parent of the root folder is
``None``.
:rtype: :class:`~.Folder`
:return: the parent of this folder
"""
if self._cached_parent is None:
object.__setattr__(self,
'_cached_parent',
self._get_parent())
return self._cached_parent
def _get_parent(self):
"""
Return the parent folder of this folder.
"""
parent_id = self.wrapped['parent_id']
if parent_id is None:
return None
return self.container.get_folder(parent_id)
def refresh(self):
"""
Re-fetch the attributes pertaining to this object.
Returns: self
"""
f_dict = self.gi.gi.libraries.show_folder(self.container.id, self.id)
self.__init__(f_dict, self.container, gi=self.gi)
return self
class Tool(Wrapper):
"""
Maps to a Galaxy tool.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'name',
'version',
)
POLLING_INTERVAL = 10 # for output state monitoring
def run(self, inputs, history, wait=False,
polling_interval=POLLING_INTERVAL):
"""
Execute this tool in the given history with inputs from dict
``inputs``.
:type inputs: dict
:param inputs: dictionary of input datasets and parameters for
the tool (see below)
:type history: :class:`History`
:param history: the history where to execute the tool
:type wait: bool
:param wait: whether to wait while the returned datasets are
in a pending state
:type polling_interval: float
:param polling_interval: polling interval in seconds
:rtype: list of :class:`HistoryDatasetAssociation`
:return: list of output datasets
The ``inputs`` dict should contain input datasets and parameters
in the (largely undocumented) format used by the Galaxy API.
Some examples can be found in `Galaxy's API test suite
<https://github.com/galaxyproject/galaxy/blob/dev/lib/galaxy_test/api/test_tools.py>`_.
The value of an input dataset can also be a :class:`Dataset`
object, which will be automatically converted to the needed
format.
"""
for k, v in inputs.items():
if isinstance(v, Dataset):
inputs[k] = {'src': v.SRC, 'id': v.id}
out_dict = self.gi.gi.tools.run_tool(history.id, self.id, inputs)
outputs = [history.get_dataset(_['id']) for _ in out_dict['outputs']]
if wait:
self.gi._wait_datasets(outputs, polling_interval=polling_interval)
return outputs
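    # Example (hypothetical sketch): ``gi`` is assumed to be a
    # ``bioblend.galaxy.objects.GalaxyInstance`` and 'Cut1' stands in for any
    # installed tool id.
    #
    #   hist = gi.histories.create(name='tool demo')
    #   ds = hist.upload_file('/tmp/table.tsv')
    #   tool = gi.tools.get('Cut1')
    #   outputs = tool.run({'input': ds, 'columnList': 'c1,c2'}, hist, wait=True)
    #
    # ``ds`` is converted automatically to the ``{'src': ..., 'id': ...}`` form
    # expected by the Galaxy API, as described above.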
class Job(Wrapper):
"""
Maps to a Galaxy job.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + ('state',)
@abstractclass
class DatasetContainerPreview(Wrapper):
"""
Abstract base class for dataset container (history and library) 'previews'.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'deleted',
'name',
)
class LibraryPreview(DatasetContainerPreview):
"""
Models Galaxy library 'previews'.
Instances of this class wrap dictionaries obtained by getting
``/api/libraries`` from Galaxy.
"""
class HistoryPreview(DatasetContainerPreview):
"""
Models Galaxy history 'previews'.
Instances of this class wrap dictionaries obtained by getting
``/api/histories`` from Galaxy.
"""
BASE_ATTRS = DatasetContainerPreview.BASE_ATTRS + (
'annotation',
'published',
'purged',
'tags',
)
class WorkflowPreview(Wrapper):
"""
Models Galaxy workflow 'previews'.
Instances of this class wrap dictionaries obtained by getting
``/api/workflows`` from Galaxy.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'deleted',
'latest_workflow_uuid',
'name',
'number_of_steps',
'owner',
'published',
'show_in_tool_panel',
'tags',
)
class InvocationPreview(Wrapper):
"""
Models Galaxy invocation 'previews'.
Instances of this class wrap dictionaries obtained by getting
``/api/invocations`` from Galaxy.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + (
'history_id',
'id',
'state',
'update_time',
'uuid',
'workflow_id',
)
class JobPreview(Wrapper):
"""
Models Galaxy job 'previews'.
Instances of this class wrap dictionaries obtained by getting
``/api/jobs`` from Galaxy.
"""
BASE_ATTRS = Wrapper.BASE_ATTRS + ('state',)
| """
Upload a string to a new dataset in this history.
:type content: str
:param content: content of the new dataset to upload
See :meth:`~bioblend.galaxy.tools.ToolClient.upload_file` for
the optional parameters (except file_name).
:rtype: :class:`~.HistoryDatasetAssociation`
:return: the uploaded dataset
"""
out_dict = self.gi.gi.tools.paste_content(content, self.id, **kwargs)
self.refresh()
return self.get_dataset(out_dict['outputs'][0]['id']) |
list_server_gateway_status.py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ListServerGatewayStatusResult',
'AwaitableListServerGatewayStatusResult',
'list_server_gateway_status',
]
@pulumi.output_type
class ListServerGatewayStatusResult:
"""
Status of gateway is live.
"""
def __init__(__self__, status=None):
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
@property
@pulumi.getter
def status(self) -> Optional[str]:
"""
Live message of list gateway.
"""
return pulumi.get(self, "status")
class AwaitableListServerGatewayStatusResult(ListServerGatewayStatusResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
|
return ListServerGatewayStatusResult(
status=self.status)
def list_server_gateway_status(resource_group_name: Optional[str] = None,
server_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListServerGatewayStatusResult:
"""
Use this data source to access information about an existing resource.
:param str resource_group_name: The name of the Azure Resource group of which a given Analysis Services server is part. This name must be at least 1 character in length, and no more than 90.
:param str server_name: The name of the Analysis Services server.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['serverName'] = server_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:analysisservices/latest:listServerGatewayStatus', __args__, opts=opts, typ=ListServerGatewayStatusResult).value
return AwaitableListServerGatewayStatusResult(
status=__ret__.status)
| yield self |
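# Example (hypothetical usage sketch; the resource group and server names are
# placeholders):
#
#   result = list_server_gateway_status(
#       resource_group_name='my-resource-group',
#       server_name='myanalysisserver')
#   pulumi.export('gateway_status', result.status)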
channelpool.go | package tcppool
import (
"errors"
"fmt"
"sync"
"time"
)
type InitOptions struct {
	// InitialCap is the minimum number of connections kept in the pool
	InitialCap int
	// MaxCap is the maximum number of connections the pool may hold
	MaxCap int
	// Factory creates a new connection
	Factory func() (interface{}, error)
	// Close closes a connection
	Close func(interface{}) error
	// IdleTimeout is the maximum idle time; a connection idle longer than this is discarded
	IdleTimeout time.Duration
}
// channelPool stores the pooled connection information
type channelPool struct {
mu sync.Mutex
conns chan *idleConn
factory func() (interface{}, error)
close func(interface{}) error
idleTimeout time.Duration
}
type idleConn struct {
conn interface{}
t time.Time
}
// NewChannelPool initializes the connection pool
func NewChannelPool(options InitOptions) (TcpPool, error) {
if options.InitialCap < 0 || options.MaxCap <= 0 || options.InitialCap > options.MaxCap {
return nil, errors.New("invalid capacity settings")
}
c := &channelPool{
conns: make(chan *idleConn, options.MaxCap),
factory: options.Factory,
close: options.Close,
idleTimeout: options.IdleTimeout,
}
for i := 0; i < options.InitialCap; i++ {
conn, err := c.factory()
if err != nil {
c.Release()
return nil, fmt.Errorf("factory is not able to fill the pool: %s", err)
}
c.conns <- &idleConn{conn: conn, t: time.Now()}
}
return c, nil
}
// getConns returns the channel holding all pooled connections
func (c *channelPool) getConns() chan *idleConn {
c.mu.Lock()
conns := c.conns
c.mu.Unlock()
return conns
}
// Get takes a connection from the pool, creating a new one if none is idle
func (c *channelPool) Get() (interface{}, error) {
conns := c.getConns()
if conns == nil {
return nil, ErrClosed
}
for {
select {
case wrapConn := <-conns:
if wrapConn == nil {
return nil, ErrClosed
}
			// discard the connection if it has been idle longer than the timeout
			if timeout := c.idleTimeout; timeout > 0 {
				if wrapConn.t.Add(timeout).Before(time.Now()) {
					// drop the expired connection and close it
c.Close(wrapConn.conn)
continue
}
}
return wrapConn.conn, nil
default:
conn, err := c.factory()
if err != nil {
return nil, err
}
| }
// Put returns a connection to the pool
func (c *channelPool) Put(conn interface{}) error {
if conn == nil {
return errors.New("connection is nil. rejecting")
}
c.mu.Lock()
defer c.mu.Unlock()
if c.conns == nil {
return c.Close(conn)
}
select {
case c.conns <- &idleConn{conn: conn, t: time.Now()}:
return nil
default:
		// the pool is full; close this connection directly
return c.Close(conn)
}
}
// Close closes a single connection
func (c *channelPool) Close(conn interface{}) error {
if conn == nil {
return errors.New("connection is nil. rejecting")
}
return c.close(conn)
}
// Release closes every connection in the pool and shuts the pool down
func (c *channelPool) Release() {
c.mu.Lock()
conns := c.conns
c.conns = nil
c.factory = nil
closeFun := c.close
c.close = nil
c.mu.Unlock()
if conns == nil {
return
}
close(conns)
for wrapConn := range conns {
closeFun(wrapConn.conn)
}
}
// Len reports the number of idle connections currently in the pool
func (c *channelPool) Len() int {
return len(c.getConns())
} | return conn, nil
}
} |
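// Example (illustrative sketch, not part of the original file): pooling TCP
// connections; the address, capacities and timeout are placeholders, and the
// example additionally assumes the "log" and "net" packages are imported.
//
//	pool, err := NewChannelPool(InitOptions{
//		InitialCap:  5,
//		MaxCap:      30,
//		Factory:     func() (interface{}, error) { return net.Dial("tcp", "127.0.0.1:8080") },
//		Close:       func(v interface{}) error { return v.(net.Conn).Close() },
//		IdleTimeout: 15 * time.Second,
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	conn, _ := pool.Get()
//	// ... use conn.(net.Conn) ...
//	_ = pool.Put(conn)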
read_groups_from_bam.py | import argparse
import pysam
# Parse the read group IDs (or, optionally, library names) from a BAM/SAM
# header and return them as a sorted, de-duplicated list of strings.
def | (bam_filename, use_libraries=False):
bam = pysam.AlignmentFile(bam_filename, "rb")
header = bam.header
results = {}
if 'RG' in header:
read_groups = header['RG']
if use_libraries:
field = 'LB'
else:
field = 'ID'
#print(read_groups)
for read_group in read_groups:
results[read_group[field]] = 1
#read_group['SM'] = sample
#print(read_group)
results_without_duplicates = [key for (key, ignored) in results.items()]
sorted_read_groups = sorted(results_without_duplicates)
return sorted_read_groups
def read_groups_and_libraries_from_bam(bam_filename):
bam = pysam.AlignmentFile(bam_filename, "rb")
header = bam.header
results = {}
if 'RG' in header:
read_groups = header['RG']
#print(read_groups)
for read_group in read_groups:
read_group_id = read_group['ID']
read_group_library = read_group['LB']
results[read_group_id] = read_group_library
return results
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Show the read groups in a bam.")
parser.add_argument('-p', "--pulldown", help="report read groups colon-delimited for pulldown", action='store_true')
parser.add_argument('-l', "--libraries", help="report libraries instead of read groups", action='store_true')
parser.add_argument('-b', "--both", help="report read groups and libraries", action='store_true')
parser.add_argument("bam", help="bam for read groups")
args = parser.parse_args()
bam_filename = args.bam
if args.both:
read_groups_to_libraries = read_groups_and_libraries_from_bam(bam_filename)
for read_group, library in read_groups_to_libraries.items():
print("{}\t{}".format(read_group, library))
else:
read_groups = read_groups_from_bam(bam_filename, args.libraries)
if args.pulldown:
print(':'.join(read_groups))
else:
for read_group in read_groups:
print(read_group)
| read_groups_from_bam |
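# Example invocations (illustrative; 'sample.bam' is a placeholder path):
#
#   python read_groups_from_bam.py sample.bam      # one read group ID per line
#   python read_groups_from_bam.py -p sample.bam   # colon-delimited, for pulldown
#   python read_groups_from_bam.py -b sample.bam   # "<read group>\t<library>" lines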
jqxdata.export.js | /*
jQWidgets v5.3.2 (2017-Sep)
Copyright (c) 2011-2017 jQWidgets.
License: https://jqwidgets.com/license/
*/
(function(b){var a=(function(){var c={},u,q,j,l,g,h,o,p;function d(B,A,x,z,y,v,w){this.hierarchy=y;this.exportFormat=v;this.filename=w;B.beginFile(w);n(B);k(B);B.endFile(w);return B.getFile()}function n(z){var x=true;b.each(q,function(){if(this.hidden){x=false;return false}});z.beginHeader(x);var w=0;for(var v in q){if(q[v].columnsDataFields){v=q[v].columnsDataFields[w].displayfield}var y=m(v,q[v]);z.appendHeaderCell(q[v],v,y,x,w);w++}z.endHeader(x)}function k(x){x.beginBody();if(this.hierarchy){var w=function(z){for(var y=0;y<z.length;y+=1){if(z[y]!==undefined){x.beginRow(z[y].level);e(x,z[y],y,true);if(z[y].records){x.beginRows(z[y].level);w(z[y].records);x.endRows(z[y].level)}x.endRow(z[y].level)}}};w(u);x.endBody();return}for(var v=0;v<u.length;v+=1){if(u[v]!==undefined){e(x,u[v],v)}}x.endBody()}function e(A,z,B,v){var x;if(v!=true){A.beginRow()}var y=0;for(var w in q){if(q[w].columnsDataFields){w=q[w].columnsDataFields[y].displayfield}x=s(B,w);if(x){if(x.level!=undefined){if(x.index-1>z.level&&x.index-1<x.maxLevel){y++;continue}}if(x.maxLevel!=undefined){if(x.index-1==x.maxLevel){x=b.extend({},x);x.merge=x.maxLevel-z.level-1}}}if(z.level!=undefined&&z.label!=undefined){if(this.exportFormat==="xml"||this.exportFormat==="json"){var C={};C.text="group";A.appendBodyCell(z.label,C,x,z,y,"group");break}}if(z.hasOwnProperty(w)){A.appendBodyCell(z[w],q[w],x,z,y)}else{A.appendBodyCell("",q[w],x,z,y)}y++}if(v!=true){A.endRow()}}function m(w,x){if(x.style){return j[x.style]}var v=t();if(v.length>0){return v[0].style}return null}function t(){if(!g){g=new Array();b.each(j,function(v,w){g[g.length]={name:v,style:w}})}return g}function s(A,z){var B=q[z];if(B){if(B.customCellStyles){var x=B.customCellStyles[A];if(x){return j[x]}}if(B.cellStyle){if(B.cellAltStyle){var w=A%2;if(w==0){return j[B.cellStyle]}return j[B.cellAltStyle]}return j[B.cellStyle]}else{var v=t();if(v.length>0){var w=A%(v.length-1);var y=v[w+1].style;return y}}}return null}function r(y,w,x){var v=document.createElement("input");v.name=w;v.value=y;v.type="hidden";x.appendChild(v);return v}function f(x,v,w){var y=document.createElement("textarea");y.name=v;y.value=x;w.appendChild(y);return y}function i(w,z,y,v,A){var x=document.createElement("form");r(w,"filename",x);r(z,"format",x);f(y,"content",x);if(v==undefined||v==""){if(window&&window.location.toString().indexOf("jqwidgets.com")>=0){v="https://jqwidgets.com/export_server/dataexport.php"}else{v="https://jquerygrid.net/export_server/dataexport.php"}}x.action=v;x.method="post";if(A){x.acceptCharset=A}document.body.appendChild(x);return x}l=function(A,y,x,w,z,v){if(!(this instanceof a)){return new a(A,y,x,z,v)}u=A;q=y;j=x;this.exportTo=function(K,H,G,B){K=K.toString().toLowerCase();var D=c[K];if(typeof D==="undefined"){throw"You can't export to "+K+" format."}if(K==="pdf"&&B==undefined){var M=this.exportTo(K,H,K,"pdf");if(!b.jqx.pdfExport){b.jqx.pdfExport={orientation:"portrait",paperSize:"a4"}}var L=new pdfDataExport(b.jqx.pdfExport.orientation,"pt",b.jqx.pdfExport.paperSize);L.cellInitialize();var J=b(M).find("th");var I=b(M).find("tr");var N=0;L.setFontSize(13*72/96);var F=595;switch(b.jqx.pdfExport.paperSize){case"legal":var F=612;if(b.jqx.pdfExport.orientation!=="portrait"){F=1008}break;case"letter":var F=612;if(b.jqx.pdfExport.orientation!=="portrait"){F=792}break;case"a3":var F=841;if(b.jqx.pdfExport.orientation!=="portrait"){F=1190}break;case"a4":var F=595;if(b.jqx.pdfExport.orientation!=="portrait"){F=842}break;case"a5":var 
F=420;if(b.jqx.pdfExport.orientation!=="portrait"){F=595}break}F-=20;var E=0;var C=[];b.each(J,function(O){var P=parseInt(this.style.width);if(isNaN(P)){P=25}var Q=P*72/96;C[O]=Q;E+=Q});if(E>F){b.each(C,function(O){C[O]=(C[O]/E)*100;C[O]=C[O]*F/100})}b.each(J,function(P){var T=C[P];var S=25*72/96;var R=L.getTextDimensions(b(this).html());var Q=b(this).html();if(R.w+3>T){var O=L.splitTextToSize(Q,T-3);var U=O[0];if(U.length>3){Q=U.substring(0,U.length-3)+"..."}else{Q=U.substring(0,1)+"..."}var O=L.splitTextToSize(Q,T-3);var U=O[0];if(U!=Q){Q=U}}L.cell(10,10,T,S,Q,N)});N++;b.each(I,function(V){if(V===0){return true}var P=b(this).children();var Q=P.length>J.length;if(Q){var Y=P.length-J.length;var Z="";var X=C[0];var U=25*72/96;for(var R=0;R<=Y;R++){var O=P[R].innerHTML;if(O==="+"||O==="-"){O=O+" "}if(O===" "){O=" "}Z+=O}var T=L.getTextDimensions(Z);if(T.w+3>X){var W=L.splitTextToSize(Z,X-3);var S=W[0];if(S.length>3){Z=S.substring(0,S.length-3)+"..."}else{Z=S.substring(0,1)+"..."}var W=L.splitTextToSize(Z,X-3);var S=W[0];if(S!=Z){Z=S}}L.cell(10,10,X,U,Z,N);for(var R=Y+1;R<P.length;R++){var V=R-Y;var X=C[V];var U=25*72/96;var Z=b(P[R]).html();var T=L.getTextDimensions(b(P[R]).html());if(T.w+3>X){var W=L.splitTextToSize(Z,X-3);var S=W[0];if(S.length>3){Z=S.substring(0,S.length-3)+"..."}else{Z=S.substring(0,1)+"..."}var W=L.splitTextToSize(Z,X-3);var S=W[0];if(S!=Z){Z=S}}L.cell(10,10,X,U,Z,N)}N++;return true}b.each(P,function(ab){var af=C[ab];var ae=25*72/96;var ad=b(this).html();var ac=L.getTextDimensions(b(this).html());if(ac.w+3>af){var aa=L.splitTextToSize(ad,af-3);var ag=aa[0];if(ag.length>3){ad=ag.substring(0,ag.length-3)+"..."}else{ad=ag.substring(0,1)+"..."}var aa=L.splitTextToSize(ad,af-3);var ag=aa[0];if(ag!=ad){ad=ag}}L.cell(10,10,af,ae,ad,N)});N++});if(b.jqx.browser.msie&&b.jqx.browser.version<10){throw new Error("PDF export requires a browser with HTML5 support");return}return L}return d(D,u,q,j,H,G,B)};this.exportToFile=function(L,B,O,F,I){if(L==="pdf"){var N=this.exportTo(L,I,L,B);if(!b.jqx.pdfExport){b.jqx.pdfExport={orientation:"portrait",paperSize:"a4"}}var M=new pdfDataExport(b.jqx.pdfExport.orientation,"pt",b.jqx.pdfExport.paperSize);if(F=="utf-8"||F=="UTF-8"){M.setFont("courier","normal")}M.cellInitialize();var K=b(N).find("th");var J=b(N).find("tr");var P=0;M.setFontSize(13*72/96);var G=595;switch(b.jqx.pdfExport.paperSize){case"legal":var G=612;if(b.jqx.pdfExport.orientation!=="portrait"){G=1008}break;case"letter":var G=612;if(b.jqx.pdfExport.orientation!=="portrait"){G=792}break;case"a3":var G=841;if(b.jqx.pdfExport.orientation!=="portrait"){G=1190}break;case"a4":var G=595;if(b.jqx.pdfExport.orientation!=="portrait"){G=842}break;case"a5":var G=420;if(b.jqx.pdfExport.orientation!=="portrait"){G=595}break}G-=20;var E=0;var C=[];b.each(K,function(Q){var R=parseInt(this.style.width);if(isNaN(R)){R=25}var S=R*72/96;C[Q]=S;E+=S});if(E>G){b.each(C,function(Q){C[Q]=(C[Q]/E)*100;C[Q]=C[Q]*G/100})}b.each(K,function(R){var V=C[R];var U=25*72/96;var T=M.getTextDimensions(b(this).html());var S=b(this).html();if(T.w+3>V){var Q=M.splitTextToSize(S,V-3);var W=Q[0];if(W.length>3){S=W.substring(0,W.length-3)+"..."}else{S=W.substring(0,1)+"..."}var Q=M.splitTextToSize(S,V-3);var W=Q[0];if(W!=S){S=W}}M.cell(10,10,V,U,S,P)});P++;b.each(J,function(X){if(X===0){return true}var R=b(this).children();var S=R.length>K.length;if(S){var aa=R.length-K.length;var ab="";var Z=C[0];var W=25*72/96;for(var T=0;T<=aa;T++){var Q=R[T].innerHTML;if(Q==="+"||Q==="-"){Q=Q+" "}if(Q===" "){Q=" "}ab+=Q}var 
V=M.getTextDimensions(ab);if(V.w+3>Z){var Y=M.splitTextToSize(ab,Z-3);var U=Y[0];if(U.length>3){ab=U.substring(0,U.length-3)+"..."}else{ab=U.substring(0,1)+"..."}var Y=M.splitTextToSize(ab,Z-3);var U=Y[0];if(U!=ab){ab=U}}M.cell(10,10,Z,W,ab,P);for(var T=aa+1;T<R.length;T++){var X=T-aa;var Z=C[X];var W=25*72/96;var ab=b(R[T]).html();if(ab===" "){ab=" "}var V=M.getTextDimensions(b(R[T]).html());if(V.w+3>Z){var Y=M.splitTextToSize(ab,Z-3);var U=Y[0];if(U.length>3){ab=U.substring(0,U.length-3)+"..."}else{ab=U.substring(0,1)+"..."}var Y=M.splitTextToSize(ab,Z-3);var U=Y[0];if(U!=ab){ab=U}}M.cell(10,10,Z,W,ab,P)}P++;return true}b.each(R,function(ad){var ah=C[ad];var ag=25*72/96;var af=b(this).html();if(af===" "){af=" "}var ae=M.getTextDimensions(b(this).html());if(ae.w+3>ah){var ac=M.splitTextToSize(af,ah-3);var ai=ac[0];if(ai.length>3){af=ai.substring(0,ai.length-3)+"..."}else{af=ai.substring(0,1)+"..."}var ac=M.splitTextToSize(af,ah-3);var ai=ac[0];if(ai!=af){af=ai}}M.cell(10,10,ah,ag,af,P)});P++});if(b.jqx.browser.msie&&b.jqx.browser.version<10){throw new Error("PDF export requires a browser with HTML5 support");return}M.save(B+".pdf");return}var H=this.exportTo(L,I,L,B),D=i(B,L,H,O,F);D.submit();document.body.removeChild(D)};this.exportToLocalFile=function(F,C,D,B){var E=this.exportTo(F,D,B);document.location.href="data:application/octet-stream;filename="+C+","+encodeURIComponent(E)}};l.extend=function(v,w){if(w instanceof b.jqx.dataAdapter.DataExportModuleBase){c[v]=w}else{throw"The module "+v+" is not instance of DataExportModuleBase."}};return l}());b.jqx.dataAdapter.ArrayExporter=a})(jqxBaseFramework);(function(b){var a=function(){this.formatData=function(f,e,c,h){if(e==="date"){var d="";if(typeof f==="string"){d=b.jqx.dataFormat.tryparsedate(f);f=d}if(f===""||f===null){return""}d=b.jqx.dataFormat.formatdate(f,c,h);if(d.toString()=="NaN"||d==null){return""}f=d}else{if(e==="number"||e==="float"||e==="int"||e=="integer"){if(f===""||f===null){return""}if(!isNaN(new Number(f))){var g=b.jqx.dataFormat.formatnumber(f,c,h);if(g.toString()=="NaN"){return""}else{f=g}}}else{f=f}}if(f===null){return""}return f};this.getFormat=function(f){var c=f?f.formatString:"";var e=f?f.localization:"";var d="string";d=f?f.type:"string";if(d=="number"||d=="float"){if(!c){c="f2"}}if(d=="int"||d=="integer"){if(!c){c="n0"}}if(d=="date"){if(!c){c="d"}}return{type:d,formatString:c,localization:e}};this.beginFile=function(){throw"Not implemented!"};this.beginHeader=function(){throw"Not implemented!"};this.appendHeaderCell=function(){throw"Not implemented!"};this.endHeader=function(){throw"Not implemented!"};this.beginBody=function(){throw"Not implemented!"};this.beginRow=function(){throw"Not implemented!"};this.beginRows=function(){throw"Not implemented!"};this.endRows=function(){throw"Not implemented!"};this.appendBodyCell=function(){throw"Not implemented!"};this.endRow=function(){throw"Not implemented!"};this.endBody=function(){throw"Not implemented!"};this.endFile=function(){throw"Not implemented!"};this.getFile=function(){throw"Not implemented!"}};b.jqx.dataAdapter.DataExportModuleBase=a})(jqxBaseFramework);(function(d){var c=function(j){var e,h,g;var l=0;var i=this;this.beginFile=function(){e=""};this.beginHeader=function(){};this.appendHeaderCell=function(q,r,p,m,n){if(p){if(p.level!=undefined){if(n<p.maxLevel){return}else{if(n===p.maxLevel){if(m){k(q.text)}for(var 
o=0;o<p.maxLevel;o++){k("")}return}}}}g=m;if(m){k(q.text)}};this.endHeader=function(){this.endRow()};this.beginBody=function(){l=0};this.beginRow=function(){if((l>0)||(l==0&&g)){e+="\n"}l++};this.appendBodyCell=function(q,m,p,r,n){if(p){if(p.maxLevel!=undefined){if(n===p.maxLevel){k(q,m);for(var o=0;o<p.maxLevel-r.level-1;o++){k("",m)}return}}}k(q,m)};this.endRow=function(){e=e.substring(0,e.length-1)};this.endBody=function(){};this.endFile=function(){};this.getFile=function(){return e};function f(m,o){if(o){var n=i.getFormat(o);m=i.formatData(m,n.type,n.formatString,n.localization)}m='"'+m+'"';return m}function k(m,n){m=f(m,n);e+=m+j}};c.prototype=new d.jqx.dataAdapter.DataExportModuleBase();var a=function(){};a.prototype=new c(",");var b=function(){};b.prototype=new c("\t");d.jqx.dataAdapter.ArrayExporter.extend("csv",new a());d.jqx.dataAdapter.ArrayExporter.extend("tsv",new b())})(jqxBaseFramework);(function(d){var a=function(){var i=false;var g;var h;var j=0;this.setPDF=function(){i=true};this.beginFile=function(k){if(i||k==undefined){g='<table style="empty-cells: show;" cellspacing="0" cellpadding="2">'}else{g='<html>\n\t<head>\n\t\t<title></title>\n\t\t<meta http-equiv=Content-type content="text/html; charset=UTF-8">\n\t</head>\n\t<body>\n\t\t<table style="empty-cells: show;" cellspacing="0" cellpadding="2">'}};this.beginHeader=function(){if(i){g+="\n\t<thead><tr>"}else{g+="\n\t\t\t<thead>"}};this.appendHeaderCell=function(m,n,l,k){h=k;if(!k){return}if(i){g+='\n\t\t\t\t<th style="'+f(l)+'">'+m.text+"</th>"}else{if(l.disabled){return}if(l.merge){if(m.width){g+="\n\t\t\t\t<th colspan="+(1+l.merge)+' style="width: '+m.width+"px; "+f(l)+'">'+m.text+"</th>"}else{g+="\n\t\t\t\t<th colspan="+(1+l.merge)+' style="'+f(l)+'">'+m.text+"</th>"}}else{if(m.width){g+='\n\t\t\t\t<th style="width: '+m.width+"px; "+f(l)+'">'+m.text+"</th>"}else{g+='\n\t\t\t\t<th style="'+f(l)+'">'+m.text+"</th>"}}}};this.endHeader=function(){if(i){g+="\n\t</tr></thead>"}else{g+="\n\t\t\t</thead>"}};this.beginBody=function(){if(i){g+="\n\t<tbody>"}else{g+="\n\t\t\t<tbody>"}j=0};this.beginRow=function(){if(i){g+="\n\t<tr>"}else{g+="\n\t\t\t\t<tr>"}j++};this.appendBodyCell=function(l,n,k){var m=this.getFormat(n);if(l===""){l=" "}if(i){if(j==1&&!h){g+='\n\t\t\t\t\t<td style="'+f(k)+' border-top-width: 1px;">'+this.formatData(l,m.type,m.formatString,m.localization)+"</td>"}else{g+='\n\t\t\t\t\t<td style="'+f(k)+'">'+this.formatData(l,m.type,m.formatString,m.localization)+"</td>"}}else{if(k.merge){if(j==1&&!h){g+="\n\t\t\t\t\t<td colspan="+(1+k.merge)+' style="'+f(k)+' border-top-width: 1px;">'+this.formatData(l,m.type,m.formatString,m.localization)+"</td>"}else{g+="\n\t\t\t\t\t<td colspan="+(1+k.merge)+' style="'+f(k)+'">'+this.formatData(l,m.type,m.formatString,m.localization)+"</td>"}}else{if(j==1&&!h){g+='\n\t\t\t\t\t<td style="'+f(k)+' border-top-width: 1px;">'+this.formatData(l,m.type,m.formatString,m.localization)+"</td>"}else{g+='\n\t\t\t\t\t<td style="'+f(k)+'">'+this.formatData(l,m.type,m.formatString,m.localization)+"</td>"}}}};this.endRow=function(){if(i){g+="\n\t</tr>"}else{g+="\n\t\t\t\t</tr>"}};this.endBody=function(){if(i){g+="\n\t</tbody>"}else{g+="\n\t\t\t</tbody>"}};this.endFile=function(k){if(i||k==undefined){g+="\n</table>"}else{g+="\n\t\t</table>\n\t</body>\n</html>\n"}};this.getFile=function(){return g};function f(m){var k="";for(var l in m){if(m.hasOwnProperty(l)){if(i&&l=="font-size"){m[l]="100%"}k+=l+":"+m[l]+";"}}return k}};a.prototype=new d.jqx.dataAdapter.DataExportModuleBase();var 
e=function(){};e.prototype=new a();var c=function(){};c.prototype=new a();var b=new c();d.jqx.dataAdapter.ArrayExporter.extend("html",new e());d.jqx.dataAdapter.ArrayExporter.extend("pdf",b)})(jqxBaseFramework);(function(b){var a=function(){var h,l,d,i,c,j,m={style:"",stylesMap:{font:{color:"Color","font-family":"FontName","font-style":"Italic","font-weight":"Bold"},interior:{"background-color":"Color",background:"Color"},alignment:{left:"Left",center:"Center",right:"Right"}},startStyle:function(p){this.style+='\n\t\t<Style ss:ID="'+p+'" ss:Name="'+p+'">'},buildAlignment:function(q){if(q["text-align"]){var r=this.stylesMap.alignment[q["text-align"]];if(!r){r="Left"}var p='\n\t\t\t<Alignment ss:Vertical="Bottom" ss:Horizontal="'+r+'"/>';this.style+=p}},buildBorder:function(s){if(s["border-color"]){var r="\n\t\t\t<Borders>";var u='\n\t\t\t\t<Border ss:Position="Bottom" ss:LineStyle="Continuous" ss:Weight="1" ss:Color="'+s["border-color"]+'"/>';var p='\n\t\t\t\t<Border ss:Position="Left" ss:LineStyle="Continuous" ss:Weight="1" ss:Color="'+s["border-color"]+'"/>';var q='\n\t\t\t\t<Border ss:Position="Right" ss:LineStyle="Continuous" ss:Weight="1" ss:Color="'+s["border-color"]+'"/>';var t='\n\t\t\t\t<Border ss:Position="Top" ss:LineStyle="Continuous" ss:Weight="1" ss:Color="'+s["border-color"]+'"/>';r+=u;r+=p;r+=q;r+=t;r+="\n\t\t\t</Borders>";this.style+=r}},buildFont:function(q){var r=this.stylesMap.font,p="\n\t\t\t<Font ";for(var s in r){if(typeof q[s]!=="undefined"){if(s==="font-style"&&q[s].toString().toLowerCase()==="italic"){p+='ss:Italic="1" '}else{if(s==="font-weight"&&q[s].toString().toLowerCase()==="bold"){p+='ss:Bold="1" '}else{if(s==="color"){p+="ss:"+r[s]+'="'+q[s]+'" '}}}}}p+="/>";this.style+=p},buildInterior:function(q){var r=this.stylesMap.interior,t="\n\t\t\t<Interior ";var p=false;for(var s in r){if(typeof q[s]!=="undefined"){t+="ss:"+r[s]+'="'+q[s]+'" ';p=true}}if(p){t+='ss:Pattern="Solid"'}t+="/>";this.style+=t},buildFormat:function(q){if(q.dataType=="number"||q.dataType=="float"||q.dataType=="int"||q.dataType=="integer"){var p=q.formatString;if(p==""||p.indexOf("n")!=-1||p.indexOf("N")!=-1){this.style+='\n\t\t\t<NumberFormat ss:Format="0"/>'}else{if(p=="f"||p=="F"||p=="D"||p.indexOf("d")!=-1){this.style+='\n\t\t\t<NumberFormat ss:Format="#,##0.00_);[Red](#,##0.00)"/>'}else{if(p.indexOf("p")!=-1||p.indexOf("P")!=-1){this.style+='\n\t\t\t<NumberFormat ss:Format="Percent"/>'}else{if(p.indexOf("c")!=-1||p.indexOf("C")!=-1){if(parseInt(q.currencysymbol.charCodeAt(0))==8364){this.style+='\n\t\t\t<NumberFormat ss:Format="Euro Currency"/>'}else{this.style+='\n\t\t\t<NumberFormat ss:Format="Currency"/>'}}}}}}else{if(q.dataType=="date"){this.style+='\n\t\t\t<NumberFormat ss:Format="Short Date"/>'}}},closeStyle:function(){this.style+="\n\t\t</Style>"},toString:function(){var p=this.style;this.style="";return p}};this.beginFile=function(){c={};j=0;h='<?xml version="1.0"?>\n\t<?mso-application progid="Excel.Sheet"?> \n\t<Workbook xmlns="urn:schemas-microsoft-com:office:spreadsheet" \n\txmlns:o="urn:schemas-microsoft-com:office:office" \n\txmlns:x="urn:schemas-microsoft-com:office:excel" \n\txmlns:ss="urn:schemas-microsoft-com:office:spreadsheet" \n\txmlns:html="http://www.w3.org/TR/REC-html40"> \n\t<DocumentProperties xmlns="urn:schemas-microsoft-com:office:office"> \n\t<Version>12.00</Version> \n\t</DocumentProperties> \n\t<ExcelWorkbook xmlns="urn:schemas-microsoft-com:office:excel"> \n\t<WindowHeight>8130</WindowHeight> \n\t<WindowWidth>15135</WindowWidth> 
\n\t<WindowTopX>120</WindowTopX> \n\t<WindowTopY>45</WindowTopY> \n\t<ProtectStructure>False</ProtectStructure> \n\t<ProtectWindows>False</ProtectWindows> \n\t</ExcelWorkbook> \n\t<Styles>'};this.beginHeader=function(){l='\n\t<Worksheet ss:Name="Sheet1">\n\t\t<Table>';d=[];i=[]};this.appendHeaderCell=function(r,s,q){var p=r.width!=undefined?r.width:r.text.length*10;l+='\n\t\t\t<Column ss:Width="'+p+'"/>';d.push(r);i.push(q)};this.endHeader=function(p){if(p){this.beginRow();for(var q=0;q<d.length;q+=1){if(i[q].disabled){continue}g.call(this,d[q]["text"],null,i[q])}this.endRow()}};this.beginBody=function(){};this.beginRow=function(q){if(q!=undefined){l+="\n\t\t\t";for(var p=0;p<q;p++){l+="\t"}l+="<Row>";return}l+="\n\t\t\t<Row>"};this.beginRows=function(p){l+="\n\t\t\t\t<Rows>"};this.appendBodyCell=function(r,p,q,s){g.call(this,r,p,q,s)};this.endRow=function(q){if(q!=undefined){l+="\n\t\t\t";for(var p=0;p<q;p++){l+="\t"}l+="</Row>";return}l+="\n\t\t\t</Row>"};this.endRows=function(q){if(q!=undefined){l+="\n\t\t\t";for(var p=0;p<q;p++){l+="\t"}l+="</Rows>";return}};this.endBody=function(){l+="\n\t\t</Table>"};this.endFile=function(){l+="\n\t</Worksheet>\n</Workbook>";h+="\n\t</Styles>"};this.getFile=function(){return h+l};function g(s,v,r,u){var q="String";var t=this.getFormat(v);if(s!=null&&s.toString().substring(0,3)=="_AG"){s=s.toString().substring(3);q="String"}else{if(t.type=="date"){s=this.formatData(s,t.type,t.formatString,t.localization);if(s===null||s===""){s="";q="String"}}if(t.type=="string"){if(s===null||s===undefined){s=""}else{if(s.toString().indexOf("&")>=0){s=s.toString().replace(/&/g,"&")}if(s.toString().indexOf(">")>=0){s=s.toString().replace(/>/g,">")}if(s.toString().indexOf("<")>=0){s=s.toString().replace(/</g,"<")}if(s.toString().indexOf('"')>=0){s=s.toString().replace(/"/g,""")}if(s.toString().indexOf("'")>=0){s=s.toString().replace(/'/g,"'")}}}if(r.dataType=="number"||r.dataType=="float"||r.dataType=="int"||r.dataType=="integer"){q="Number";s=parseFloat(s);if(s===null||isNaN(s)||s===""){s="";q="String"}if(s&&q!="String"&&s!=""){if(v&&v.formatString&&v.formatString.indexOf("p")>=0){s=s/100}}r.currencysymbol=v.localization.currencysymbol}}var p=f(r);if(r.merge){l+='\n\t\t\t\t<Cell ss:MergeAcross="'+r.merge+'" ss:StyleID="'+p+'"><Data ss:Type="'+q+'">'+s+"</Data></Cell>"}else{l+='\n\t\t\t\t<Cell ss:StyleID="'+p+'"><Data ss:Type="'+q+'">'+s+"</Data></Cell>"}}function n(){j+=1;return"xls-style-"+j}function k(q){for(var p in c){if(o(q,c[p])&&o(c[p],q)){return p}}return undefined}function o(t,q){var s=true;for(var r in t){if(t[r]!==q[r]){s=false}}return s}function e(q,p){m.startStyle(q);m.buildAlignment(p);m.buildBorder(p);m.buildFont(p);m.buildInterior(p);m.buildFormat(p);m.closeStyle();h+=m.toString()}function f(p){if(!p){return""}var q=k(p);if(typeof q==="undefined"){q=n();c[q]=p;e(q,p)}return q}};a.prototype=new b.jqx.dataAdapter.DataExportModuleBase();b.jqx.dataAdapter.ArrayExporter.extend("xls",new a())})(jqxBaseFramework);(function(b){var a=function(){var e,c,d;this.beginFile=function(){e='<?xml version="1.0" encoding="UTF-8" ?>';e+="\n<table>"};this.beginHeader=function(){c=[]};this.appendHeaderCell=function(f,g){c.push(g)};this.endHeader=function(){};this.beginBody=function(g,f){};this.beginRow=function(g){if(g!=undefined){if(this.hierarchy){e+="\n\t";for(var f=0;f<g;f++){e+="\t\t"}e+="<row>";d=0;return}}e+="\n\t<row>";d=0};this.beginRows=function(g){if(g!=undefined){e+="\n\t\t";for(var 
f=0;f<g;f++){e+="\t\t"}e+="<rows>";d=0;return}e+="\n\t\t<rows>"};this.appendBodyCell=function(j,n,g,m,h,l){var k=this.getFormat(n);j=this.formatData(j,k.type,k.formatString,k.localization);if(k.type=="string"){if(j.toString().indexOf("&")>=0){j=j.toString().replace(/&/g,"&")}if(j.toString().indexOf(">")>=0){j=j.toString().replace(/>/g,">")}if(j.toString().indexOf("<")>=0){j=j.toString().replace(/</g,"<")}if(j.toString().indexOf('"')>=0){j=j.toString().replace(/"/g,""")}if(j.toString().indexOf("'")>=0){j=j.toString().replace(/'/g,"'")}}if(m.level!=undefined){if(this.hierarchy){e+="\n\t\t";for(var f=0;f<m.level;f++){e+="\t\t"}if(l===undefined){e+="<"+c[d]+">"+j+"</"+c[d]+">"}else{e+="<"+l+">"+j+"</"+l+">"}}else{if(l!=undefined){e+="\n\t\t<"+l+">"+j+"</"+l+">"}else{e+="\n\t\t<"+c[d]+">"+j+"</"+c[d]+">"}}}else{e+="\n\t\t<"+c[d]+">"+j+"</"+c[d]+">"}d++};this.endRow=function(g){if(g!=undefined){if(this.hierarchy){e+="\n\t";for(var f=0;f<g;f++){e+="\t\t"}e+="</row>";d=0;return}}e+="\n\t</row>";d=0};this.endRows=function(g){if(g!=undefined){e+="\n\t\t";for(var f=0;f<g;f++){e+="\t\t"}e+="</rows>";d=0;return}e+="\n\t\t</rows>"};this.endBody=function(){};this.endFile=function(){e+="\n</table>"};this.getFile=function(){return e}};a.prototype=new b.jqx.dataAdapter.DataExportModuleBase();b.jqx.dataAdapter.ArrayExporter.extend("xml",new a())})(jqxBaseFramework);(function(d){var j=/[\\\"\x00-\x1f\x7f-\x9f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g,l={"\b":"\\b","\t":"\\t","\n":"\\n","\f":"\\f","\r":"\\r",'"':'\\"',"\\":"\\\\"};function a(n){return'"'+n.replace(j,function(o){var p=l[o];return typeof p==="string"?p:"\\u"+("0000"+o.charCodeAt(0).toString(16)).slice(-4)})+'"'}function b(o){return o<10?"0"+o:o}function e(o){var n;if(isFinite(o.valueOf())){n=o.getUTCFullYear()+"-"+b(o.getUTCMonth()+1)+"-"+b(o.getUTCDate())+"T"+b(o.getUTCHours())+":"+b(o.getUTCMinutes())+":"+b(o.getUTCSeconds())+'Z"'}else{n="null"}return n}function g(q){var n=q.length,o=[],p;for(p=0;p<n;p++){o.push(h(p,q)||"null")}return"["+o.join(",")+"]"}function m(q){var o=[],p,n;for(p in q){if(Object.prototype.hasOwnProperty.call(q,p)){n=h(p,q);if(n){o.push(a(p)+":"+n)}}}return"{"+o.join(",")+"}"}function i(n){switch(Object.prototype.toString.call(n)){case"[object Date]":return e(n);case"[object Array]":return g(n)}return m(n)}function k(o,n){switch(n){case"string":return a(o);case"number":case"float":case"integer":case"int":return isFinite(o)?o:"null";case"boolean":return o}return"null"}function h(o,n){var q=n[o],p=typeof q;if(q&&typeof q==="object"&&typeof q.toJSON==="function"){q=q.toJSON(o);p=typeof q}if(/(number|float|int|integer|string|boolean)/.test(p)||(!q&&p==="object")){return k(q,p)}else{return i(q)}}function f(n){if(window.JSON&&typeof window.JSON.stringify==="function"){return window.JSON.stringify(n)}return h("",{"":n})}var c=function(){var q=this;this.prepareData=function(t,v){if(v){var u=q.getFormat(v);t=q.formatData(t,u.type,u.formatString,u.localization)}return t};var n,p,r,o=[],s=0;this.beginFile=function(){p=[]};this.beginHeader=function(){};this.appendHeaderCell=function(t){};this.endHeader=function(){};this.beginBody=function(u,t){};this.beginRow=function(){if(hierarchy){o[s]={}}else{r={}}};this.beginRows=function(){o[s].rows=[];s++;o[s]={}};this.endRows=function(){s--};this.appendBodyCell=function(u,t){var 
v=this.prepareData(u,t);if(hierarchy){o[s][t.text]=v}else{r[t.text]=v}};this.endRow=function(){if(hierarchy){if(s==0){p.push(o[s])}else{o[s-1].rows.push(o[s])}}else{p.push(r)}};this.endBody=function(){};this.endFile=function(){n=f(p)};this.getFile=function(){return n}};c.prototype=new d.jqx.dataAdapter.DataExportModuleBase();d.jqx.dataAdapter.ArrayExporter.extend("json",new c())})(jqxBaseFramework);var pdfDataExport=(function(){if(typeof btoa==="undefined"){window.btoa=function(m){var h="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=",s=h.split(""),g,f,e,q,p,o,n,t,l=0,u=0,k="",j=[],d;do{g=m.charCodeAt(l++);f=m.charCodeAt(l++);e=m.charCodeAt(l++);t=g<<16|f<<8|e;q=t>>18&63;p=t>>12&63;o=t>>6&63;n=t&63;j[u++]=s[q]+s[p]+s[o]+s[n]}while(l<m.length);k=j.join("");d=m.length%3;return(d?k.slice(0,d-3):k)+"===".slice(d||3)}}if(typeof atob==="undefined"){window.atob=function(l){var g="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=",f,e,d,p,o,n,m,q,k=0,r=0,h="",j=[];if(!l){return l}l+="";do{p=g.indexOf(l.charAt(k++));o=g.indexOf(l.charAt(k++));n=g.indexOf(l.charAt(k++));m=g.indexOf(l.charAt(k++));q=p<<18|o<<12|n<<6|m;f=q>>16&255;e=q>>8&255;d=q&255;if(n===64){j[r++]=String.fromCharCode(f)}else{if(m===64){j[r++]=String.fromCharCode(f,e)}else{j[r++]=String.fromCharCode(f,e,d)}}}while(k<l.length);h=j.join("");return h}}var c=typeof Object.keys==="function"?function(d){return Object.keys(d).length}:function(d){var f=0,g;for(g in d){if(d.hasOwnProperty(g)){f++}}return f},a=function(d){this.topics={};this.context=d;this.publish=function(h,m){if(this.topics[h]){var j=this.topics[h],o=[],n,g,e,f,k=function(){};m=Array.prototype.slice.call(arguments,1);for(g=0,e=j.length;g<e;g++){f=j[g];n=f[0];if(f[1]){f[0]=k;o.push(g)}n.apply(this.context,m)}for(g=0,e=o.length;g<e;g++){j.splice(o[g],1)}}};this.subscribe=function(e,g,f){if(!this.topics[e]){this.topics[e]=[[g,f]]}else{this.topics[e].push([g,f])}return{topic:e,callback:g}};this.unsubscribe=function(h){if(this.topics[h.topic]){var f=this.topics[h.topic],g,e;for(g=0,e=f.length;g<e;g++){if(f[g][0]===h.callback){f.splice(g,1)}}}}};function b(E,ae,M,V){if(typeof E==="undefined"){E="p"}else{E=E.toString().toLowerCase()}if(typeof ae==="undefined"){ae="mm"}if(typeof M==="undefined"){M="a4"}if(typeof V==="undefined"&&typeof zpipe==="undefined"){V=false}var at=M.toString().toLowerCase(),ao="0.9.0rc2",t=[],F=0,av=V,U="1.3",N={a3:[841.89,1190.55],a4:[595.28,841.89],a5:[420.94,595.28],letter:[612,792],legal:[612,1008]},ad="0 g",H="0 G",g=0,f=[],n=2,v=false,D=[],ai={},Q={},aj=16,d,z=0.200025,B,C,ak,O={title:"",subject:"",author:"",keywords:"",creator:""},R=0,T=0,P={},G=new a(P),af,aq,p=function(i){return i.toFixed(2)},o=function(i){return i.toFixed(3)},A=function(i){var k=(i).toFixed(0);if(i<10){return"0"+k}else{return k}},r=function(i){var k=(i).toFixed(0);if(k.length<10){return new Array(11-k.length).join("0")+k}else{return k}},ab=function(i){if(v){f[g].push(i)}else{t.push(i);F+=i.length+1}},w=function(){n++;D[n]=F;ab(n+" 0 obj");return n},K=function(i){ab("stream");ab(i);ab("endstream")},am,S,ap,al,aa=function(){am=C*ak;S=B*ak;var aB,aA,k,aw,ax,az,ay;for(aB=1;aB<=g;aB++){w();ab("<</Type /Page");ab("/Parent 1 0 R");ab("/Resources 2 0 R");ab("/Contents "+(n+1)+" 0 R>>");ab("endobj");aA=f[aB].join("\n");w();if(av){k=[];for(ax=0;ax<aA.length;++ax){k[ax]=aA.charCodeAt(ax)}ay=adler32cs.from(aA);az=new Deflater(6);az.append(new Uint8Array(k));aA=az.flush();k=[new Uint8Array([120,156]),new Uint8Array(aA),new 
Uint8Array([ay&255,(ay>>8)&255,(ay>>16)&255,(ay>>24)&255])];aA="";for(ax in k){if(k.hasOwnProperty(ax)){aA+=String.fromCharCode.apply(null,k[ax])}}ab("<</Length "+aA.length+" /Filter [/FlateDecode]>>")}else{ab("<</Length "+aA.length+">>")}K(aA);ab("endobj")}D[1]=F;ab("1 0 obj");ab("<</Type /Pages");ap="/Kids [";for(ax=0;ax<g;ax++){ap+=(3+2*ax)+" 0 R "}ab(ap+"]");ab("/Count "+g);ab("/MediaBox [0 0 "+p(am)+" "+p(S)+"]");ab(">>");ab("endobj")},X=function(i){i.objectNumber=w();ab("<</BaseFont/"+i.PostScriptName+"/Type/Font");if(typeof i.encoding==="string"){ab("/Encoding/"+i.encoding)}ab("/Subtype/Type1>>");ab("endobj")},J=function(){var i;for(i in ai){if(ai.hasOwnProperty(i)){X(ai[i])}}},L=function(){G.publish("putXobjectDict")},x=function(){ab("/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]");ab("/Font <<");var i;for(i in ai){if(ai.hasOwnProperty(i)){ab("/"+i+" "+ai[i].objectNumber+" 0 R")}}ab(">>");ab("/XObject <<");L();ab(">>")},h=function(){J();G.publish("putResources");D[2]=F;ab("2 0 obj");ab("<<");x();ab(">>");ab("endobj");G.publish("postPutResources")},m=function(aw,k,ax){var i;if(Q[k]===i){Q[k]={}}Q[k][ax]=aw},au={},u=function(i,ax,az,aw){var ay="F"+(c(ai)+1).toString(10),k=ai[ay]={id:ay,PostScriptName:i,fontName:ax,fontStyle:az,encoding:aw,metadata:{}};m(ay,ax,az);G.publish("addFont",k);return ay},e=function(){var k="helvetica",aG="times",aI="courier",aF="normal",aE="bold",aD="italic",aH="bolditalic",ax="StandardEncoding",aA=[["Helvetica",k,aF],["Helvetica-Bold",k,aE],["Helvetica-Oblique",k,aD],["Helvetica-BoldOblique",k,aH],["Courier",aI,aF],["Courier-Bold",aI,aE],["Courier-Oblique",aI,aD],["Courier-BoldOblique",aI,aH],["Times-Roman",aG,aF],["Times-Bold",aG,aE],["Times-Italic",aG,aD],["Times-BoldItalic",aG,aH]],aC,ay,aB,az;for(aC=0,ay=aA.length;aC<ay;aC++){var aw=ax;aB=u(aA[aC][0],aA[aC][1],aA[aC][2],aw);az=aA[aC][0].split("-");m(aB,az[0],az[1]||"")}G.publish("addFonts",{fonts:ai,dictionary:Q})},s=function(aG,ax){var aC,aA,az,ay,aE,aD,aw,aF,k,aB;if(ax===az){ax={}}ay=ax.sourceEncoding?ay:"Unicode";aD=ax.outputEncoding;if((ax.autoencode||aD)&&ai[d].metadata&&ai[d].metadata[ay]&&ai[d].metadata[ay].encoding){aE=ai[d].metadata[ay].encoding;if(!aD&&ai[d].encoding){aD=ai[d].encoding}if(!aD&&aE.codePages){aD=aE.codePages[0]}if(typeof aD==="string"){aD=aE[aD]}if(aD){aF=false;aw=[];for(aC=0,aA=aG.length;aC<aA;aC++){k=aD[aG.charCodeAt(aC)];if(k){aw.push(String.fromCharCode(k))}else{aw.push(aG[aC])}if(aw[aC].charCodeAt(0)>>8){aF=true}}aG=aw.join("")}}aC=aG.length;while(aF===az&&aC!==0){if(aG.charCodeAt(aC-1)>>8){aF=true}aC--}if(!aF){return aG}else{aw=ax.noBOM?[]:[254,255];for(aC=0,aA=aG.length;aC<aA;aC++){k=aG.charCodeAt(aC);aB=k>>8;if(aB>>8){throw new Error("Character at position "+aC.toString(10)+" of string '"+aG+"' exceeds 16bits. 
Cannot be encoded into UCS-2 BE")}aw.push(aB);aw.push(k-(aB<<8))}return String.fromCharCode.apply(az,aw)}},Z=function(k,i){return s(k,i).replace(/\\/g,"\\\\").replace(/\(/g,"\\(").replace(/\)/g,"\\)")},Y=function(){ab("/Producer (pdfDataExport "+ao+")");if(O.title){ab("/Title ("+Z(O.title)+")")}if(O.subject){ab("/Subject ("+Z(O.subject)+")")}if(O.author){ab("/Author ("+Z(O.author)+")")}if(O.keywords){ab("/Keywords ("+Z(O.keywords)+")")}if(O.creator){ab("/Creator ("+Z(O.creator)+")")}var i=new Date();ab("/CreationDate (D:"+[i.getFullYear(),A(i.getMonth()+1),A(i.getDate()),A(i.getHours()),A(i.getMinutes()),A(i.getSeconds())].join("")+")")},W=function(){ab("/Type /Catalog");ab("/Pages 1 0 R");ab("/OpenAction [3 0 R /FitH null]");ab("/PageLayout /OneColumn");G.publish("putCatalog")},l=function(){ab("/Size "+(n+1));ab("/Root "+n+" 0 R");ab("/Info "+(n-1)+" 0 R")},ar=function(){g++;v=true;f[g]=[]},ah=function(){ar();ab(p(z*ak)+" w");ab(H);if(R!==0){ab(R.toString(10)+" J")}if(T!==0){ab(T.toString(10)+" j")}G.publish("addPage",{pageNumber:g})},y=function(aw,ay){var i,k;if(aw===k){aw=ai[d].fontName}if(ay===k){ay=ai[d].fontStyle}try{i=Q[aw][ay]}catch(ax){i=k}if(!i){throw new Error("Unable to look up font label for font '"+aw+"', '"+ay+"'. Refer to getFontList() for available fonts.")}return i},q=function(){v=false;t=[];D=[];ab("%PDF-"+U);aa();h();w();ab("<<");Y();ab(">>");ab("endobj");w();ab("<<");W();ab(">>");ab("endobj");var aw=F,k;ab("xref");ab("0 "+(n+1));ab("0000000000 65535 f ");for(k=1;k<=n;k++){ab(r(D[k])+" 00000 n ")}ab("trailer");ab("<<");l();ab(">>");ab("startxref");ab(aw);ab("%%EOF");v=true;return t.join("\n")},ac=function(i){var k="S";if(i==="F"){k="f"}else{if(i==="FD"||i==="DF"){k="B"}}return k},I=function(az,aw){var ay,aB,aA,aC,ax,k;switch(az){case ay:return q();case"save":if(navigator.getUserMedia){if(window.URL===undefined){return P.output("dataurlnewwindow")}else{if(window.URL.createObjectURL===undefined){return P.output("dataurlnewwindow")}}}aB=q();aA=aB.length;aC=new Uint8Array(new ArrayBuffer(aA));for(ax=0;ax<aA;ax++){aC[ax]=aB.charCodeAt(ax)}k=new Blob([aC],{type:"application/pdf"});saveAs(k,aw);break;case"datauristring":case"dataurlstring":return"data:application/pdf;base64,"+btoa(q());case"datauri":case"dataurl":document.location.href="data:application/pdf;base64,"+btoa(q());break;case"dataurlnewwindow":window.open("data:application/pdf;base64,"+btoa(q()));break;default:throw new Error('Output type "'+az+'" is not supported.')}};if(ae==="pt"){ak=1}else{if(ae==="mm"){ak=72/25.4}else{if(ae==="cm"){ak=72/2.54}else{if(ae==="in"){ak=72}else{throw ("Invalid unit: "+ae)}}}}if(N.hasOwnProperty(at)){B=N[at][1]/ak;C=N[at][0]/ak}else{try{B=M[1];C=M[0]}catch(an){throw ("Invalid format: "+M)}}if(E==="p"||E==="portrait"){E="p";if(C>B){af=C;C=B;B=af}}else{if(E==="l"||E==="landscape"){E="l";if(B>C){af=C;C=B;B=af}}else{throw ("Invalid orientation: "+E)}}P.internal={pdfEscape:Z,getStyle:ac,getFont:function(){return ai[y.apply(P,arguments)]},getFontSize:function(){return aj},btoa:btoa,write:function(i,ax,aw,k){ab(arguments.length===1?i:Array.prototype.join.call(arguments," "))},getCoordinateString:function(i){return p(i*ak)},getVerticalCoordinateString:function(i){return p((B-i)*ak)},collections:{},newObject:w,putStream:K,events:G,scaleFactor:ak,pageSize:{width:C,height:B},output:function(k,i){return I(k,i)}};P.addPage=function(){ah();return this};var ag=["","0","00","000","0000"];var j=function(aA,aw){var ax=["FEFF"];for(var 
az=0,k=aA.length,ay;az<k;++az){ay=aA.charCodeAt(az).toString(16).toUpperCase();ax.push(ag[4-ay.length],ay)}return ax.join("")};P.text16=function(aF,aE,aC,aw){var ax,aA,az,aD,k,aB,ay;if(typeof aF==="number"){aA=aC;az=aF;aD=aE;aF=aA;aE=az;aC=aD}if(typeof aF==="string"&&aF.match(/[\n\r]/)){aF=aF.split(/\r\n|\r|\n/g)}if(typeof aw==="undefined"){aw={noBOM:true,autoencode:true}}else{if(aw.noBOM===ax){aw.noBOM=true}if(aw.autoencode===ax){aw.autoencode=true}}aw.autoencode=false;if(typeof aF==="string"){aB=j(aF,aw)}else{if(aF instanceof Array){k=aF.concat();for(ay=k.length-1;ay!==-1;ay--){k[ay]=j(k[ay],aw)}aB=k.join("> Tj\nT* <")}else{throw new Error('Type of text must be string or Array. "'+aF+'" is not recognized.')}}ab("BT\n/"+d+" "+aj+" Tf\n"+aj+" TL\n"+ad+"\n"+p(aE*ak)+" "+p((B-aC)*ak)+" Td\n<"+aB+"> Tj\nET");return this};P.text=function(aF,aE,aC,aw){var ax,aA,az,aD,k,aB,ay;if(typeof aF==="number"){aA=aC;az=aF;aD=aE;aF=aA;aE=az;aC=aD}if(typeof aF==="string"&&aF.match(/[\n\r]/)){aF=aF.split(/\r\n|\r|\n/g)}if(typeof aw==="undefined"){aw={noBOM:true,autoencode:true}}else{if(aw.noBOM===ax){aw.noBOM=true}if(aw.autoencode===ax){aw.autoencode=true}}if(typeof aF==="string"){aB=Z(aF,aw)}else{if(aF instanceof Array){k=aF.concat();for(ay=k.length-1;ay!==-1;ay--){k[ay]=Z(k[ay],aw)}aB=k.join(") Tj\nT* (")}else{throw new Error('Type of text must be string or Array. "'+aF+'" is not recognized.')}}ab("BT\n/"+d+" "+aj+" Tf\n"+aj+" TL\n"+ad+"\n"+p(aE*ak)+" "+p((B-aC)*ak)+" Td\n("+aB+") Tj\nET");return this};P.line=function(k,ax,i,aw){ab(p(k*ak)+" "+p((B-ax)*ak)+" m "+p(i*ak)+" "+p((B-aw)*ak)+" l S");return this};P.lines=function(k,aF,aE,aO,aK){var ay,aM,aC,aD,aB,aA,aI,aG,aN,aL,az,aJ,ax,aH,aw;if(typeof k==="number"){aM=aE;aC=k;aD=aF;k=aM;aF=aC;aE=aD}aK=ac(aK);aO=aO===ay?[1,1]:aO;ab(o(aF*ak)+" "+o((B-aE)*ak)+" m ");aB=aO[0];aA=aO[1];aG=k.length;aH=aF;aw=aE;for(aI=0;aI<aG;aI++){aN=k[aI];if(aN.length===2){aH=aN[0]*aB+aH;aw=aN[1]*aA+aw;ab(o(aH*ak)+" "+o((B-aw)*ak)+" l")}else{aL=aN[0]*aB+aH;az=aN[1]*aA+aw;aJ=aN[2]*aB+aH;ax=aN[3]*aA+aw;aH=aN[4]*aB+aH;aw=aN[5]*aA+aw;ab(o(aL*ak)+" "+o((B-az)*ak)+" "+o(aJ*ak)+" "+o((B-ax)*ak)+" "+o(aH*ak)+" "+o((B-aw)*ak)+" c")}}ab(aK);return this};P.rect=function(i,az,k,ax,aw){var ay=ac(aw);ab([p(i*ak),p((B-az)*ak),p(k*ak),p(-ax*ak),"re",ay].join(" "));return this};P.triangle=function(ax,aA,k,ay,i,aw,az){this.lines([[k-ax,ay-aA],[i-k,aw-ay],[ax-i,aA-aw]],ax,aA,[1,1],az);return this};P.roundedRect=function(k,aB,aw,ay,aA,az,ax){var i=4/3*(Math.SQRT2-1);this.lines([[(aw-2*aA),0],[(aA*i),0,aA,az-(az*i),aA,az],[0,(ay-2*az)],[0,(az*i),-(aA*i),az,-aA,az],[(-aw+2*aA),0],[-(aA*i),0,-aA,-(az*i),-aA,-az],[0,(-ay+2*az)],[0,-(az*i),(aA*i),-az,aA,-az]],k+aA,aB,[1,1],ax);return this};P.ellipse=function(i,aB,az,ay,k){var aA=ac(k),ax=4/3*(Math.SQRT2-1)*az,aw=4/3*(Math.SQRT2-1)*ay;ab([p((i+az)*ak),p((B-aB)*ak),"m",p((i+az)*ak),p((B-(aB-aw))*ak),p((i+ax)*ak),p((B-(aB-ay))*ak),p(i*ak),p((B-(aB-ay))*ak),"c"].join(" "));ab([p((i-ax)*ak),p((B-(aB-ay))*ak),p((i-az)*ak),p((B-(aB-aw))*ak),p((i-az)*ak),p((B-aB)*ak),"c"].join(" "));ab([p((i-az)*ak),p((B-(aB+aw))*ak),p((i-ax)*ak),p((B-(aB+ay))*ak),p(i*ak),p((B-(aB+ay))*ak),"c"].join(" "));ab([p((i+ax)*ak),p((B-(aB+ay))*ak),p((i+az)*ak),p((B-(aB+aw))*ak),p((i+az)*ak),p((B-aB)*ak),"c",aA].join(" "));return this};P.circle=function(i,ax,aw,k){return this.ellipse(i,ax,aw,aw,k)};P.setProperties=function(i){var k;for(k in O){if(O.hasOwnProperty(k)&&i[k]){O[k]=i[k]}}return this};P.setFontSize=function(i){aj=i;return this};P.setFont=function(i,k){d=y(i,k);return 
this};P.setFontStyle=P.setFontType=function(k){var i;d=y(i,k);return this};P.getFontList=function(){var aw={},k,ax,i;for(k in Q){if(Q.hasOwnProperty(k)){aw[k]=i=[];for(ax in Q[k]){if(Q[k].hasOwnProperty(ax)){i.push(ax)}}}}return aw};P.setLineWidth=function(i){ab((i*ak).toFixed(2)+" w");return this};P.setDrawColor=function(ay,ax,aw,i){var k;if(ax===undefined||(i===undefined&&ay===ax===aw)){if(typeof ay==="string"){k=ay+" G"}else{k=p(ay/255)+" G"}}else{if(i===undefined){if(typeof ay==="string"){k=[ay,ax,aw,"RG"].join(" ")}else{k=[p(ay/255),p(ax/255),p(aw/255),"RG"].join(" ")}}else{if(typeof ay==="string"){k=[ay,ax,aw,i,"K"].join(" ")}else{k=[p(ay),p(ax),p(aw),p(i),"K"].join(" ")}}}ab(k);return this};P.setFillColor=function(ay,ax,aw,i){var k;if(ax===undefined||(i===undefined&&ay===ax===aw)){if(typeof ay==="string"){k=ay+" g"}else{k=p(ay/255)+" g"}}else{if(i===undefined){if(typeof ay==="string"){k=[ay,ax,aw,"rg"].join(" ")}else{k=[p(ay/255),p(ax/255),p(aw/255),"rg"].join(" ")}}else{if(typeof ay==="string"){k=[ay,ax,aw,i,"k"].join(" ")}else{k=[p(ay),p(ax),p(aw),p(i),"k"].join(" ")}}}ab(k);return this};P.setTextColor=function(aw,k,i){if((aw===0&&k===0&&i===0)||(typeof k==="undefined")){ad=o(aw/255)+" g"}else{ad=[o(aw/255),o(k/255),o(i/255),"rg"].join(" ")}return this};P.CapJoinStyles={0:0,butt:0,but:0,bevel:0,1:1,round:1,rounded:1,circle:1,2:2,projecting:2,project:2,square:2,milter:2};P.setLineCap=function(i){var k=this.CapJoinStyles[i];if(k===undefined){throw new Error("Line cap style of '"+i+"' is not recognized. See or extend .CapJoinStyles property for valid styles")}R=k;ab(k.toString(10)+" J");return this};P.setLineJoin=function(i){var k=this.CapJoinStyles[i];if(k===undefined){throw new Error("Line join style of '"+i+"' is not recognized. See or extend .CapJoinStyles property for valid styles")}T=k;ab(k.toString(10)+" j");return this};P.output=I;P.save=function(i){P.output("save",i)};for(aq in b.API){if(b.API.hasOwnProperty(aq)){if(aq==="events"&&b.API.events.length){(function(ax,az){var ay,aw,k;for(k=az.length-1;k!==-1;k--){ay=az[k][0];aw=az[k][1];ax.subscribe.apply(ax,[ay].concat(typeof aw==="function"?[aw]:aw))}}(G,b.API.events))}else{P[aq]=b.API[aq]}}}e();d="F1";ah();G.publish("initialized");return P}b.API={events:[]};return b}());(function(i){var b=0,m=0,a,o,h,c={x:undefined,y:undefined,w:undefined,h:undefined,ln:undefined},f=1,e=false,d=function(p,t,q,r,s){c={x:p,y:t,w:q,h:r,ln:s}},k=function(){return c},j=function(p){b=p},l=function(){return b},n=function(p){m=p},g=function(p){return m};i.getTextDimensions=function(p){a=this.internal.getFont().fontName;o=this.internal.getFontSize();h=this.internal.getFont().fontStyle;var s=0.264583*72/25.4,q,r;r=document.createElement("font");r.id="pdfDataExportCell";r.style.fontStyle=h;r.style.fontName=a;r.style.fontSize=o+"pt";r.innerHTML=p;document.body.appendChild(r);q={w:(r.offsetWidth+1)*s,h:(r.offsetHeight+1)*s};document.body.removeChild(r);return 
q};i.cellAddPage=function(){this.addPage();d(undefined,undefined,undefined,undefined,undefined);e=true;f+=1;n(1)};i.cellInitialize=function(){b=0;c={x:undefined,y:undefined,w:undefined,h:undefined,ln:undefined};f=1;e=false;n(0)};i.cell=function(z,v,A,s,p,u){this.lnMod=this.lnMod===undefined?0:this.lnMod;if(this.printingHeaderRow!==true&&this.lnMod!==0){u=u+this.lnMod}if((((u*s)+v+(s*2))/f)>=this.internal.pageSize.height&&f===1&&!e){this.cellAddPage();if(this.printHeaders&&this.tableHeaderRow){this.printHeaderRow(u);this.lnMod+=1;u+=1}if(l()===0){j(Math.round((this.internal.pageSize.height-(s*2))/s))}}else{if(e&&k().ln!==u&&g()===l()){this.cellAddPage();if(this.printHeaders&&this.tableHeaderRow){this.printHeaderRow(u);this.lnMod+=1;u+=1}}}var B=k(),r=this.getTextDimensions(p),t=1;if(B.x!==undefined&&B.ln===u){z=B.x+B.w}if(B.y!==undefined&&B.y===v){v=B.y}if(B.h!==undefined&&B.h===s){s=B.h}if(B.ln!==undefined&&B.ln===u){u=B.ln;t=0}if(e){v=s*(g()+t)}else{v=(v+(s*Math.abs(l()*f-u-l())))}this.rect(z,v,A,s);var q=/[а-яА-ЯЁё]/.test(p);if(q){this.text16(p,z+3,v+s-3)}else{this.text(p,z+3,v+s-3)}n(g()+t);d(z,v,A,s,u);return this};i.getKeys=(typeof Object.keys==="function")?function(p){if(!p){return[]}return Object.keys(p)}:function(p){var q=[],r;for(r in p){if(p.hasOwnProperty(r)){q.push(r)}}return q};i.arrayMax=function(u,t){var p=u[0],q,s,r;for(q=0,s=u.length;q<s;q+=1){r=u[q];if(t){if(t(p,r)===-1){p=r}}else{if(r>p){p=r}}}return p};i.table=function(J,r,I){var v=[],p=[],D,z,B,x,E,y,G={},A={},u,s,H=[],C,F=[],t,q,w;this.lnMod=0;if(I){z=I.autoSize||false;B=this.printHeaders=I.printHeaders||true;x=I.autoStretch||true}if(!J){throw"No data for PDF table"}if(r===undefined||(r===null)){v=this.getKeys(J[0])}else{if(r[0]&&(typeof r[0]!=="string")){for(E=0,y=r.length;E<y;E+=1){D=r[E];v.push(D.name);p.push(D.prompt)}}else{v=r}}if(I.autoSize){w=function(K){return K[D]};for(E=0,y=v.length;E<y;E+=1){D=v[E];G[D]=J.map(w);H.push(this.getTextDimensions(p[E]||D).w);s=G[D];for(C=0,y=s.length;C<y;C+=1){u=s[C];H.push(this.getTextDimensions(u).w)}A[D]=i.arrayMax(H)}}if(I.printHeaders){for(E=0,y=v.length;E<y;E+=1){D=v[E];F.push([10,10,A[D],25,String(p.length?p[E]:D)])}this.setTableHeaderRow(F);this.printHeaderRow(1)}for(E=0,y=J.length;E<y;E+=1){t=J[E];for(C=0,q=v.length;C<q;C+=1){D=v[C];this.cell(10,10,A[D],25,String(t[D]),E+2)}}return this};i.setTableHeaderRow=function(p){this.tableHeaderRow=p};i.printHeaderRow=function(p){if(!this.tableHeaderRow){throw"Property tableHeaderRow does not exist."}var q,s,r,t;this.printingHeaderRow=true;for(r=0,t=this.tableHeaderRow.length;r<t;r+=1){q=this.tableHeaderRow[r];s=[].concat(q);this.cell.apply(this,s.concat(p))}this.printingHeaderRow=false}}(pdfDataExport.API));(function(c){var b=c.getCharWidthsArray=function(s,u){if(!u){u={}}var h=u.widths?u.widths:this.internal.getFont().metadata.Unicode.widths,r=h.fof?h.fof:1,n=u.kerning?u.kerning:this.internal.getFont().metadata.Unicode.kerning,p=n.fof?n.fof:1;var m,j,o,k,q=0,t=h[0]||r,g=[];for(m=0,j=s.length;m<j;m++){o=s.charCodeAt(m);g.push((h[o]||t)/r+(n[o]&&n[o][q]||0)/p);q=o}return g};var e=function(j){var h=j.length,g=0;while(h){h--;g+=j[h]}return g};var a=c.getStringUnitWidth=function(h,g){return e(b.call(this,h,g))};var d=function(g,n,h,j){var q=[];var m=0,k=g.length,p=0;while(m!==k&&p+n[m]<h){p+=n[m];m++}q.push(g.slice(0,m));var o=m;p=0;while(m!==k){if(p+n[m]>j){q.push(g.slice(o,m));p=0;o=m}p+=n[m];m++}if(o!==m){q.push(g.slice(o,m))}return q};var f=function(s,k,v){if(!v){v={}}var t=b(" ",v)[0];var r=s.split(" ");var 
w=[],x=[w],h=v.textIndent||0,u=0,p=0,g,q;var o,m,n;for(o=0,m=r.length;o<m;o++){g=r[o];q=b(g,v);p=e(q);if(h+u+p>k){if(p>k){n=d(g,q,k-(h+u),k);w.push(n.shift());w=[n.pop()];while(n.length){x.push([n.shift()])}p=e(q.slice(g.length-w[0].length))}else{w=[g]}x.push(w);h=p;u=t}else{w.push(g);h+=u+p;u=t}}var j=[];for(o=0,m=x.length;o<m;o++){j.push(x[o].join(" "))}return j};c.splitTextToSize=function(q,m,r){if(!r){r={}}var h=r.fontSize||this.internal.getFontSize(),g=(function(l){var t={0:1},i={};if(!l.widths||!l.kerning){var u=this.internal.getFont(l.fontName,l.fontStyle),s="Unicode";if(u.metadata[s]){return{widths:u.metadata[s].widths||t,kerning:u.metadata[s].kerning||i}}}else{return{widths:l.widths,kerning:l.kerning}}return{widths:t,kerning:i}}).call(this,r);var p;if(q.match(/[\n\r]/)){p=q.split(/\r\n|\r|\n/g)}else{p=[q]}var j=1*this.internal.scaleFactor*m/h;g.textIndent=r.textIndent?r.textIndent*1*this.internal.scaleFactor/h:0;var o,n,k=[];for(o=0,n=p.length;o<n;o++){k=k.concat(f(p[o],j,g))}return k}})(pdfDataExport.API);(function(c){var d="addImage_";var f=function(m){var l,h;if(!m.charCodeAt(0)===255||!m.charCodeAt(1)===216||!m.charCodeAt(2)===255||!m.charCodeAt(3)===224||!m.charCodeAt(6)==="J".charCodeAt(0)||!m.charCodeAt(7)==="F".charCodeAt(0)||!m.charCodeAt(8)==="I".charCodeAt(0)||!m.charCodeAt(9)==="F".charCodeAt(0)||!m.charCodeAt(10)===0){throw new Error("getJpegSize requires a binary jpeg file")}var j=m.charCodeAt(4)*256+m.charCodeAt(5);var k=4,g=m.length;while(k<g){k+=j;if(m.charCodeAt(k)!==255){throw new Error("getJpegSize could not find the size of the image")}if(m.charCodeAt(k+1)===192){h=m.charCodeAt(k+5)*256+m.charCodeAt(k+6);l=m.charCodeAt(k+7)*256+m.charCodeAt(k+8);return[l,h]}else{k+=2;j=m.charCodeAt(k)*256+m.charCodeAt(k+1)}}},b=function(g){var m=this.internal.newObject(),h=this.internal.write,l=this.internal.putStream;g.n=m;h("<</Type /XObject");h("/Subtype /Image");h("/Width "+g.w);h("/Height "+g.h);if(g.cs==="Indexed"){h("/ColorSpace [/Indexed /DeviceRGB "+(g.pal.length/3-1)+" "+(m+1)+" 0 R]")}else{h("/ColorSpace /"+g.cs);if(g.cs==="DeviceCMYK"){h("/Decode [1 0 1 0 1 0 1 0]")}}h("/BitsPerComponent "+g.bpc);if("f" in g){h("/Filter /"+g.f)}if("dp" in g){h("/DecodeParms <<"+g.dp+">>")}if("trns" in g&&g.trns.constructor==Array){var k="";for(var j=0;j<g.trns.length;j++){k+=(g[k][j]+" "+g.trns[j]+" ");h("/Mask ["+k+"]")}}if("smask" in g){h("/SMask "+(m+1)+" 0 R")}h("/Length "+g.data.length+">>");l(g.data);h("endobj")},e=function(){var g=this.internal.collections[d+"images"];for(var h in g){b.call(this,g[h])}},a=function(){var g=this.internal.collections[d+"images"],h=this.internal.write,k;for(var j in g){k=g[j];h("/I"+k.i,k.n,"0","R")}};c.addImage=function(g,s,q,p,t,l){if(typeof g==="object"&&g.nodeType===1){var j=document.createElement("canvas");j.width=g.clientWidth;j.height=g.clientHeight;var u=j.getContext("2d");if(!u){throw ("addImage requires canvas to be supported by browser.")}u.drawImage(g,0,0,j.width,j.height);g=j.toDataURL("image/jpeg");s="JPEG"}if(s.toUpperCase()!=="JPEG"){throw new Error("addImage currently only supports format 'JPEG', not '"+s+"'")}var i,n=this.internal.collections[d+"images"],m=this.internal.getCoordinateString,o=this.internal.getVerticalCoordinateString;if(g.substring(0,23)==="data:image/jpeg;base64,"){g=atob(g.replace("data:image/jpeg;base64,",""))}if(n){i=Object.keys?Object.keys(n).length:(function(w){var h=0;for(var v in w){if(w.hasOwnProperty(v)){h++}}return 
h})(n)}else{i=0;this.internal.collections[d+"images"]=n={};this.internal.events.subscribe("putResources",e);this.internal.events.subscribe("putXobjectDict",a)}var r=f(g);var k={w:r[0],h:r[1],cs:"DeviceRGB",bpc:8,f:"DCTDecode",i:i,data:g};n[i]=k;if(!t&&!l){t=-96;l=-96}if(t<0){t=(-1)*k.w*72/t/this.internal.scaleFactor}if(l<0){l=(-1)*k.h*72/l/this.internal.scaleFactor}if(t===0){t=l*k.w/k.h}if(l===0){l=t*k.h/k.w}this.internal.write("q",m(t),"0 0",m(l),m(q),o(p+l),"cm /I"+k.i,"Do Q");return this}})(pdfDataExport.API);(function(a){var e=function(q){var w="0123456789abcdef",o="klmnopqrstuvwxyz",h={};for(var r=0;r<o.length;r++){h[o[r]]=w[r]}var p,m={},n=1,t,k=m,g=[],s,l="",u="",v,j=q.length-1,f;r=1;while(r!=j){f=q[r];r+=1;if(f=="'"){if(t){v=t.join("");t=p}else{t=[]}}else{if(t){t.push(f)}else{if(f=="{"){g.push([k,v]);k={};v=p}else{if(f=="}"){s=g.pop();s[0][s[1]]=k;v=p;k=s[0]}else{if(f=="-"){n=-1}else{if(v===p){if(h.hasOwnProperty(f)){l+=h[f];v=parseInt(l,16)*n;n=+1;l=""}else{l+=f}}else{if(h.hasOwnProperty(f)){u+=h[f];k[v]=parseInt(u,16)*n;n=+1;v=p;u=""}else{u+=f}}}}}}}}return m};var d={codePages:["WinAnsiEncoding"],WinAnsiEncoding:e("{19m8n201n9q201o9r201s9l201t9m201u8m201w9n201x9o201y8o202k8q202l8r202m9p202q8p20aw8k203k8t203t8v203u9v2cq8s212m9t15m8w15n9w2dw9s16k8u16l9u17s9z17x8y17y9y}")},c={Unicode:{Courier:d,"Courier-Bold":d,"Courier-BoldOblique":d,"Courier-Oblique":d,Helvetica:d,"Helvetica-Bold":d,"Helvetica-BoldOblique":d,"Helvetica-Oblique":d,"Times-Roman":d,"Times-Bold":d,"Times-BoldItalic":d,"Times-Italic":d}},b={Unicode:{"Courier-Oblique":e("{'widths'{k3w'fof'6o}'kerning'{'fof'-6o}}"),"Times-BoldItalic":e("{'widths'{k3o2q4ycx2r201n3m201o6o201s2l201t2l201u2l201w3m201x3m201y3m2k1t2l2r202m2n2n3m2o3m2p5n202q6o2r1w2s2l2t2l2u3m2v3t2w1t2x2l2y1t2z1w3k3m3l3m3m3m3n3m3o3m3p3m3q3m3r3m3s3m203t2l203u2l3v2l3w3t3x3t3y3t3z3m4k5n4l4m4m4m4n4m4o4s4p4m4q4m4r4s4s4y4t2r4u3m4v4m4w3x4x5t4y4s4z4s5k3x5l4s5m4m5n3r5o3x5p4s5q4m5r5t5s4m5t3x5u3x5v2l5w1w5x2l5y3t5z3m6k2l6l3m6m3m6n2w6o3m6p2w6q2l6r3m6s3r6t1w6u1w6v3m6w1w6x4y6y3r6z3m7k3m7l3m7m2r7n2r7o1w7p3r7q2w7r4m7s3m7t2w7u2r7v2n7w1q7x2n7y3t202l3mcl4mal2ram3man3mao3map3mar3mas2lat4uau1uav3maw3way4uaz2lbk2sbl3t'fof'6obo2lbp3tbq3mbr1tbs2lbu1ybv3mbz3mck4m202k3mcm4mcn4mco4mcp4mcq5ycr4mcs4mct4mcu4mcv4mcw2r2m3rcy2rcz2rdl4sdm4sdn4sdo4sdp4sdq4sds4sdt4sdu4sdv4sdw4sdz3mek3mel3mem3men3meo3mep3meq4ser2wes2wet2weu2wev2wew1wex1wey1wez1wfl3rfm3mfn3mfo3mfp3mfq3mfr3tfs3mft3rfu3rfv3rfw3rfz2w203k6o212m6o2dw2l2cq2l3t3m3u2l17s3x19m3m}'kerning'{cl{4qu5kt5qt5rs17ss5ts}201s{201ss}201t{cks4lscmscnscoscpscls2wu2yu201ts}201x{2wu2yu}2k{201ts}2w{4qx5kx5ou5qx5rs17su5tu}2x{17su5tu5ou}2y{4qx5kx5ou5qx5rs17ss5ts}'fof'-6ofn{17sw5tw5ou5qw5rs}7t{cksclscmscnscoscps4ls}3u{17su5tu5os5qs}3v{17su5tu5os5qs}7p{17su5tu}ck{4qu5kt5qt5rs17ss5ts}4l{4qu5kt5qt5rs17ss5ts}cm{4qu5kt5qt5rs17ss5ts}cn{4qu5kt5qt5rs17ss5ts}co{4qu5kt5qt5rs17ss5ts}cp{4qu5kt5qt5rs17ss5ts}6l{4qu5ou5qw5rt17su5tu}5q{ckuclucmucnucoucpu4lu}5r{ckuclucmucnucoucpu4lu}7q{cksclscmscnscoscps4ls}6p{4qu5ou5qw5rt17sw5tw}ek{4qu5ou5qw5rt17su5tu}el{4qu5ou5qw5rt17su5tu}em{4qu5ou5qw5rt17su5tu}en{4qu5ou5qw5rt17su5tu}eo{4qu5ou5qw5rt17su5tu}ep{4qu5ou5qw5rt17su5tu}es{17ss5ts5qs4qu}et{4qu5ou5qw5rt17sw5tw}eu{4qu5ou5qw5rt17ss5ts}ev{17ss5ts5qs4qu}6z{17sw5tw5ou5qw5rs}fm{17sw5tw5ou5qw5rs}7n{201ts}fo{17sw5tw5ou5qw5rs}fp{17sw5tw5ou5qw5rs}fq{17sw5tw5ou5qw5rs}7r{cksclscmscnscoscps4ls}fs{17sw5tw5ou5qw5rs}ft{17su5tu}fu{17su5tu}fv{17su5tu}fw{17su5tu}fz{cksclscmscnscoscps4ls}}}"),"Helvetica-Bold":e("{'widths'{k3s2q4scx1w201n3r201o6o201s1w201t1w201u1w201w3m201x3m201y3m2k1w2l2l202m2n2n3r2o3r2p
5t202q6o2r1s2s2l2t2l2u2r2v3u2w1w2x2l2y1w2z1w3k3r3l3r3m3r3n3r3o3r3p3r3q3r3r3r3s3r203t2l203u2l3v2l3w3u3x3u3y3u3z3x4k6l4l4s4m4s4n4s4o4s4p4m4q3x4r4y4s4s4t1w4u3r4v4s4w3x4x5n4y4s4z4y5k4m5l4y5m4s5n4m5o3x5p4s5q4m5r5y5s4m5t4m5u3x5v2l5w1w5x2l5y3u5z3r6k2l6l3r6m3x6n3r6o3x6p3r6q2l6r3x6s3x6t1w6u1w6v3r6w1w6x5t6y3x6z3x7k3x7l3x7m2r7n3r7o2l7p3x7q3r7r4y7s3r7t3r7u3m7v2r7w1w7x2r7y3u202l3rcl4sal2lam3ran3rao3rap3rar3ras2lat4tau2pav3raw3uay4taz2lbk2sbl3u'fof'6obo2lbp3xbq3rbr1wbs2lbu2obv3rbz3xck4s202k3rcm4scn4sco4scp4scq6ocr4scs4mct4mcu4mcv4mcw1w2m2zcy1wcz1wdl4sdm4ydn4ydo4ydp4ydq4yds4ydt4sdu4sdv4sdw4sdz3xek3rel3rem3ren3reo3rep3req5ter3res3ret3reu3rev3rew1wex1wey1wez1wfl3xfm3xfn3xfo3xfp3xfq3xfr3ufs3xft3xfu3xfv3xfw3xfz3r203k6o212m6o2dw2l2cq2l3t3r3u2l17s4m19m3r}'kerning'{cl{4qs5ku5ot5qs17sv5tv}201t{2ww4wy2yw}201w{2ks}201x{2ww4wy2yw}2k{201ts201xs}2w{7qs4qu5kw5os5qw5rs17su5tu7tsfzs}2x{5ow5qs}2y{7qs4qu5kw5os5qw5rs17su5tu7tsfzs}'fof'-6o7p{17su5tu5ot}ck{4qs5ku5ot5qs17sv5tv}4l{4qs5ku5ot5qs17sv5tv}cm{4qs5ku5ot5qs17sv5tv}cn{4qs5ku5ot5qs17sv5tv}co{4qs5ku5ot5qs17sv5tv}cp{4qs5ku5ot5qs17sv5tv}6l{17st5tt5os}17s{2kwclvcmvcnvcovcpv4lv4wwckv}5o{2kucltcmtcntcotcpt4lt4wtckt}5q{2ksclscmscnscoscps4ls4wvcks}5r{2ks4ws}5t{2kwclvcmvcnvcovcpv4lv4wwckv}eo{17st5tt5os}fu{17su5tu5ot}6p{17ss5ts}ek{17st5tt5os}el{17st5tt5os}em{17st5tt5os}en{17st5tt5os}6o{201ts}ep{17st5tt5os}es{17ss5ts}et{17ss5ts}eu{17ss5ts}ev{17ss5ts}6z{17su5tu5os5qt}fm{17su5tu5os5qt}fn{17su5tu5os5qt}fo{17su5tu5os5qt}fp{17su5tu5os5qt}fq{17su5tu5os5qt}fs{17su5tu5os5qt}ft{17su5tu5ot}7m{5os}fv{17su5tu5ot}fw{17su5tu5ot}}}"),Courier:e("{'widths'{k3w'fof'6o}'kerning'{'fof'-6o}}"),"Courier-BoldOblique":e("{'widths'{k3w'fof'6o}'kerning'{'fof'-6o}}"),"Times-Bold":e("{'widths'{k3q2q5ncx2r201n3m201o6o201s2l201t2l201u2l201w3m201x3m201y3m2k1t2l2l202m2n2n3m2o3m2p6o202q6o2r1w2s2l2t2l2u3m2v3t2w1t2x2l2y1t2z1w3k3m3l3m3m3m3n3m3o3m3p3m3q3m3r3m3s3m203t2l203u2l3v2l3w3t3x3t3y3t3z3m4k5x4l4s4m4m4n4s4o4s4p4m4q3x4r4y4s4y4t2r4u3m4v4y4w4m4x5y4y4s4z4y5k3x5l4y5m4s5n3r5o4m5p4s5q4s5r6o5s4s5t4s5u4m5v2l5w1w5x2l5y3u5z3m6k2l6l3m6m3r6n2w6o3r6p2w6q2l6r3m6s3r6t1w6u2l6v3r6w1w6x5n6y3r6z3m7k3r7l3r7m2w7n2r7o2l7p3r7q3m7r4s7s3m7t3m7u2w7v2r7w1q7x2r7y3o202l3mcl4sal2lam3man3mao3map3mar3mas2lat4uau1yav3maw3tay4uaz2lbk2sbl3t'fof'6obo2lbp3rbr1tbs2lbu2lbv3mbz3mck4s202k3mcm4scn4sco4scp4scq6ocr4scs4mct4mcu4mcv4mcw2r2m3rcy2rcz2rdl4sdm4ydn4ydo4ydp4ydq4yds4ydt4sdu4sdv4sdw4sdz3rek3mel3mem3men3meo3mep3meq4ser2wes2wet2weu2wev2wew1wex1wey1wez1wfl3rfm3mfn3mfo3mfp3mfq3mfr3tfs3mft3rfu3rfv3rfw3rfz3m203k6o212m6o2dw2l2cq2l3t3m3u2l17s4s19m3m}'kerning'{cl{4qt5ks5ot5qy5rw17sv5tv}201t{cks4lscmscnscoscpscls4wv}2k{201ts}2w{4qu5ku7mu5os5qx5ru17su5tu}2x{17su5tu5ou5qs}2y{4qv5kv7mu5ot5qz5ru17su5tu}'fof'-6o7t{cksclscmscnscoscps4ls}3u{17su5tu5os5qu}3v{17su5tu5os5qu}fu{17su5tu5ou5qu}7p{17su5tu5ou5qu}ck{4qt5ks5ot5qy5rw17sv5tv}4l{4qt5ks5ot5qy5rw17sv5tv}cm{4qt5ks5ot5qy5rw17sv5tv}cn{4qt5ks5ot5qy5rw17sv5tv}co{4qt5ks5ot5qy5rw17sv5tv}cp{4qt5ks5ot5qy5rw17sv5tv}6l{17st5tt5ou5qu}17s{ckuclucmucnucoucpu4lu4wu}5o{ckuclucmucnucoucpu4lu4wu}5q{ckzclzcmzcnzcozcpz4lz4wu}5r{ckxclxcmxcnxcoxcpx4lx4wu}5t{ckuclucmucnucoucpu4lu4wu}7q{ckuclucmucnucoucpu4lu}6p{17sw5tw5ou5qu}ek{17st5tt5qu}el{17st5tt5ou5qu}em{17st5tt5qu}en{17st5tt5qu}eo{17st5tt5qu}ep{17st5tt5ou5qu}es{17ss5ts5qu}et{17sw5tw5ou5qu}eu{17sw5tw5ou5qu}ev{17ss5ts5qu}6z{17sw5tw5ou5qu5rs}fm{17sw5tw5ou5qu5rs}fn{17sw5tw5ou5qu5rs}fo{17sw5tw5ou5qu5rs}fp{17sw5tw5ou5qu5rs}fq{17sw5tw5ou5qu5rs}7r{cktcltcmtcntcotcpt4lt5os}fs{17sw5tw5ou5qu5rs}ft{17su5tu5ou5qu}7m{5os}fv{17su5tu5ou5qu}fw{17su5tu5ou5qu}fz{cksclscmscnscoscps4ls}}}"),Helvetica:e("
{'widths'{k3p2q4mcx1w201n3r201o6o201s1q201t1q201u1q201w2l201x2l201y2l2k1w2l1w202m2n2n3r2o3r2p5t202q6o2r1n2s2l2t2l2u2r2v3u2w1w2x2l2y1w2z1w3k3r3l3r3m3r3n3r3o3r3p3r3q3r3r3r3s3r203t2l203u2l3v1w3w3u3x3u3y3u3z3r4k6p4l4m4m4m4n4s4o4s4p4m4q3x4r4y4s4s4t1w4u3m4v4m4w3r4x5n4y4s4z4y5k4m5l4y5m4s5n4m5o3x5p4s5q4m5r5y5s4m5t4m5u3x5v1w5w1w5x1w5y2z5z3r6k2l6l3r6m3r6n3m6o3r6p3r6q1w6r3r6s3r6t1q6u1q6v3m6w1q6x5n6y3r6z3r7k3r7l3r7m2l7n3m7o1w7p3r7q3m7r4s7s3m7t3m7u3m7v2l7w1u7x2l7y3u202l3rcl4mal2lam3ran3rao3rap3rar3ras2lat4tau2pav3raw3uay4taz2lbk2sbl3u'fof'6obo2lbp3rbr1wbs2lbu2obv3rbz3xck4m202k3rcm4mcn4mco4mcp4mcq6ocr4scs4mct4mcu4mcv4mcw1w2m2ncy1wcz1wdl4sdm4ydn4ydo4ydp4ydq4yds4ydt4sdu4sdv4sdw4sdz3xek3rel3rem3ren3reo3rep3req5ter3mes3ret3reu3rev3rew1wex1wey1wez1wfl3rfm3rfn3rfo3rfp3rfq3rfr3ufs3xft3rfu3rfv3rfw3rfz3m203k6o212m6o2dw2l2cq2l3t3r3u1w17s4m19m3r}'kerning'{5q{4wv}cl{4qs5kw5ow5qs17sv5tv}201t{2wu4w1k2yu}201x{2wu4wy2yu}17s{2ktclucmucnu4otcpu4lu4wycoucku}2w{7qs4qz5k1m17sy5ow5qx5rsfsu5ty7tufzu}2x{17sy5ty5oy5qs}2y{7qs4qz5k1m17sy5ow5qx5rsfsu5ty7tufzu}'fof'-6o7p{17sv5tv5ow}ck{4qs5kw5ow5qs17sv5tv}4l{4qs5kw5ow5qs17sv5tv}cm{4qs5kw5ow5qs17sv5tv}cn{4qs5kw5ow5qs17sv5tv}co{4qs5kw5ow5qs17sv5tv}cp{4qs5kw5ow5qs17sv5tv}6l{17sy5ty5ow}do{17st5tt}4z{17st5tt}7s{fst}dm{17st5tt}dn{17st5tt}5o{ckwclwcmwcnwcowcpw4lw4wv}dp{17st5tt}dq{17st5tt}7t{5ow}ds{17st5tt}5t{2ktclucmucnu4otcpu4lu4wycoucku}fu{17sv5tv5ow}6p{17sy5ty5ow5qs}ek{17sy5ty5ow}el{17sy5ty5ow}em{17sy5ty5ow}en{5ty}eo{17sy5ty5ow}ep{17sy5ty5ow}es{17sy5ty5qs}et{17sy5ty5ow5qs}eu{17sy5ty5ow5qs}ev{17sy5ty5ow5qs}6z{17sy5ty5ow5qs}fm{17sy5ty5ow5qs}fn{17sy5ty5ow5qs}fo{17sy5ty5ow5qs}fp{17sy5ty5qs}fq{17sy5ty5ow5qs}7r{5ow}fs{17sy5ty5ow5qs}ft{17sv5tv5ow}7m{5ow}fv{17sv5tv5ow}fw{17sv5tv5ow}}}"),"Helvetica-BoldOblique":e("{'widths'{k3s2q4scx1w201n3r201o6o201s1w201t1w201u1w201w3m201x3m201y3m2k1w2l2l202m2n2n3r2o3r2p5t202q6o2r1s2s2l2t2l2u2r2v3u2w1w2x2l2y1w2z1w3k3r3l3r3m3r3n3r3o3r3p3r3q3r3r3r3s3r203t2l203u2l3v2l3w3u3x3u3y3u3z3x4k6l4l4s4m4s4n4s4o4s4p4m4q3x4r4y4s4s4t1w4u3r4v4s4w3x4x5n4y4s4z4y5k4m5l4y5m4s5n4m5o3x5p4s5q4m5r5y5s4m5t4m5u3x5v2l5w1w5x2l5y3u5z3r6k2l6l3r6m3x6n3r6o3x6p3r6q2l6r3x6s3x6t1w6u1w6v3r6w1w6x5t6y3x6z3x7k3x7l3x7m2r7n3r7o2l7p3x7q3r7r4y7s3r7t3r7u3m7v2r7w1w7x2r7y3u202l3rcl4sal2lam3ran3rao3rap3rar3ras2lat4tau2pav3raw3uay4taz2lbk2sbl3u'fof'6obo2lbp3xbq3rbr1wbs2lbu2obv3rbz3xck4s202k3rcm4scn4sco4scp4scq6ocr4scs4mct4mcu4mcv4mcw1w2m2zcy1wcz1wdl4sdm4ydn4ydo4ydp4ydq4yds4ydt4sdu4sdv4sdw4sdz3xek3rel3rem3ren3reo3rep3req5ter3res3ret3reu3rev3rew1wex1wey1wez1wfl3xfm3xfn3xfo3xfp3xfq3xfr3ufs3xft3xfu3xfv3xfw3xfz3r203k6o212m6o2dw2l2cq2l3t3r3u2l17s4m19m3r}'kerning'{cl{4qs5ku5ot5qs17sv5tv}201t{2ww4wy2yw}201w{2ks}201x{2ww4wy2yw}2k{201ts201xs}2w{7qs4qu5kw5os5qw5rs17su5tu7tsfzs}2x{5ow5qs}2y{7qs4qu5kw5os5qw5rs17su5tu7tsfzs}'fof'-6o7p{17su5tu5ot}ck{4qs5ku5ot5qs17sv5tv}4l{4qs5ku5ot5qs17sv5tv}cm{4qs5ku5ot5qs17sv5tv}cn{4qs5ku5ot5qs17sv5tv}co{4qs5ku5ot5qs17sv5tv}cp{4qs5ku5ot5qs17sv5tv}6l{17st5tt5os}17s{2kwclvcmvcnvcovcpv4lv4wwckv}5o{2kucltcmtcntcotcpt4lt4wtckt}5q{2ksclscmscnscoscps4ls4wvcks}5r{2ks4ws}5t{2kwclvcmvcnvcovcpv4lv4wwckv}eo{17st5tt5os}fu{17su5tu5ot}6p{17ss5ts}ek{17st5tt5os}el{17st5tt5os}em{17st5tt5os}en{17st5tt5os}6o{201ts}ep{17st5tt5os}es{17ss5ts}et{17ss5ts}eu{17ss5ts}ev{17ss5ts}6z{17su5tu5os5qt}fm{17su5tu5os5qt}fn{17su5tu5os5qt}fo{17su5tu5os5qt}fp{17su5tu5os5qt}fq{17su5tu5os5qt}fs{17su5tu5os5qt}ft{17su5tu5ot}7m{5os}fv{17su5tu5ot}fw{17su5tu5ot}}}"),"Courier-Bold":e("{'widths'{k3w'fof'6o}'kerning'{'fof'-6o}}"),"Times-Italic":e("{'widths'{k3n2q4ycx2l201n3m201o5t201s2l201t2l201u2l201w3r201x3r201y3r2k1t2l2l202m2n2n3m2o3
m2p5n202q5t2r1p2s2l2t2l2u3m2v4n2w1t2x2l2y1t2z1w3k3m3l3m3m3m3n3m3o3m3p3m3q3m3r3m3s3m203t2l203u2l3v2l3w4n3x4n3y4n3z3m4k5w4l3x4m3x4n4m4o4s4p3x4q3x4r4s4s4s4t2l4u2w4v4m4w3r4x5n4y4m4z4s5k3x5l4s5m3x5n3m5o3r5p4s5q3x5r5n5s3x5t3r5u3r5v2r5w1w5x2r5y2u5z3m6k2l6l3m6m3m6n2w6o3m6p2w6q1w6r3m6s3m6t1w6u1w6v2w6w1w6x4s6y3m6z3m7k3m7l3m7m2r7n2r7o1w7p3m7q2w7r4m7s2w7t2w7u2r7v2s7w1v7x2s7y3q202l3mcl3xal2ram3man3mao3map3mar3mas2lat4wau1vav3maw4nay4waz2lbk2sbl4n'fof'6obo2lbp3mbq3obr1tbs2lbu1zbv3mbz3mck3x202k3mcm3xcn3xco3xcp3xcq5tcr4mcs3xct3xcu3xcv3xcw2l2m2ucy2lcz2ldl4mdm4sdn4sdo4sdp4sdq4sds4sdt4sdu4sdv4sdw4sdz3mek3mel3mem3men3meo3mep3meq4mer2wes2wet2weu2wev2wew1wex1wey1wez1wfl3mfm3mfn3mfo3mfp3mfq3mfr4nfs3mft3mfu3mfv3mfw3mfz2w203k6o212m6m2dw2l2cq2l3t3m3u2l17s3r19m3m}'kerning'{cl{5kt4qw}201s{201sw}201t{201tw2wy2yy6q-t}201x{2wy2yy}2k{201tw}2w{7qs4qy7rs5ky7mw5os5qx5ru17su5tu}2x{17ss5ts5os}2y{7qs4qy7rs5ky7mw5os5qx5ru17su5tu}'fof'-6o6t{17ss5ts5qs}7t{5os}3v{5qs}7p{17su5tu5qs}ck{5kt4qw}4l{5kt4qw}cm{5kt4qw}cn{5kt4qw}co{5kt4qw}cp{5kt4qw}6l{4qs5ks5ou5qw5ru17su5tu}17s{2ks}5q{ckvclvcmvcnvcovcpv4lv}5r{ckuclucmucnucoucpu4lu}5t{2ks}6p{4qs5ks5ou5qw5ru17su5tu}ek{4qs5ks5ou5qw5ru17su5tu}el{4qs5ks5ou5qw5ru17su5tu}em{4qs5ks5ou5qw5ru17su5tu}en{4qs5ks5ou5qw5ru17su5tu}eo{4qs5ks5ou5qw5ru17su5tu}ep{4qs5ks5ou5qw5ru17su5tu}es{5ks5qs4qs}et{4qs5ks5ou5qw5ru17su5tu}eu{4qs5ks5qw5ru17su5tu}ev{5ks5qs4qs}ex{17ss5ts5qs}6z{4qv5ks5ou5qw5ru17su5tu}fm{4qv5ks5ou5qw5ru17su5tu}fn{4qv5ks5ou5qw5ru17su5tu}fo{4qv5ks5ou5qw5ru17su5tu}fp{4qv5ks5ou5qw5ru17su5tu}fq{4qv5ks5ou5qw5ru17su5tu}7r{5os}fs{4qv5ks5ou5qw5ru17su5tu}ft{17su5tu5qs}fu{17su5tu5qs}fv{17su5tu5qs}fw{17su5tu5qs}}}"),"Times-Roman":e("{'widths'{k3n2q4ycx2l201n3m201o6o201s2l201t2l201u2l201w2w201x2w201y2w2k1t2l2l202m2n2n3m2o3m2p5n202q6o2r1m2s2l2t2l2u3m2v3s2w1t2x2l2y1t2z1w3k3m3l3m3m3m3n3m3o3m3p3m3q3m3r3m3s3m203t2l203u2l3v1w3w3s3x3s3y3s3z2w4k5w4l4s4m4m4n4m4o4s4p3x4q3r4r4s4s4s4t2l4u2r4v4s4w3x4x5t4y4s4z4s5k3r5l4s5m4m5n3r5o3x5p4s5q4s5r5y5s4s5t4s5u3x5v2l5w1w5x2l5y2z5z3m6k2l6l2w6m3m6n2w6o3m6p2w6q2l6r3m6s3m6t1w6u1w6v3m6w1w6x4y6y3m6z3m7k3m7l3m7m2l7n2r7o1w7p3m7q3m7r4s7s3m7t3m7u2w7v3k7w1o7x3k7y3q202l3mcl4sal2lam3man3mao3map3mar3mas2lat4wau1vav3maw3say4waz2lbk2sbl3s'fof'6obo2lbp3mbq2xbr1tbs2lbu1zbv3mbz2wck4s202k3mcm4scn4sco4scp4scq5tcr4mcs3xct3xcu3xcv3xcw2l2m2tcy2lcz2ldl4sdm4sdn4sdo4sdp4sdq4sds4sdt4sdu4sdv4sdw4sdz3mek2wel2wem2wen2weo2wep2weq4mer2wes2wet2weu2wev2wew1wex1wey1wez1wfl3mfm3mfn3mfo3mfp3mfq3mfr3sfs3mft3mfu3mfv3mfw3mfz3m203k6o212m6m2dw2l2cq2l3t3m3u1w17s4s19m3m}'kerning'{cl{4qs5ku17sw5ou5qy5rw201ss5tw201ws}201s{201ss}201t{ckw4lwcmwcnwcowcpwclw4wu201ts}2k{201ts}2w{4qs5kw5os5qx5ru17sx5tx}2x{17sw5tw5ou5qu}2y{4qs5kw5os5qx5ru17sx5tx}'fof'-6o7t{ckuclucmucnucoucpu4lu5os5rs}3u{17su5tu5qs}3v{17su5tu5qs}7p{17sw5tw5qs}ck{4qs5ku17sw5ou5qy5rw201ss5tw201ws}4l{4qs5ku17sw5ou5qy5rw201ss5tw201ws}cm{4qs5ku17sw5ou5qy5rw201ss5tw201ws}cn{4qs5ku17sw5ou5qy5rw201ss5tw201ws}co{4qs5ku17sw5ou5qy5rw201ss5tw201ws}cp{4qs5ku17sw5ou5qy5rw201ss5tw201ws}6l{17su5tu5os5qw5rs}17s{2ktclvcmvcnvcovcpv4lv4wuckv}5o{ckwclwcmwcnwcowcpw4lw4wu}5q{ckyclycmycnycoycpy4ly4wu5ms}5r{cktcltcmtcntcotcpt4lt4ws}5t{2ktclvcmvcnvcovcpv4lv4wuckv}7q{cksclscmscnscoscps4ls}6p{17su5tu5qw5rs}ek{5qs5rs}el{17su5tu5os5qw5rs}em{17su5tu5os5qs5rs}en{17su5qs5rs}eo{5qs5rs}ep{17su5tu5os5qw5rs}es{5qs}et{17su5tu5qw5rs}eu{17su5tu5qs5rs}ev{5qs}6z{17sv5tv5os5qx5rs}fm{5os5qt5rs}fn{17sv5tv5os5qx5rs}fo{17sv5tv5os5qx5rs}fp{5os5qt5rs}fq{5os5qt5rs}7r{ckuclucmucnucoucpu4lu5os}fs{17sv5tv5os5qx5rs}ft{17ss5ts5qs}fu{17sw5tw5qs}fv{17sw5tw5qs}fw{17ss5ts5qs}fz{ckuclucmucnucoucpu4lu5os5rs}}}"),"Helvetica-Obli
que":e("{'widths'{k3p2q4mcx1w201n3r201o6o201s1q201t1q201u1q201w2l201x2l201y2l2k1w2l1w202m2n2n3r2o3r2p5t202q6o2r1n2s2l2t2l2u2r2v3u2w1w2x2l2y1w2z1w3k3r3l3r3m3r3n3r3o3r3p3r3q3r3r3r3s3r203t2l203u2l3v1w3w3u3x3u3y3u3z3r4k6p4l4m4m4m4n4s4o4s4p4m4q3x4r4y4s4s4t1w4u3m4v4m4w3r4x5n4y4s4z4y5k4m5l4y5m4s5n4m5o3x5p4s5q4m5r5y5s4m5t4m5u3x5v1w5w1w5x1w5y2z5z3r6k2l6l3r6m3r6n3m6o3r6p3r6q1w6r3r6s3r6t1q6u1q6v3m6w1q6x5n6y3r6z3r7k3r7l3r7m2l7n3m7o1w7p3r7q3m7r4s7s3m7t3m7u3m7v2l7w1u7x2l7y3u202l3rcl4mal2lam3ran3rao3rap3rar3ras2lat4tau2pav3raw3uay4taz2lbk2sbl3u'fof'6obo2lbp3rbr1wbs2lbu2obv3rbz3xck4m202k3rcm4mcn4mco4mcp4mcq6ocr4scs4mct4mcu4mcv4mcw1w2m2ncy1wcz1wdl4sdm4ydn4ydo4ydp4ydq4yds4ydt4sdu4sdv4sdw4sdz3xek3rel3rem3ren3reo3rep3req5ter3mes3ret3reu3rev3rew1wex1wey1wez1wfl3rfm3rfn3rfo3rfp3rfq3rfr3ufs3xft3rfu3rfv3rfw3rfz3m203k6o212m6o2dw2l2cq2l3t3r3u1w17s4m19m3r}'kerning'{5q{4wv}cl{4qs5kw5ow5qs17sv5tv}201t{2wu4w1k2yu}201x{2wu4wy2yu}17s{2ktclucmucnu4otcpu4lu4wycoucku}2w{7qs4qz5k1m17sy5ow5qx5rsfsu5ty7tufzu}2x{17sy5ty5oy5qs}2y{7qs4qz5k1m17sy5ow5qx5rsfsu5ty7tufzu}'fof'-6o7p{17sv5tv5ow}ck{4qs5kw5ow5qs17sv5tv}4l{4qs5kw5ow5qs17sv5tv}cm{4qs5kw5ow5qs17sv5tv}cn{4qs5kw5ow5qs17sv5tv}co{4qs5kw5ow5qs17sv5tv}cp{4qs5kw5ow5qs17sv5tv}6l{17sy5ty5ow}do{17st5tt}4z{17st5tt}7s{fst}dm{17st5tt}dn{17st5tt}5o{ckwclwcmwcnwcowcpw4lw4wv}dp{17st5tt}dq{17st5tt}7t{5ow}ds{17st5tt}5t{2ktclucmucnu4otcpu4lu4wycoucku}fu{17sv5tv5ow}6p{17sy5ty5ow5qs}ek{17sy5ty5ow}el{17sy5ty5ow}em{17sy5ty5ow}en{5ty}eo{17sy5ty5ow}ep{17sy5ty5ow}es{17sy5ty5qs}et{17sy5ty5ow5qs}eu{17sy5ty5ow5qs}ev{17sy5ty5ow5qs}6z{17sy5ty5ow5qs}fm{17sy5ty5ow5qs}fn{17sy5ty5ow5qs}fo{17sy5ty5ow5qs}fp{17sy5ty5qs}fq{17sy5ty5ow5qs}7r{5ow}fs{17sy5ty5ow5qs}ft{17sv5tv5ow}7m{5ow}fv{17sv5tv5ow}fw{17sv5tv5ow}}}")}};a.events.push(["addFonts",function(i){var f,g,h,k,j="Unicode",l;for(g in i.fonts){if(i.fonts.hasOwnProperty(g)){f=i.fonts[g];h=b[j][f.PostScriptName];if(h){if(f.metadata[j]){k=f.metadata[j]}else{k=f.metadata[j]={}}k.widths=h.widths;k.kerning=h.kerning}l=c[j][f.PostScriptName];if(l){if(f.metadata[j]){k=f.metadata[j]}else{k=f.metadata[j]={}}k.encoding=l;if(l.codePages&&l.codePages.length){f.encoding=l.codePages[0]}}}}}])})(pdfDataExport.API);var saveAs=saveAs||(navigator.msSaveBlob&&navigator.msSaveBlob.bind(navigator))||(function(h){var r=h.document,l=function(){return h.URL||h.webkitURL||h},e=h.URL||h.webkitURL||h,n=$("<a></a>")[0],g="download" in n,j=function(t){var s=r.createEvent("MouseEvents");s.initMouseEvent("click",true,false,h,0,0,0,0,0,false,false,false,false,0,null);return t.dispatchEvent(s)},o=h.webkitRequestFileSystem,p=h.requestFileSystem||o||h.mozRequestFileSystem,m=function(s){(h.setImmediate||h.setTimeout)(function(){throw s},0)},c="application/octet-stream",k=0,b=[],i=function(){var t=b.length;while(t--){var s=b[t];if(typeof s==="string"){e.revokeObjectURL(s)}else{s.remove()}}b.length=0},q=function(t,s,w){s=[].concat(s);var v=s.length;while(v--){var x=t["on"+s[v]];if(typeof x==="function"){try{x.call(t,w||t)}catch(u){m(u)}}}},f=function(t,u){var v=this,B=t.type,E=false,x,w,s=function(){var F=l().createObjectURL(t);b.push(F);return F},A=function(){q(v,"writestart progress write writeend".split(" "))},D=function(){if(E||!x){x=s(t)}if(w){w.location.href=x}v.readyState=v.DONE;A()},z=function(F){return function(){if(v.readyState!==v.DONE){return 
F.apply(this,arguments)}}},y={create:true,exclusive:false},C;v.readyState=v.INIT;if(!u){u="download"}if(g){x=s(t);n.href=x;n.download=u;if(j(n)){v.readyState=v.DONE;A();return}}if(h.chrome&&B&&B!==c){C=t.slice||t.webkitSlice;t=C.call(t,0,t.size,c);E=true}if(o&&u!=="download"){u+=".download"}if(B===c||o){w=h}else{w=h.open()}if(!p){D();return}k+=t.size;p(h.TEMPORARY,k,z(function(F){F.root.getDirectory("saved",y,z(function(G){var H=function(){G.getFile(u,y,z(function(I){I.createWriter(z(function(J){J.onwriteend=function(K){w.location.href=I.toURL();b.push(I);v.readyState=v.DONE;q(v,"writeend",K)};J.onerror=function(){var K=J.error;if(K.code!==K.ABORT_ERR){D()}};"writestart progress write abort".split(" ").forEach(function(K){J["on"+K]=v["on"+K]});J.write(t);v.abort=function(){J.abort();v.readyState=v.DONE};v.readyState=v.WRITING}),D)}),D)};G.getFile(u,{create:false},z(function(I){I.remove();H()}),z(function(I){if(I.code===I.NOT_FOUND_ERR){H()}else{D()}}))}),D)}),D)},d=f.prototype,a=function(s,t){return new f(s,t)};d.abort=function(){var s=this;s.readyState=s.DONE;q(s,"abort")};d.readyState=d.INIT=0;d.WRITING=1;d.DONE=2;d.error=d.onwritestart=d.onprogress=d.onwrite=d.onabort=d.onerror=d.onwriteend=null;if(h.addEventListener){h.addEventListener("unload",i,false)}return a}(self));(function(a){var b="pdfDataExport IE Below 9 Shim plugin";a.output=function(e,d){return this.internal.output(e,d);var c="Output.pdf";switch(e){case"datauristring":case"dataurlstring":case"datauri":case"dataurl":case"dataurlnewwindow":if(console){console.log(b+": Data URIs are not supported on IE6-9.")}break;case"save":c=d;break}}})(pdfDataExport.API); | /* |
|
font.go | // Auto-generated - DO NOT EDIT!
package ubuntumonoregular

import (
	"github.com/gmlewis/go-fonts/fonts"
)

// Available glyphs:
// !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~ ¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿŒœŸˆ˜–—‘’‚“”„•…‹›€™fiflffiffl
var ubuntumonoregularFont = &fonts.Font{
	ID:               "ubuntumonoregular",
	HorizAdvX:        1024,
	UnitsPerEm:       2048,
	Ascent:           1638,
	Descent:          -410,
	MissingHorizAdvX: 500,
	Glyphs:           map[rune]*fonts.Glyph{},
}

func init() {
	fonts.Fonts["ubuntumonoregular"] = ubuntumonoregularFont
	fonts.InitFromFontData(ubuntumonoregularFont, fontData)
}
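// Usage sketch (illustrative only, not part of the generated file): importing
// this package for its side effects runs init() above, which registers the
// font under its ID in the fonts.Fonts registry. The blank-import path below
// is an assumption inferred from the package name; the real module layout may
// differ.
//
//	import (
//		"fmt"
//
//		"github.com/gmlewis/go-fonts/fonts"
//		_ "github.com/gmlewis/go-fonts/fonts/ubuntumonoregular" // hypothetical path
//	)
//
//	func main() {
//		f := fonts.Fonts["ubuntumonoregular"]
//		// Metrics come straight from the struct literal above; the glyph
//		// outlines are presumably decoded from fontData by InitFromFontData.
//		fmt.Println(f.ID, f.UnitsPerEm, f.Ascent, f.Descent)
//	}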
var fontData | eNrs/XmQXNd1Joi/LAAklNbvN9kZDAeaw+h4piARorgkVwDEdrEX9kQBtVcBr/YqACSysBZIAHwAQRIkATBJgCQku93P8gbbcqu8yZCslq4Iu4fjXqbco4ngjBUTaXU7Qh3T0a6ekDvQHnXMRCLPue9+N897KHCRBDL5D1gvM9+77y5n/c53snPyGf9+L/s/z85nfunupsHBe++auyWf86r/+c+qG/9Wzql7Pzv3SH7ujb/Cw9W/RukvPYyf6Xvnzx3P+x78t0PT/9T+LfXpe++ae4ie4t1Tu6p2wdUv1K4GPdWr+/jq/bWrYa+2xuD1yt9R9ljm0Rg2K+f+ShwLXDXjVvc2zX3WmqHC0drdoh4lvLfeVvs0XFP7N+iEu3bQSFbbvw15fPSfMt+C2TF3xKtauKo6tLmD9ZxNGtfIfMseY2jueOPN75//GX7tf1T79mL9j2/8u+CM+qUb/zN1UWW/NyufudfZS2GpNuLiJXu/6DbYSwP2Z6pd2kthG43z87V/J8fsdw2fopXaY++PsBXu9ICG+Q8X8W9gdej+02Pumk+d/qm8yaIZvEnBeZOnZvAmvIZhidbw7Guqtob6ZVrDb7+lsj+cnc987u6mwQF7DTeRPDhkv53f5755fBbDrbRPD9nfCfZq6/eVwP7M22//3ivS7geZ4+MzttEsHLZnKtgnjQPvo5yxivcJFIxVw7tqYRzBIfs70V6cK5Bb+0FuFTXNroZ3xWfYKxC/60d0n0A7e336SO17pRMw/zi326Q19vfduNf9/+NneOvUNlnzBUtQ/N9z85n5d2dsOaG7aS93wTPW1a7qPpCVtB+9JbU3Lga1v9UG+yRNjdAdF4L8a0E56+2mv++zz47eQb9VKBXp6gY4ZwtqV/2ipAvCDShtdKv924jWTG+Dp7eTJmuGq9303TVKkueP02rQt8J19m99kvLhVri6na7CmFiaeLul9wlorkOaoQhkjr+PZrPL3v3BHjjpICOjMVidnfT7XG0MU/3092P2yga0VzwFe3sl7f8+uONaepsCrRLPIqzs5CDNxCP2O/OM83+FIc3a0dazW+iOvaK0X2vuXBvddvgt7w7YuYp3HzxHk7RXqM+341mY5rOw2f5t1EOrBU+PeA07QAMtq10t9Envo3bRLJNtF4H2mnyGZqzLlvTh0yD9BuzP9D57xv2Afp+rjSG3h/5eCqeml+Z0PWg23U2arTJK1snlcyR0HnhbZf/hznzm83fPGRwcHADltsEIBGszqbOKpZU99H4F36FNehFer98VpuGq2r3OXhAFxACJuC21f8tPw2EcpG27075aMldh4cwd8CptT7ha4g2PdzBXQfCYO7gm0fWw9sQFB6Q3K9GbeeKbFekd1E7pfRWMS4tvxuNy3pfeAe9QNFdBdJs7uG+mBtPWzGMBvbj2rel++1l+C6+ofbWy3awzmHt8ByUYgXi1QuIa7+C3aOFp4WJtRoZvVjlDx/tA6pstkt4sfgf5feEdFotvtlh6s/gd5PfV4txoMGtXsWuyhA7/+2xxLLqksv/wC/nMF+6e5Rx9smCKB8WpYHlLAqI4ag9uijb31DBo440kr2hKgtba3wV4XaXiSRCeSzqQJ7Dqfdo6W7F/KOiGcDNs7166+hiO0bGAuhR7FfY0t9FV1Ew0YxrFxkIaU4+oAXlLkZ1dGAVdPkpzMQy6nN9yh31VtdhP3+doS9WGPpICvzpcSiJzN2zHR2tXJ0GwR3307o/CHRcYPV/7l+yN8h7JGsgNgZZeT6Mge49ndzqAeaQ3KfXbCkUNw51W4g4LSQ/7aCEXjXivzXI//dsLB3fIvIn92x1o9QR9RtjZe6uL9P922IdFbew/67t0ZNVuV8z6T8c+hHQSNNjdZZgt1Um/LcIqPYoWpLfa/A1WIZ4Bzbv9cSXZVstp37GvgFEfElNeO1lTNF/BEBgLdDUagCfQbmG5oSg2EGFMbLk2uwXnr3yw9sx5r4umBd+d5iTslqw/P4CZYUnCHjydhWIgeUKFQHwu+1gURYq6YY/QO6pVouSk/yocl1oLa76e3mg3Cv9tLPwL2nE31SWV/bdN+cx96G4Gz948mKOGZxDMWUfDnEtHCQNzPh1OXM776Lt49UEyrLvAmuyCp/oaxEn8GyXeXwljCfDq3PjAWhMaPEsTGrzKQaLTVpDoV2blMwtwQivk+S04JUZE95OMJo3UcRIU/WHaxcfA96czWZ0ma0I8e0L0CMq3BQdobx0GOddJV5+VllD3aNBihb2wLLSXy6C19C4a27B9tTBMywK6TLXHy23t96fp6lDtN9cP1/5uPgYzQ2MbPQnLU9nHy3OY9vvVs7Q8pbdV9r1Z+cwX5eU5cwmEUrsyO8UeGl0l9XCdlqf5GNiC5FKOnhQndbcG0aX2w4K0SZZ7gRaxAgo1GCOzYcSeVBYi1ZFbm2MuuO8UAAooiLDoAP172J7igNRI87Pixu0xQaTayPeCIUd2axlMQ5/evWoaWleHaUaGb2Upv3VnPnM/LiWfocvn4LRwTAy0jveA4iyI/XIsn5eRrUITpLeBy9QXRyzqY88s40O6Ry2mak7Nl2JtF4tQ7yFYrGH8TBSvbIvQ/SYHMFJBW2MAThbFjsPVHMdQbD1av51mG2+rPF8wt2wxD0G8dYGWLAvaEh5tmNyAZrvNntsBjtqB1UubzeuGO5Lz623WYPdozH75Jr9lXa30xfEY67uUA/MWKCFSxppf0dzrAOaCLLyoD+ZtO1+VbFU+Jmon31EczXwNNjiNphOeM5/e8n4YOUWJ2Z/TZobgtz1ShDnglXgSjj+N1PuC4pgVvREcXtXLdkdIarLjnBXm/s+ZfOZLeHhDkriFE5BegFRKLdHKn5WPwmf9+BmkHA7jPfEzO+g2Cb8L+/EzHAsaWu38wm30wv55euH3L6jsX87KZx7AF86RhaoGJU/Og3irR4Ey3ssRn+SFYHtuMufb1sirpSg871ZvHsky0sUeRPBZjuhW0ZrlmDQbXeBBKvae18JvdyhRjahNIL9Wx35NivVOkqSC5mKLmD2n6KwPEV2OW3sbYTFzB2kxSwdI9Uy8xLt3XGVP5jMP4lpOj9PMPweSexDyP8fxM8hFHYfnT4/T88PnaDPN5UykPq2y32jKZx7CAUSUlgpkt8NJuztuByUN9Hp03py0O6eh1mvB3dHd4kJt1ZA098HNUpxwWI9he3R3THJnvZSG97tB7ERH3DS85jR87oDKXsjkMw87E0fazj8M2uFU7eqVXwa3BywIfUrTdyADOgQrGfXRgHw2Yi6/RgMK3lbZr92ZzxTunjU4IEnC778greX1gwpSPVfAYWjmT4cgeTCGa9lhvgW2p7kjXtXC1Y6D2twhfk5lFNeu+aDt8MdJKm1Gjk57VEp7c58CK94XKTQ1Ku3E4iDY4i2842Hn9UpppPiqEu+gxKeBcWdGhldpB4+KQJJBfJoUGFC9WhhZfFWLd9Di07Q4Mjd8MjVRe5NzZ0Tbf6NJAtee1Q5v1hrPo3WuNziCf6e0Bp65I16l0w5XNQfBMO3n
hEvCVi0oxdDcUclanGPlufNWuOTfNOUzjzg+B0W7Sxhn6kKBWxwz9pcdKSevuDIAhkWvrScuvQE+/jPJhkzxRZ1oOPnj9u/eew3GutBY3KyJKZsOh2O7FHWKdsRLYC0XL8xyzWgHNcCKzDYDf+/OfOZR2Yf7D28oIWbNMcywWQm5fMYmKLBhKmzzwgaIdnFsX4EAw00akGOs22DjkYAulkSlxxFmSujo3XA8i1Lgi3ENPngurCAVIP40YxNaYNvTATe+CvuHOPKt0lVOHFXvGO+ZRaeT95p3Ft9cow/ixZ6DZfE8IZomrexxU9wfvGX2pTHTw35ezd6s82J6QEvz/dvg6RTl5mh3wD474j02cP4Erj6meBdI78M2+COaJYr9W4rSVm3xerQJIg29VqPuUsSbwTI+iPkSybfmDIyHJ3sVfXdFfFp76bROnLOk35/PzWcec07rLtvjMJ4wCXEfsnh8leMkfleM2rCiDxT1C5+0dxgnnfm/qX1KnEfyGobEldnO+S7NLymoTW+HEowfVLyFII4SWHJ7rU7EAHpbJa/crCHJE+9zsP5tEtLKL0pvzklpr0vMFnXxbygoBznXKQouqnXw2zbcZ5P9RqrZUpTjxxu1EAnxAyVlllbQugfSmyiSwFrBaDpxxsIhgx2zZ4F0RwBYHt4VQRd890mSLKKH6cXvRZY/YHlo/XWXlB/i2GGBxljploAC5b2i7mDNucTEyASJEOySgsMeZp4ZH7QMs60J0uOfwBq0m+wYYK80z5w9pmaD4jKxn12uEdVsi5F/PSufedzF0ZL8VYCjDSGbGK5UEHVgLVwegfH08fzC2El+TMNVzjBUzXXrHA/aY1jwCgSLIMgUYCAJgkUKsKbhIH5mP+/qKdf41pRmmD5uf69wDPTxaQnryJGRCsmtINCC4zXVj+g52muDknGtBo1WCFtIK0ydt2y4f7gjn3nCSakEH1wrcNIBtUJZ1Aom/sQoRFp7Z98avAZITLIgfYh9ew/HdkCKBlmohXgXn10N1sI0xUcV4E8q+7WAP2G8LOf32NLwhyQns4B5BYoMomz37lEMpbN2Uu4FsMsAU1E5DhHoeZKtaVKjLE2oRiTswLh7fNX6bU6jJOIqlB32t3yKp3nzKb9P7ihazYpQB5cPiDKcHT+uARiQ9jfiCab7JFmqx0RZ2jwTWcrpuPARWBeWpU9I1mH4mJGllSBRllYP38tz85knHVlqwnnPi7bAiDkNtSTlIdiXdIKmEJC4j9E39tX32DfaCFb4Q0rAGqturHfSHCgC/T3FuQTU371pUd7iCK4JvR1lrCvsh6+Ct2Tf7wGQRF0oTSLyzpWC0XB+uxW0LcfjwdcLH49RS5I0wXzYFNSA8adRgE/XAi45NyzVMwV7HAuOgnGTfRJa6fKEG4ybHE/dR04eCHWKovi9gvnQxsuSZjRcASu51VmNTiVhrbZLNiQjm7CiwLtPGfksoLd3G3Q410gAfoG0k5xD4f92sccmhbnVKtFnfhxPRtU3s2TzWl2H4TaSQLMk+NFFlf1fmvKZhaiGfVbDb4BR1W+rgnmvAMoHk5QcLGgxW7G23SDumiOROV0CM5deet4hCXFdhIKiCsSQOYjgU+pdUaxRHZSwCVfxKr3xlXEI9ZBQ01BsUXjBGDZ+4KbsqrJVZ/OZRS4wll2paREYyzkSRmX7EPGdZNgAuEWMJWH3gf+eOpBqgjxFpmuAakWKVXt0wCotEkw3XApb2nG0Kmx4LZccrWgHmJckBKMuSQgGIgTW5IDYgB0B93J3DGqzBZsCJ43/LhwSzYBBUwJas7Fgn3KGIgIjhYGs/F+Z1RW44t5uqSaAhUIF8wWPcdjPFbOach5T4o7iODUbff52KecRFqWcBwKUDV7nMTjj25yAZ3ecfq7fVyHk4DwuQUPh+2AMKhHWm3YOG2clQAJ5BP2qQGjNOMXrNCCoamFRMxfrjMPiQkHJUJknVyzQSBQpnAjMxOlRhkYgEFhhIGc373x4G0qL5wbF5y5RkINRW1GFSWVpXBSH6xB2SM4Ey1qTb+FvPaolWDJkdeOnSwXoUdEoJa8rUSlVXf2v35nPLHbMU5agV15Lk29swE2BsWKkH0g9Y0AA8D/H5hsUk/t7lGMUxeaq9RwaY3lCkiiTR2nOCGL5/VOQGT1lg/ldR6oCYPRoH45mHhXPV8CFLJDJfgUkhHE/FmoIVXqrtIC48+6XzXEKpw5zCBhcl16xCLIoOr9kxlRdBKdeiMzI916TzS7jHtfmdkSCjfpFCcZm9gyb8wvA7We8wkp8roKiUP47XA020uq08hM+B1ipo1bHUP56R0/BHAa74vkW7C1j8kohVwaUISSRT6xqlfDUYZtwYnNuKWD1xH5vdj7zlENU8CnHlggz8d7ZmcxEKM5E+CFmIvy5R9lMnlfZ/21OPrPE2UNe6acDdlMzArtpEezmfbrAbo197e5rr+TC/qbtff1uJp9Zii4256VygO3zoWbu6tmYacOKhq9XxuWOrzafIROrhFfpu3CVo4VRHyDOvE56CdVDXm2RvdrceZX9q0w+s8xloqFMSqUEZT0QKCi8ip9p+KyusJvuePnMh7pjHSynUnI99QWv1BZmubwwGshTimYi8SpPurxA8mJCsrdfw3di2pSZL8x/vyOfWeEuDFl+91xUQmiME2JciOZtgiAnp8wfk4BJ/F9xyCTFbfuEE8YtcOJI4gYgVThlEnWmnmGSXqpbJIgAOIoJju+UrmL1unwH+2kWCApWiQNuCbZeG3p4uQGpqNiDirFKh2gBrTMlkmIZB33aLFmPnNpF2ep6dkGvBAHhq2p9nbQ/NBOKLk+k6FIyRdcmJRaII0VXKFJ0hSJFlydTdG1OSLkgcEam6ApHXKtFvWr7qtl8RrlpadL5vhgCN3Eo0t9XjoBXypHCZyCVMmBLFbNHRoxtYb/tMM7qe+RJYfqfn7MAJNK8l8RILkHSjFe4Ukq0ePdiSZSTRgi0YI1VKI2A6VTmKQh3S0wH3oi4/0YUxEPnHYC1JTuy0iHCA/lUEcTEm/4ukAsoTl9K6OKA01kEkhmDdSPbqYJRLWdugkCCXYWdWgKCkE3qoJgJilc8II8ROShyWDvI8EeoEozovXKYoGGg46a0hLoi7eA9BSk+JgySU3xkRXucNF0KkdV+nLFFJY4pwQ6heEQFceOM/94P36VSqvfGkzhY9ElR8pKU8O5SXHkpEDdU0J+mXEQF4q0BGTvhEp2yJ9UqE3GwT/5a4+vYwBKUN4+i9+IRYK/6G2tsTH/gpcG4PE5wQxyZQTyVYTS7dqp/tPGzTV96WY2zPWyjoX91dj6z0o3uPSpxBrGWKx6QAGGaZCLD4AqHwA4j+TZ1SNI4bOVwXHn6IMCe9yipXJAhGT38G4DXdvLzsKo2zjFI7wUEeVDkrJnBBUA/4Um4j/kOgIaG6sjnDkjkc9ExJUG8RhTkPQpHMH+mGeAs/JblDEegJg+gxqHdcthk0LxHyaQtvW5l0P7tZ/KZVW4Gjf1F73F7xz8OFi5DBE0EgL3JB+EddkqwSSPD7sJ6ZCW
cDnZTzH+kcUMgfJni/CF67e2x3WudZ9IxkwOpnAgPaAFEwHaL3iFxOflFLfECrdGSdb9MQ/YOoY+aZnN6VBzjiIkokFyFbO9EHFmw18uRMi1akHWcx8GVCfjqvZLl4D3gSvXSuPF6AFbkQM85SnOX5NdULQA7GktXPw+SuVMJtiXvIwc2ttRIobT4Cdk9IWSJTEEA0MUEnK0B4EpEpAfefcqRE/FJKh7XCTP2/ksgNYK06hyTB4Y5YSIXhCRz3hdzVWyjRB2pMPknRZj8Fon8UbXGpcb1kgxHyhBW77ptAXKhuPeL9oxNTwggi/mk8SocHfcuqeyP5uQzq+Wy5ffFkjWDWydV9x4AE7iqfQpMTW8odhitpR8zFQspAUZ2L8LHteB01MwWo3RI6QdgxlXYxGTHdL/5235uq5NA6peSTyFt1wqyL5NwKAxKtGzBHi1ya2o4aKV9EiukN4gOMM0FFI2V+2NAg2AasKHNfEYPA5aTGSQ3SUysYZ/kNrGZyWwHClKqkwz+gKS86nHEKSO5oAqhPCgxczPGV49JLnHpgEoudSvZxt0vz8ln1iQgC9NUN7tRRoWXzN+2G7DLETf7lOA4aU7/QaKfa05ywK/JYISyjAVlhNw6yaBhVefBHTkpim5baR/NOjCDGVKX6e/SWzPJS5L6ivpE8TtPoyJHdhLHXGGyy3CJEubHaxMhHFhfRTCi8j6x4NY42zRrR+3fdhyI95/12yVOiGajFlxxvc6gf62d9P4FEMSL1q4/8N/+h/nu7qwK4r/N5DNrnVKkkdjziG+JLKWLXkoGPl8PQYtO2J9NnkguhZs8Yf8udxw8gZQSuuhloINWI8p5z6oF/ReZfGbdrb/nRMp7LjoFnBbwnsFJICDuw8/s3wWnoRxwSNe9B+CUv3NHPrP+VhSnN4g5J0T0VbolxRmxsgV0CJfeeutEAjvGue0ygHMAb0tlRkz1GC4Ddcr+/SKIMm8W6V/Wa/gNoyURAzRtRdqSSQhzHCMWVa+PWDwGTQ9KLQLCPTMhAfP3SMVVqMy5FBP9Dn9ASTFdR/UaIedL5RpeAXb2SSiyGwIGmNPwlNXoeTHuBA0WZtTyoMxEO+rYZ3UMxcc5Use4+ypsnQIZeEgRnGg8RR37tjr+m0w+0+ykuNpvLghwSq6GEF54Hj9L/p1D4wzuRuk0uCLP42cJQsJrlwC31Xfc4HDfD0vviLXtGmrir1zAzwBNDfnH6EyykMTfXbmgE5/nCPUz5h31MGf5XrXe8Xdn5TMbZX7/WuWpCRtxnSfaCasdB5/TnGBVMje8UgCbDRzrYZSrJaX60tLT8JYvwu54LnlWNaq8V9P6fDAoFqsr2SEuAiSPQ1JIARGOSYmMqTHJUYzYytuukjH1FXux/v2sfGaTw44Z3OqhmzxlTwFH433GghP94zToN02ZpdI4NKMYxZJYDRH1cK82xVCWriJDdPSASAS3ybTx4Um2JRptI38Qi19J4o9IkZWqUW4h+7E4Z4PGyFl3nAeylpQlM8T+/T4t4EGn+mKsteXPJ4mdIJCilOcz+czmW7excJWRpuMmdl+6zffXs/OZLY6tFCMeoYGTMek5VlazZA9KdAUdhyTDH7/rEaLg+gHYd9AUhikyrr4ApHTmKohYpPQlW26SHQfS/PNfgCNNjuPxMzCjgBvz8rRHILfq/aKE42CXKhhXQgmjIbSmEsbCOPyWYqG50y4rnGX3me8oobyHa2a8u0h8jWtxbHCVxjJZkooufeeO3jPOPJly3aJykjbV3fUfm/KZrc5u77n5bg+RspkkwxSOkEkGyV7i1hoaaGO5cifaC1kSUoKlkq1aJi64TY6SjJIQCCg5XFSrGTIrRfntAuwPRohF0MLHhGCgWVH0utNAyJzkHpcwozrX/2ZOPrPNhRRsSY0XDiGxUgmS3z6T0o4pIT3hQxzIH4lZb5J5FDzzLS0wgGK1bWCergWRUTqghWQkXg0oKFsZk2puHQjjiEju1OmY4iMSudNUnxQZ883TJSKo6kw77SD2pa7Ufg1Ax+mjYK2QAo8A0snVtcbpNN9SAq/y9FElAD3xamWPFoL5zBDqOixYS8pA8umjJuoSbnHtoUU2W/i/m53PFN2YYL8kPM4gYM0B9Po7pXRepVVKORh+IEznccR3egXETR247agEbuMyPASSefwcrOBdwqxEkh3LdHdMxLbgINixhyXacqQULZ8Ac6mudGmKOUJBrYbgEzADmSlW28c+AVi/zKQ0lsZCHpG5qPencQ5xWhhjelGLFNPzwtgj6ne9Pu+Syr5zZz6zPUFIvvchhOTkLQhJb81MhCTSJqULyclEISnEuphol0KnV0sS7hFxQtzoCS32At9pm5bsxDUcupZaXnGvGmy45XcqIU4S9cZ0HJZ+pRhLBXHsrRqsU38wbnJgU2qLHXy4lQYScFFMrYR1tIMaULGjJa7Dktq9XT2eJO7f+xSKe0NOugg6A92Rz7Q4J1O2Fc98ZOgNLaI3QhG94VbPlYmN30ckKe3VK+LZY9HNxHho3EzxXgWQvuL36ZGa7USB2IIHKSCYoICThNuVAIIPhyUMJ3P2a7JxMTIe0l4o7VVCgx6Djlhl/rY9WgXKqA8oi56Hxjt1yCQmi7gOisrrVikNJsscS5mvJImzWgpLIT0bk3iFu9OarTDGAM1BxtVEgOTQMltoV4y7SCbTYqoXVIveNkktlk/W+w6RrRb/5dx8Zofsp4VAzG/aaPZJvjZXh3JSsEbU5qJAQyC28Z1AYXnUkG5CmM70kBCrlrC5J6LIg6JplQDoEs3YEIGgBOnpYkSzArS73qSE9iaV7UqgVvGgXth7SqdUOWruK0hxrpoPyb/l/hmqPQ1TY2gr74E14DD+I2KjS8g8eZtsgzZuwqBNKVE9ISjPkE8erW6W6KnCRfDW7Q4AYVCsEt8eS0dhB2zUEH8KkYiUWT0A1RYTuIIRzu3m1ok1+EqDhYdcCFG3aQYmkPCFmB2jmdIiXQ/bF5yrqwD2eu4h06ZKaoDF9+DY5uewdlBC1HNLiBAafJpZfSqm2+txnTZg7fydTD6z08kckZItv6ES4/jeaynZESg1KL+RnB2qPcH6nckA7SShF9rBqbdn5zOtTuiTFx+Z9V9NDsWOviIa2R3YbRaD2prSiw5SaYcJkwOcXwKJxleVeAclPA3ZoHlkhUF8m+Q39eTsCm8nUnveqFQwWoByAE/MrkxzdEXJV5V4ByU+DdU2jyxOf3a4m3jajqRdnJ3PtDmbeInUh1ZD00kj+zgeSBHKy4BprdAizQP3XVFAQGFgi1nLn5GsPP0MPH2dghgzUxEHB/Cc167OxUg8jXR03G0MJDQc2omt8srYpousko6D2FCHrJ8jGMHVnLepjY0RCNAVxdsD0WqSaaWjSMZMYzmihAxQwHUlNLZRzFhQVmpyrzLiYgmJi0V25uZ/nZXPtMuZEtwVNY59MzZCMpwrw3uQlz/3tNtVxo7183e0eD/3mbYPrjlOXfv7Ifob2rGFT5CPdkjCZ5yF/cmY6+ZxiP9DU3sOUF19AZp4mquQl8IQ16
OmHpGyMrW/F4G3wfVkVw5KXEcedBNj5EVwWKVnKP52Vj7T4azqBjH/xX7YANaRlCHT5ZH1WoTy6zLp60lYbTUC+4SCYgHUanCPiuCkK3esX1LoLzwtEgQPGoo5U/VhwzqV2RnWuo5IFp8CAnRVlDhmuD6KvSH2OZHMkekwC31SfRtndDhUPwGF6FwROA+qYPRwnInaQOs8esFa579rymc6ZyLTwxGpisecJCb8hcocxoIhJxhnnpn4l6uHJjE3yhVzULGD1YqmuSxBDRSBRScmQLNQTn7eMcDcnUiB05yA0O5uDeBvReDT6xNS/7z3J24iL1+Zlc90JYBPIJ6j0W/fpsAvDHnGIY8W0embhKuVAakOn0PWwT7I751OhpUUzkJ9HDZmNa2DNUdRCEWBO4Kzf5ApZM9mHBBfjLHrMx5ObYT77ZW78kJylv/KK3HYu1sCO30jk89041oUD8StiCzwEbQgKgNCUwf2M8uH7N8dfwc/gzwm/u4kVP0VD7hRwdJZq2XRRD7T49A9034I3k5rlsQNlYQKN0N2W9cs6azdLKk6Zb1OSOQZMrHGkl/2+DsqcZJ0kDy5ONQywF+jALvdPeMO/P2XrIG/m8lndjkajay+BWfsWfKJh2UCo2gD8ndA0wc2dS5fLRHSYkGoxKt11Z416XJKgqWczGd2O9KaKlyDUZBnr6e0DXndbVFj82MU3EkMWXgFozp7NpPPBM7GIyKD49C61WOR1g0ihzjVI4BacHMgbwduxEM0lEtlPrsv0lAeeFtl/3RuPtPn5kEpzDw5nkYepCk5ofqksipkDi8EUjcbhl54D6elrBiO4y20J9yh6OGxcQny5zSH4IXyauwRGnBgYyOMjUETj6WVICtOUi0Ac7ZTS/DpJ7RQ7McpHlNi3auEUEnETsYX01ieAy75fEoiOA2BAEXHABehU34JM7to+m0xnTxrn5qwGwDuxmIij2TA9zTXWn0JqW8loiMO5AXglauVMYEdhvELpIKmxF3Md2M32kO44FCcKkwhGOUEUg6CyTvjVIBlKI1jpwDsMmSUNEZcOkyxfEqo3gQ47xH7VgHhnuaiwWYtxDCiFpAc2vQ9XUhC7AqrsNELKvviHflMvyM5mOjYW2K/e/EtkF48t0uBpvB5LaXdaD8GbVIipRhI/fwNYQtjgznFvgqjRFLJs4kmDkjICTWWVlSglIQI4tXBO7LzUQH6oWAkptxIrqBjMx1pbU0nXVXHfX5QSm4Y9PV8rJH0HtJSsvIuAP46yeBor8hRvpM7i6UltLi5fRHJUddJrQO5Q5tTsNxrwsgpxEKRKf2AZHcrJxKgdus0nAO/3z0HxpioatDTd+QzAzJjV/mESmFn4bwWNnLhzFhpQGLAQrUacj3Q8rR6INVnlKKgfLWScA9Ojnc75l84ToYc0maj3qOkVtl8HPviMlFr6/doAfzPbXqQLc80NB9MA+Sz/10ckGj7kdebu9wr4CGd7I8LkFMYW7m89h7J767NkDEBFiuhgsowDCwx4ezaTwHIaGpy7kur51F7pJUpDUqqM6LgenFAYmv29iqR8c0cAlNNzspgMIFxalI8BYZr0QhXKZJcxrEZoY18RnFjjORycsb6I2d01BqDRi1X6sTNFZZ+SwtNiziOPdknpRuMUfRIGhC0OCKVfHj9rDKUqF5g9lQMD0VlwCXMCWsy6CiDElaUxdnPZKNId9nN9c2IijGvsS1oYRTL8OSycEZmU8MbsVClZCdjpaBFBaJFZQNS5sG4dEfyLM05mHjNUgan7shnhhLOgawN9G4FCjkCbcCtQhxtwHi9djzlUnm677QBvXyA+f4k5jV9BJysLYCUeVHqbRGwtGKzC+gMIgpqKGiJxE2h+L/CgISq8juY3gBk6JNSUx7D2lLQUI+DRfhlLp0HigK9y9k7eyUmvuKAKVMTzl+EpejLGeeYdP6unlJO+aKA5+L2PJTJmobaohzFjKPtqUz5cQJV6ja8Ba6yDGuVMjZ+h04+AyYcWNUFvzk7nxmW27c0v4JF2RAyHQN/oDVVaSwn02lYUhpBrxJaYSMBSyj2VzOMGpgP77P7uO1zklY+mEqGFmkhQlUQhMWNZrBjL/e7NRtgZ6zy69scYJ9ifm4Aoo2Pg9cL4ehW+1hPH00ulJ8+av+u+ZXkYH/zK7iWQAhgmtK8f8GSlX9/Zz4zkiArvefTmn+G1GC3MiLB66f6EY2rBL1pquJWpVF5qD2mAa6tBy3L2dqwIoAhYllC2+A65D1zh6WOLS5iqcKW34NoYRoENGxKxv0g6B0RdMwSpVdJpbYYsgn6naDHHiWgmz3S77lROMRK1O/cNLctph+xQoV9cW+PZJIeE3QCuhCDScIuD62c8YOrK2JyGVlGV2SuxZ2IBStjtw2aaWQrN5gKAN8wuj98JM2eqvRKgqpCGlSvtee74xQiRjRgl42N9JDUeR+9NkNSxIE6yrMHbVpo9jz9tGwjhYddG6mqH/5qVj4z6ugH2mTfnkGkqHBSS+DQgsJJXSwCcT8HKclux0XgCn0lOodO9/GYOLQ+fuoN26LhvTBZbipZ0A0jlXIOaEKnh+I4SHJli6Fr+AIIr5aYFsKS+mddSHT8WXVNbFnOlS9XbLv3b+fkM2MO4zcr6zSlHwEUbtFpUYb2abCQsa8Kq3fVnMbEYZb6QanJvmNhLhYtzAXY5JOTCd7DMBrGNKLGKEqs1oxe8O5G21huVu3BhuauDw7qeIOR74InUwDAXW3mjcIeT+o0tfFiGrM2r4nfKXLrrUpj1lbtYmB2tVj5wFEJBDW2S+0I3YNgQv1QpWNYVJBZO6ZRYNt2iuMcpbdU9ptz8pk9zj5n4VXGymfWuf0ijpop4li4rAWR0ytqBofRmFsTe2uVoIECgF7OBfMsN6ETT2TzKUAnnBdjbB3Y2ntqUBIxSDbG0SaM8FV2S9reTRNFA1J6JGK2iA11fZqe/1TuXROo1qx3y2etvft3s/KZvbLeRcjMTLRw7jUAjTLnHdB6cCibQwOKAkg1T88U54yKGdYuQ+TE5WI21OUZMpz3aGGRTHHgbgkJzQ6Bhowfd/AvApY44pGDV+SPSXRMxnDrYMCp1CYLKZUME0yPEsjro1YggToulC3VaeXLtof19ux8Zp8MknrvokpkIUKl3FwWMegthuqWzUh7gy4Vm240S60UDPTx82kKPBQVuIEwf2QKXIsKPPrIFXgoKnDDYzoDBT7vnKDAZ6DGZr+tsr83J5952slZtYr0VOck/5j7yXCdqdvTw9FT3aIX3Rb3j7KPv4JK0mjE/C21xnalMcT2A9Hr4rojr13q7OoH0AvnRYcjwzbcX0yt01PSwWbfEfs4Gj+N9dMGCVSimiUSCdNmn0EB9LfXBQyQB0F0wHuUxF6tnlNviO9R4T6ja5RUpeUCEx7VTkkLjsUSY14ribFJDih2XFDZ35+VzzwzE8WFu9WQyi00wUIhr8LwAG8B7PQR3GHXmeoUE/Yd3LkZ3vBUCiY2lMEoJuzImUfovcq+t8gqvk5q0Y9v464Mr5/DKN4W99e0xMwrM1E7xhmsr
tffzMln9t9SMtCUg65gDCSEKNqkZGCuTzLfTDnGqjRyfD0kUQEU+iTzLX66RO0UjWkhqBSO4R0kDAgHurBZELfMRuSRGxIMzLekqLLT5qhNShHGI721FCED/pksM4LSQW7vqtqxBbVThDsSl5jaODy+Yxr7OKP1KhARKXORDxStKqf1dcF8SwpiVfbfJNFXspMc/31OPlNKQD8h6HnyouhIFBSUeipI2JtWzYW0hubFEYlp2utXAraIRQzuQW4jVBlLa2jObh5ySjPCDO/oxe02QMjGxK52ij6hHROEOiPG6KyxrZ/S0ZsU8R+MfQ07BK0AauG3mjgbpJhjSpDkNpAGqfkUYpSkpgrMq+53S+3Fc31pZdrsDOegTJvpXXWbSmkZyQzq3jwk/TUEsjagIQH1FEPnbZ1c3f/jt5bo7lHOnlPi/lQCbSMmb8IRqR2y7pY5QdHszInJmysXk+OfpaNKSjQ/jkkMJNTJBeZbgEzE1QmHpMYYOXOCpBJqR4ov5ZOZJMXltTCgrId5B2qh2TvCruKdLVGShE+leW/xiQI/15w+ANYgKGQhJjHCbXEzlfoGqoiUNcTI/NZjUoNFbc6XLP/9uiRGdf9/rymfOeDwqe6VbNJ558Se7WRfB8+K/o2DMeNAhmOvcfzvSSktHfVLdUWmp1VvjKIRpvGutGk0bTRWJqcQPLQTg71kJ849Z83hP9yRzxx0yiCCuDWIhUokkaEGsKLPYU3olzoSM5Q7fAL4jBy+He/p1K43HLWlBH1lp8TsFG6WjDEFHBWmCmuLxAuC3en5aZNBqjglwYnoEMX1VzuUwDPB/+UYMXSf1CMKOeGN4MPa2oJzPNcZagKJI6UgwXm9L0iCzssBP9JeZw/y37vE+DQDKagcRLVI/L64T9j49zZIVxEZau6AvV7paWGQ1u+QeVzQmHr/IImm7Vhl6wiwYWkew4GYDcUu/AlcA/Y9Owv7L2fnM4c+LEoHCyQ9MVJcglLa3LFktEkOSkuj51LjjvOluCO3LHPijlvFFq5tYtzRIXYyvssXJaiJUjOIOy7TEH+stEnQUIaXeSCmDFmT8VvFWGKXmAzcKMYSH5hBMnAMLcEgMZZ46KLK/nBWPnPY0YR8lp6FzZJCy597Pq3VjWKGHXA2p4fiwia7aYsj2TdJvSpNdg652dZK2FWDer8Lwq+vuBWiFq71nIgNpqkvct5qOUjdPdpAz4ROpOw6swRYISUMQogQMW7QKa3gDpzDIC0Crlf1HnfNncnzKvv/zMpnjjhln3SqJs/D3hmRSEtMTHwzWXxAAWD6A4xBkTO340CySLo6BeB/rqLj08ut7SqDEvWePwrlvNtFT5Hr8tjn3sIjh7cdRm5YhyibPNcilGlzVVd1M9shbVrCXRprCJ+G8oyOuMYIg6j1XgYDf4sE3Rl9DkhESZJcfx7QHeFWMtTmXbBW/+U785mjzuqvllbfc4hCFEQKvPvFSBpxBeWAFp1btGhoQOotj2m47Wo6Bf4nx1imod09I4X8XpDZxOereuDqakMTYNNA7JZ+6QdSq1wm9uSdqXqwzMaY4fZvH2dSPA1mp0bKRnrbwh5JSE0OKwGPVB5EHaAYMA67CNaO68I6TXUnWfJaqKidflrComJLZ05rBEDyrndLMG4uYTJ0cDjz65jKA9aXjD6N67shJhKxiQa10dTCfXthrczT4Oombcaf9DZmDxD5CoN2FjyvhEaOi47jSVxNJ/E9+yT+5ux8ZsI5iVvSCGU4ZcckIhGQVzChTOUZ+2pAsFT/OOx+JJQp2YQvZlae5qsAbwdCmTKZuuFz9i+ZvjgEqcjODcMRmYLDLwHdzwieIo3IwA22cjTPK0qxcm5FWwP5u22PaySLccNkzTJZSLXwDjOVvV0wT0w32KmFqmDWBabFZxvsMXStwy3K4but7pMfzMlnjjn7hLTf5EFJomJvHhMmmK/BXHJw8Yx4gPy3W79fHlBCooq7/fgAj+JyNp9CKUwSPO8EGDaUg5j7fIrlwbJ0p6klJ40ttT8rjUpFgmpMIlErjIq6p10BLrQI7jhTRBbgDHEaG0+WkfpjyXRpHM5hjnM2Kf0Qyl1Ix08f1xKJEs9MS9zWTSCP7RBBXJu11NztUbFNGZvfrVJzW6ZpCx8FczRscSHEhlyxur3Pz8pnnnWALWJGuKrW67u0s6FQIT8+AAHFlYIemInMzYSsfGy6Gmr/XiUAagonkvF2wYsQ/NwNh7NTW7Va+5wYkj8EorJFA66Me+nh1ma8XQl4TophSo+fl0DcGOL7ou0enLszn3nOobMhhZM7iWxHoBh2ilQY7OxulRrXMhNhuEbqAaQhBmfASUUAUh0SE5s7FITEJwPJXwwgEaEZHgbtCSZN42IAVQ0msx1pWMnQqXVg6BjWA+uVptLU3lvjYuypDxv4omBjKJJu1VLNyGLsdOU0LSMlWx6FlKc4CjYl1SolCJyIsUWrtdOrJ4nrKUIXsF3GjFZXwdr/HVJ3ShMqwdrahynlEsAY5N3DhBX3GloRoYYOd6cVpYRgVYx1tcR/K4Rk1KCbnCja7FIn85njTmjvuZvzcemBZMooPYAhoefq+I1OW8+vioMTsnhGdis9BAKg1Sk1MlsCDt1KZYlRmV8l3vhItVDAfjm0pSutYh3FYgUbC4tt+SiVoeApkhmTyNZRq0yjLqGXX/UtkxrhI7dXNJiGbYnEvulcAYM9uw1YHLECRgQCC9AhMUS3oi4/J9FQg71gwmxrpSht1ca0N75LlmerD8muMfbGVkNPKhy6cI0WDyjMAx1mvwgiVp6HHRqEBjasMCSXHRJPlNNH3QguV9glcrbpulz99KvWcfxPc/KZkw7ak5O5z0kBGxNOe5IDOBKYAavsuOdKUITvdscgiRQ0fWeco6sHXun1ECzk7b4FlmE9NhM1/KRLtQT/4IPJONL7lZBE0ADJ4YCUh6RUBWVl+fY5NUmh0hJvLhvgRAjm7ZSCJ1iFzQ2JAmCENwCdp8R4s5ldfp4EC8ORG1jyZiUVNdP86Q7zt5ToMLPL5F6YopHanIYLRVb+gja1XTYXyQ7a7oUJt91E+UWVvSPf9G50v5f9y9n5pnd/zS3WeVYJSIE6I3Au+YXIbXeP4sJ8++oXFJfyCDG2sBfUOMam7ufvSC2OjPdv7q/FscBVj8cNpclAv+uN4GcucqR4tHaPfa+kFs+QRlVYPNPBhcWwfTY7x71DKj/2zB3xqpYwmR0Snszd+IYnf5WkuX0snikerWsgdIY2lT6vsv/vnHzTu1+VaZ4iEdRrSg5X2O5kXbF6n0QbFLaL5E/rbV2oSi6xpmUv7JfqYrycYdUSDqZphOW0gpcacriEUIZCA3n0Z0QIVRQJobg1BVJUmN8CfZMhThI726khE+mrPQ+i0pdL3MdPzwSA4ysBgBOKWeCwT2p5xHgmnjHMApuya0TmAVWvB/B9H9jVuIErJ0ErB8SeBbQuE4dkiqfKqJvnnbqosn9cPQG/LvdtnwtFg35fcmrUG5DOioH9LGUSGbHKB6UxV3dBQ5ziYOx8W89xSienOMRzvwTr
qKHC97nyol9CZLMxVBkzfwtZPbOP2qQ6IsY0IwqVlS6ildm+VMDQGQ5gTxNcjeR+J1XbOJkbk5v3Ol03uCQEQkiXIYTkQ4+Vsy9JWS4+PezPYRsi9kEKJdg9S1RyMUJ9J/4Fl1T2/6iaA7/hIlVJzy+4ACeX178LaL+DuNK/vlDJeFCj5m/7Ww5Dq2++BbmPvjgGb+27LiVkDjiU4/VJwZq4fQbp0lExELPFFHpwVFfSuG3SGCvQEs4gT8QxVgKpspMzI/6o+RtQvE7wazQu8q2niMbRGBgdrB+3qcdGg6aB+04Ff0ejqfUpG3heVUK5/PfFlosMEWTraRIKpqf7Td5XvKoE3t7JQQnqoeEqs+ti99T4qhbvgCjYHUYnuKb2H5dV9qtVE/s3Z9L4QI1IANdKn6FmqQFnoJlNgWT5PAwn7kT5EVC8Izcu8TtVxpObYrBlUDqohX1qAoikUzUU0YX7k+MSNcSbsdNG8DOQkym/c6jIh/Az4HqH3yFmoQzPUyPJv8PQvMIqHE5EcBaZ5qRyEHLWcquGf5fJN737W64fRgHJ91+yx/ejF5MDkj96UQhI3jTEWUm5Y+XFDx7i/MFn803v/rajVhj3duWUGAZcjxkGD7lcTR9NKe+AbEuaDHq9RvLSuUU1Vwj6rVIs2u9OYyPk2EsOAqY+d5FpmVFxHRRemE6MKyW+WWRENfGGAufpbK4rA2EaiFOHApsqj4YN7WVgXG0wYExppbahcYdJUMOItV0sDyID3/DCglGnOTkOYVKGoGmI+XhPxW28hTGu01AcrpC1b5U22Mh6fhgMOCpym9Va0aRYrcFhjQDzzOnvqDct7xR0SNTYHeNxn4dkNkY1Zvi27Ofu5jFLEC80PIz76/QPxZIdz9q1Vp3AmFPLTn9ruUsrZ4ObJXAJwxaiFpG5dKGCN/OwQynH8NAYYMcBT/jiuKMSisgSGSmF07J8MnFCIS6ptkihfM/wkMGp7rYB1+bqytjpEvbZSieTjjFnhll0pklWjqhq6KzLpPcKe7FaZYWWSdBpc3/uc2gZsBmE6V/J8Vyuisd4roqhPhLe1jjbl1+zOl2erhpWVxydyUUrmy+mToKKSz/qsa/hMgkQYXJHLUoSliv4jmKt8w4pUO2t0CkF6CaOs0IJUxXucDeuf3pGb75CevPIevN6jA++qaekN/WU9KbaelPp/cVq6x1YorGPNsHlNxjsx4Q9P76ksv/lF/JN7/7O3bMGBwDHtT2tso8jVYrMiQDJ5TulKksu+HE6a1h0wtJVyFmZO2gBHlFCCnYyT7y9YrRutwE71P4FvCkfT7zjFKmZCJ6umDZTpRFJmqZZELks90mE6Zz2QMJ0vQoI0+2VIkzjlFyDyUkUinkFQ1LnBYzvG9Rcixi5XSSSiC0ScXdgZjJe3sPMBkfjhuTxmxQbMy8IkVrMlbAaw6dHvVoYaXxVi3fQ4tPg3NLIKoM6oTdMUV4ZTgs+oCzcZ132ulWaQTTUVVdc91qPGTR7frtU3slGofeEFI3U3VqqkuV+Muu0kNhULIXnSlhCcyLaxQw5cwC0i8Yo3YNTp8i4VWmP8z0CxpQN9TVSNx21Xln1vnWmRLeIAOC0YYu0g/iq90s6jThbTpq2pAF5wp1SpZVuR2Tj9rpqKptX4r/emW9693cdv5Jbw02IKTlOr3GGWsF29bm4BvLyplP7KkzYcD2oSqmoUiadp4Ue+t48CDDnxDTrErPhBcuMrTjs0c8sOOgD+jvijZ5MpG0C2nDADM7uEQgiPhYXFwjYxPnsBZkGRQJxhOcDRtRhUOYa8GA7jIbs6xykZ8vHRL+YYNlGRD6CfUvoW/8E8KGOXzU1Ks1ywKD2QVeZ5Y6m7kG2hdeKpiKrooUidS//t1Oq1DPCeB4kOLF79OOmZrj2+w1aqOPT5oiLhA78GRMM5ND7iRNzwsFfJqIWNmixseLUaeWwMy64pLLfaco3vft7bpSM++ZB08OQxEH5iJgv2Az6gmKcOWyZTVZc6QBcZZm3zdWX3vPSOCJxHEoch7qFcWgcR10dqhrkpo6Mcjr7msr+cibf9O7XZFK7q5CfdNqxvmp/NgGNpDHLNn0CUHgvIiilleV6m3Kyp4teUtnn803v/j4Obpo4xUKgW/SgKKtWghN/Bnx0x2EA0+MMmX+OyRZeZgDDaZX9ftWZ+Od3zx4cwExYw5toeBMNb+JDeRPqMM3CPmh+cFJknvIV5PHQQg9a4lJzm8fM4akfMmQb9qxvNrVyaZTNO0U0w2KFoSyuQdqJrdo5CAcyEqsl18chSUFBr0aGXr0dck4tgHpBTEN3nQ93nHw4aF7vpdI2GwptZJFdYmq3pJlbgzQzaKxyNxbvS0rimuV7sH82/V3B0PXmApbDaTiV7i9U9crXnUI/ytKseTO58kaFyelL5fRS3k8DWPMm4yvYbileUtk35+Sb3p10HBauDn3yTTHJtMVgdAh/oIR9XMH4Ja8cdhDmFlGPyFe1eActPk2ig0VkRPi4hJdQm0UY787YCBRwi4/IV0WyWHxj8zQljqyu++fRtJUwxDyrpW6ooUhyHloE6fXpS2QtM+m9TpH7eJV8VYt3QGurRJty4hxtSu+sRXz9zaox/QeOMc0Dn3ghmfo6BOhU7ih81o+fATz/cEoFHSSk8Xdhv05+3mFX5FmZVgBbJZuzCMQqvIqlw3WAqW+/obJ/Xz3Qf+hU73AF+5tSHIv/5TgPhhqYcS+EtnScaDY7kQl3N4LO3crtjESdtto48ByRt3cOUyIjF9B2W+Pvc85a0CI2lVtiEDN8BoUO1GojCFHYEKoXcB4n8W0cajZPC5rM/LcGAxFIHGcilhQ1U4ypxlgl9/7eDn4b2x5FWOOtiHMzmOcO6fT7bWn0xgYP3J7qgG+J43PWmDdIKGGzA7DT+pKYfdGukmFFdv0U7fvcWSvl9vU7803v/pFTNsetoE4DTpBblnYrsRMRJsyxBaWZC2SdcnpW60DKWRnGkR1pJYhMIq02YtmsxBjMLAhYJGlqmeaBOAK6P68LPluXGjBkK+wXsVlNzHFb30LDefOHYwS8BIUw0BJB/phK9sfSkLlMnB1CMNBEkFdINq7XKVr4ZDtwiy4FhcqVXRI+mTGKhjNhxMaJurCfoE9E7bYhc5DqFxHtjxuNzJF7+5StMic1BRfMeGX0m6f3McZR4kAxAIoeqeO/4URbhg1NWMuf5RM7/bJ1Yl/J5Jve/WNHU5Ev9udvgGwjXpJoXEkwhW5Ar5ZifiZbehymsVwqMxLyRcvi+LPZ+aZ3/0QmqUdEmn8pGR2fQIPGK3uf4gAvQH0k1nIOx6OuMg0UH5k5ZVpdQ6OcmA276+aUaQs+NGUadmZjyjQvtVWgElmlmWZSN4sFkOs1VFmFiMGfXmEgHZbfNiIR5SeyKQO92v80K9/07jccN448k9HXRGnPQDKSl1MQLwm47HI3xECGuBkvRKf2xR3z6slVzSwOxOkd6zkDhk/bmok/v2zPRAVq0X7ytpKkA1ezdYsVQ25l73bJb/7sO2B0w1PPvoLNHTRI6uIBm0/qkBO
FnhzH/N1mdyUvc5zXu6Syf1r1PP4UVzI6klYnFjoVdRFwhDOrtaP5tsvt7cL1EjmDkyvm525FUgivWywtXa9T+MW1+ZZUYed1w8xFR2jmykdJguozVtnm+arfcdUJkTOLO7BMeQRQU13IFM0RNmT8wSq32BZJbepflEhGjF1UBLn4gERYa6o42ap/RCI2NqA/lEJfUChFn7AZlMxvH7LZVu0K03oWNwNKfSr2VeohwdWryQQgvthOh5POgQikNXG/e7lZiRbaZIfAA2DgekrqoVFdTWuM67WsKe/B5rTsJ8J+nD5I+zHik3yW92O4SmW/Xj3J35T9gap1YZ8KbDRjSIO3IkmnMnBsy3JuA67c0xDzHE72Ib0Qagx6kzl6g2fFVjAxnkOQHGYeV2jRHjMeVMge1NxLKvuH1RP8LScUqKnrTnReVGM7TVqBC0GgNYfUNoF71QdQ+FcOpARFfFWLd9Di02CTmpHhVS1c5VYiQbcW+MEjeFo5kBIS8VUl3kGJT1PiyNxQ4NTBtJUw7DwbmBxSbJuwVb4KptWGmF4yuYc3O/SlQQwWaQOjSm5/M93H35IIJktYwqSHuSsi592LnDZexHn3P3NChddD2gp7wV8g+yl3EK5Szq90AK52KkFXMJCndATy3dTNMDziBvy4lGIm49DiOLQ4Dl8cR4DjMBM45Obdr79m5d2/Uj3137579uAgJpY3GHVsT8BZqTjM64dJOkt8/tCUw+uvSwCFXJ0BOfJWqfM5R07Z7cX0ok+4ncpAsoNSwvKvFkCjdOJn4BZ0gFuwCz8DVVCqi/eSa/djWd0wG0evxH3GIaVKi8u4Zs1VmDK2w6LaWGgCpUKwngMD3lPum1x/QWz9CRROlb0ShotdQUXGl+rB7NQqTo/5rMQvWCpp8s5807v/4u5Zg5gJiJnmbro3kThW3puVvrq9SYD14nEJRWbof9sMuTTQuSuh2sBzQoQmrL5BarGpxHocQ221RGqEwsmgCEJCFUaEYWDeKlm3DA8o1sY0ceGYGPSe5wS9F8H+XJhaZUPuTdApkXr5ENw2IcIVYh9JOkkmaLJJIlHWG8XRrFSQ4tViug4bebkhby6+dZhy6Ld6dZ1caJbkgnHZlxn4hSAXlCgXkGbrg8uFSJQLEcoFk39aSfrkffvI/no23/Tud+r1yVbpzDZ/4DP7qdQnFKwLzoIjuSKm8hHc6fkaEuXYY5WLaxww9TqnrIhplb4k8mzJQX3u3UIpHr1J6njgfRF28WKYjc7kFIVxbl1gyH3gOC6L2b1SEi7cvxWCBVxV6d0rdcLx5sNJ36olqD8ntzCptgIDC2pnnF4V6ky/lMawasJI90iVfuGTUjAJywI4MaO2pZIDPixRDnBHWww7GGKNNRorDEVYk9tcRW2R0oOouziI5dBPdcUIX4FVdPq7AGP1lqqP3cJhKbmWe6xesHIfv1s1bL7rIhxGlBC0iwN+inWs4FWb+k5IFVdapJI6pkkLd9z8DoY3rxtCFGDxM21ZdW8l05UwXMrvkqgV/VbJEkVgn7m6XboD1kqbKoRWcUw7NGhitRtspj5pHUzdMWv9p7kWHCyV9RKMzwRrCYQWsAeMCcLBmDrSJqGRm8eGS6UmQg6Uc1MaM27UWQdSffZTSR4XjnCMgUOJ6lWLPC6ck2+6FrqFnFYBFLQV4KSEVKirB7DrW+GQVAIwdUgCQTKNHMNJpw/Cidwj9nAfYJnEv9FCcGEKwvO6jcemxfey7ZcA2rtwSa3TPuIk3Md8Rzt0ubgTp6nEr4Sg/WNKCh+PsCdBdGlHQGfT1eCA9Fs/bnhbSxcdkGJ/lTosV0CJ6h9cBlu1O4aWWOEakhST4zATnOjcgTRYj3J1BJOwlL9CW/BUYws2tmD9Fpz/trzZII5oNmYaViJ1C36tugVPN7ZgYwta9gIl1DpgC3rMM1SSr0qtdDwoVVDDpmGQePUm27X0FZWtzM03XXuhsV0b29VCchBW5urbYq6fs/mUr8dQoEE2bpQ6A2ETCHbMw0fScJUe1xA9BOgI3gVPidn8ZYhO9ZalIcG48aa3BJyC1abPnv3ctrgqqZ703CAQmKIHwtae6KhrDlevldx3F8OWUMb7oEQFFC4SOz4sU2hYIb258wRONyK+l+v39KabiJiJL6vsf7sz33TtzI2azIaMacgYZHnS73xaWZ5u8ua3DctT4tkf/bLK6urZf7FqXgzc9Ogrjp9Dap+Peu6QlNMKBk127Ma/7x1UAtX1gnGxg8Ni5LHDbkYM6gpaxWVixcJxG5HgHsv44quwIOYOqfyHj0n9MjyG9yMYfYMCLgwuX140LrXK+dFBqV04QhF5ZSrjDeHlWe01/v9vifE/ZhhcooSIK+OvUKHGV2HHmDsogaQIrzJOCu8QX9XiHW6ivq+/o7K/NivfdO0ll1p8k9iOlc6qOmLaQ9749z+cAWpmYpf89mnIaoVIXgGlZCU741WcAFTCnuSaxMoeqDs8Dm34DyT/zjsOJdqwo9UzYIbukHa0riuerxwSdzRAGMMXsWyDdu4RzJNscrPJUxdV9vc/k2+69rKDrKfFeF/k6jacFKQ130NeabJ3poAYl8lDNeLtx2LMr/WcHaiNi2K/Lr5aKWqBiBIrgJgNnS0pvT/WQ8l1EVNcU4H1tj0sGiXOrMKgFohGgz2SgDa9LIj3obQP7sizAHcMeqV6izJDXZZryRKl//zdUrvI6cCGdLhwEOzRoTmpTKKhUrItghizHaOWk9HSSkRLc+rtJmjpz0loadMb+2NGSzP5sfcIeHePf3DMvZ4R5j4QMff+rWDuffTDeczeBqk/i7dJZDinulp/RGJ74DqkCM55hVuuokygqpVmp36k3UWdl85bed1rTfmma2edvK4akeq+EfOy6KVkLXE9TNYSkyeSpf3kiWQtEZxOqXh/uS5wcmTm2Q4lZju0nO1QI8qZxfJXaBZf+cTNYu6YFLBXYsA+vJWAvTyLf1edxVc/cbM4deDnIOZcP+Olr6jst+fkm6695sJVb/8p16VPa0xDvTCTN/fEN9diTOOjfVMp0qHEqIhK376jXyaxe84RGFwAhLsXaVf0M9B6+AJ+phO5+6Izybsef3flgk58nrN7z9QlqT6uVL0editSjPI6/4mbRSYQ/cizzfIs3lBeFz55e/HnIWFaP+NGeb3uKq/bf8obAflPWEC+fvtWlVf5jnzTtbIbzaMKrOh0MpdeDdlqzCso8jeNQxljv1+qQ3EJHgt7DR2jQN/gQyNg5pzOQW8dDl6VD4iBhmGTWBWiuxFTtIzh00kUQG1rYS+zcgPSdbeTjKWAlLcczitGOesIHicPxo3+hZBDzgk5IHk+X53+rn0eWp0aKg69LZGqN7A/V8Rx6AElVAeX94lt/0qclKV5PgrPGY9JTesbE9YHacAYP6aTC74D/Mz+Xel4QvXcfDcY4V1S2d+7M9907Q3XAey5uTcSDoEWofjdVAk2I7OQ0XbjjayBo5fTV9FeLdgHGP6eAMXhQ683LP4Jh6DulSKVCut6KRpYKKFlohkJYr9hT8zaa3Xpfx0WYq
CBJmmgST4SNInqIQU2cc5Ck3y9qsDerGsXtCU12TGE1kuteNxIKG7cMSYRfPtAxuxTkEKvxrCqs8nMt4A6zNwRjr55OizuSpsy3g224lVuZYJ955g9wKF3HmEqVNjonbhMvvmWEijDkZ7aN0+XrK3qTMt2ZsJKsQ6lwzx9FLKke7jJLSSG+pzudXviVrjWm7fwHfGqFq5WiHILm+lGfc4smW9JNcPVkX/cDjbrty2s35ZwU/8Llo9448RcbJyYxom53U7M9JGPOpgysxPz19UTc6lxYhon5rbTMT/TwNnNT1fpKyr7ejbfdO2txulqnK7b7XQ1XNmGK3trruzNJWLVp73ymXzTtbfrKiQaIrEhEn/eRWIjbfRJSRvdXFSNfrnWc+baO4g05r1aAfNyssTxcLxK+6sk8TeGndB5ke4QgYEaX5XvoMWnaXFkYj/GTnyaZmyPeBXEfIXb3XtDFLo8yxySHedU9uLcfNO1y24xzM9WyuseB5+6h5U7OAV8JtthzintF24DA4RUr9oNp42NFVKuFSI4UXukzgxaTOgZWqKPSWNoJ6EXUdqvRlxY1/utAxyk3RKCVxHatxIoieZmJybOcBX5hEd12obLK/SgSJm1xHSdMwQwdkcAQ3j2obSNRK3L5GKa2WJsyURrpvtvTXeajo9300wOg7wn1H3Ub69FcaJ2dd4rIh3hYlNzQOlnJRBBevM/tJasl6jbXYn6/Usqe312vunal12OKz4e2DX1VcglA9Pe6CtiDjxuulYbOBQi6GapkTmD+kOkQO80jXfEq0q8gxKeppHJkkZWGMS3SX5T71WxUo6qtcInaalGtWASFaCpiEcpUR8KMaZHDAuVeFWJd1Di0+BNzchUQnUQZga8W8CNJ2UGOlxlPn3OinPe2HpfaWy9T/HW+/hC7Olb7zfm5Juu/XJj632Kt97PR6w6eZuWvqKy/+XOfNO1X6lj1m7s00+Tdm6EOD5hIY7kMz/6ZZX9q1n5pmv/1FFNHBp3EIAjEmtMyH4qeVVlIFOIBmN+GJuCl6Z6CNlqJg9JFBI5KEn3cBTkwfhjGgTfxASQt5JvO++YjS2s1ULFBxtAlSeUAH7l6m9FfRGvT0DMgZ79/sTHb3cwicESCr0ssln53qmaG7/qNu3pvznGswKCD2vATUxjnkIY9DxYj6edvvZHjNdvb2v6rT8uRj52mSr2moc7pgWSCw3e6NSA1C9YjxlBbJMa9NgrHZSARKGOzIBb7BSOQQlDb1pnvVIgkaMzsbyCzmpRMY1iWfWJNOXbHZryHrwj9/RUQsRIr0TSmDgCYL0dEiSvctJNTHju2fNYeR7rSfrdRtRTF1X2r7P5pmv/TG6piRtz8nVRCrch33V5EBv306cQyFHEi6OxvXqfqKvanTTQoOkEIfSa8osStw0SHbBNkdCwrxnZ1MM2KVMYtUhtkxUEIg3j9SYtsP+w1lNruQQenvMlkbWdx7gGSR8qRYnLxeH54XVqhu9y+9yt4h3atMQGI3LEhwMaAqzB0yAiOGC9AwSYcnKaRRNss/eQmEtlBD/alqxxa9811rsJ02HbPoU54hV2PtutccCODIYv+2HsMBC377QpQRxG8l0SBQN3q3HEGM/uJq6h4J4yEsuOBgapiNv9bhCvbhS7nG0XmX46xTG1YNWNhnA/06R4HZLlydzk3O4w3Cn1rPDERud855CFINBEcEGBB/0uQoeWhTvAYW86RaH13KgtRsuvz6TlrLeQW85esBoWv/WZfNO1yMUacS+pyXHx/ShErlfGCqe+tTnugAITsGBfnP6YrCU5sR6NMn4E+nqk9gth8hRHgnOTfmCGMv2qYMeZYg7sv9TmKLYB03nCnoNOsSH9E0zbI3bjKNC/TH6zVGra630RftvlJKeGJe6ssFfi2dJx4Y3Q5ajUDzvsNFLumO4ltU8HpB6GuTGWdFowvvm/6X6pETiX4uj1uLM0gyfsdaI1na5LnBSI4WFK3MV8N8VGKhiHOZ7vVaJBy/PdIzXDrpgUkm0a41kymoVkvOZ5wUhEh5YMZpfApz2uDbNmi1pbh4sloiDUsuzbRy1JvFzHL0mRd69bbq8+g8i77nXl0pXXXLn0aw251JBLDbnUkEv1cik3Ebdgrg+PRGJ4xJtJWuYmcul7Vbn01btnYVe6hlxqyKXbXy5NjhMbKZypyjMzzzcpMd8UMM/AoJRv8urCZw35+AHl4wxk2E/eUtm//2y+6dqvN2yrhgxr2FYN2VFvW3EVz5o3PzFVPEtnUsWDzOBcxaPFKh5v/UyqeMIPXcXjzayK5wGpisdbPKMqnqUfWRVPit6Z+5bKXqjqnd+oq+JpKJ6G4mkonobiiYFEmy9+WoFEN3nz2w1IlKIQfnyJFMJv3j17EAt+GgqhoRBuf4WgKHPSc/Fn2R/J7BhuENEH883NOTbIV+H9zR0aqu+jV33es6m7pRk7JlU+dMekSkuqoiEvIPjQHZSCGUem1rytsn9TVQi/VVcAui0unxXGvE0BaqrSCytLvl0EnesVnb6wqARJ5QH7gWkUwjLXamNS7+GGj9xcS3CHe7VKG+x4Pe9qUIS9uC6BDxQl/1ZR8j8qSX7TY44kv98iSf5Klyj52xzwYr/EVMvYLa8DMVX0XcYiWTgc6006lCk+tH5bwufy3+oksEMvESXhIwaCyEaP3eTgqNi8iPSDiV9A86Ic4QHVjrSGNppRg+sgjvAk7VaU2tRAh/81+KZHYJXXSVeNf+803dFmt1rP2e7sJobPLgITkvaYwp1IdzQ94npMuyR77Rnl9ETacw0WCiQKI/Q0nE6DYQT9Gxbou111JjWxJ5dlK5JPCu3GCPiMFe0+3awlW4lXtkWqHuGolsJ2XwsN1g1swUo3/LYQNxYSnIuFTuTmSZUytwx6VtgoivSh31sHWiYX5OopgHfvFSUYn9l76a0Q4bZIcym1QGMSrpXsJ29BqublHbEFdrwF9rarF7a5OuayIWi4QK3gfhvxxGzzl0+I/M0BYgKn96KlTjYrMEJzeQL6ISEVA3jL4W23Oxqmz3gRgreiFaBrKKao+sVzxrprt9ntko9xD4ymBee+RKXd4WOAl6Xf6q1Ye0W7C04TR2NzgyIulzUZWSbFAS24sVEXFGfQnNfwzHx1sj9uoZcsj0MuQrhHtK2V1CQsAiltYrMEpZ/sk1roTbJv9oDYNm5dWts4X2wbxwD/m7SNu1dqGxcuv13bxqkZtY0LxbZx3odoG2fyGxuB3IJ2qd4CxT4juIcnD5Bv2i+dtwWHIUjidbqt4CrnLHn17+/IN127UscGuDNNYhnikdVMmy4VlTgSi0lRQL+WBqVOBf4ozv5l8gC9XVLNgz4CNvEWQCu/qAXEO9tdrIfR7oqeEe0uxyYtEKs/ovJ9tiuXz8AaY6w+a+VuLVi55WHTgdimStnlnBamSgFyreKA6UAsRBoi7DFAJ3iyrv7SK0maO3xepMshbD/XLEwDCUmOCvOi7UqKuPDuimtVbVnCfvAW0f6EvszhUyTnOn5qeNNwp2sVmCqjB96mU/Y7jVPWOGWfhlP2s
K-isimler.js | //--------------------------------------------------//--------------------------------------------------//--------------------------------------------------
const Discord = require('discord.js');
const moment = require('moment');
const chalk = require('chalk');
const db = require('quick.db')
const ayarlar = require('../ayarlar');
//--------------------------------------------------//--------------------------------------------------//--------------------------------------------------
exports.run = async (client, message, args) => {
//--------------------------------------------------//--------------------------------------------------//--------------------------------------------------
const botayar = ayarlar.bot
const kanallar = ayarlar.kanallar
const roller = ayarlar.roller
const botconfig = ayarlar.config
const prefix = botconfig.prefix | const ok = client.emojis.cache.get(ayarlar.emojiler.discow_ok)
const msunucu = message.guild
const muye = message.member
const msahip = message.author
const mkanal = message.channel
const rgun = moment(new Date().toISOString()).format('DD')
const ray = moment(new Date().toISOString()).format('MM').replace("01", "Ocak").replace("02","Şubat").replace("03","Mart").replace("04", "Nisan").replace("05", "Mayıs").replace("06", "Haziran").replace("07", "Temmuz").replace("08", "Ağustos").replace("09", "Eylül").replace("10","Ekim").replace("11","Kasım").replace("12","Aralık")
const ryıl = moment(new Date().toISOString()).format('YYYY')
const rsaat = moment(new Date().toISOString()).format('HH:mm:ss')
const rcre = `${rgun} ${ray} ${ryıl} | ${rsaat}`
//--------------------------------------------------//--------------------------------------------------//--------------------------------------------------
function gonder(mesaj) {
mkanal.send(discow.setDescription(mesaj)).then(x => x.delete({ timeout: 15000 }))
}
function hata(mesaj) {
mkanal.send(discow.setDescription(`${dikkat} ${mesaj} ${dikkat}`)).then(x => x.delete({ timeout: 15000 }))
}
//--------------------------------------------------//--------------------------------------------------//--------------------------------------------------
//if(!botayar.sahipler.includes(msahip.id)) return hata(`**Bu Komutu Sadece \`Sahibim\` Kullanabilir.**`)
if(!muye.roles.cache.get(roller.yetkilistaff) && !muye.hasPermission("ADMINISTRATOR") && !botayar.sahipler.includes(msahip.id)) return hata(`**Bu Komutu Sadece \`Yetkililer\` Kullanabilir.**`)
//if(!muye.hasPermission("ADMINISTRATOR") && !botayar.sahipler.includes(msahip.id)) return hata(`**Bu Komutu Sadece \`Yöneticiler\` Kullanabilir.**`)
//--------------------------------------------------//--------------------------------------------------//--------------------------------------------------
const kullanıcı = message.mentions.members.first() || message.guild.members.cache.get(args[0])
if(!kullanıcı) return hata(`**Lütfen Bir** **\`Kullanıcı\`** **Belirt.**`)
const secim = args[1]
if(secim === "sifirla") {
if(!message.member.hasPermission("ADMINISTRATOR") && !botayar.sahipler.includes(message.author.id)) return;
gonder(`${ok} **${kullanıcı} Kullanıcısının İsim Geçmişi Başarıyla Temizlendi.** ${tik}`)
db.delete("İsimler&"+kullanıcı.id)
}
let isis = ``
var sayi = 1
if(db.get("İsimler&"+kullanıcı.id)) isis = `${db.get("İsimler&"+kullanıcı.id).map(x => `**\`${sayi++}\`. [\`${x.isim}\`] / [\`${x.cinsiyet}\`] / [${x.yetkili}] / [\`${x.tarih}\`]**`).join("\n")}`
if(db.get("İsimler&"+kullanıcı.id) === undefined) isis = `**\`\`\`Bulunamadı!\`\`\`**`
if(db.get("İsimler&"+kullanıcı.id) === null) isis = `**\`\`\`Bulunamadı!\`\`\`**`
if(!db.get("İsimler&"+kullanıcı.id)) isis = `**\`\`\`Bulunamadı!\`\`\`**`
if(db.get("İsimler&"+kullanıcı.id) === 0) isis = `**\`\`\`Bulunamadı!\`\`\`**`
gonder(`${ok} **${kullanıcı} Kullanıcısının İsim Geçmişi :**
${isis}`)
//--------------------------------------------------//--------------------------------------------------//--------------------------------------------------
}
exports.conf = {
aliases: ['isimler', 'nicknames', 'gecmis', 'gecmiş', 'geçmiş', 'geçmis'],
};
exports.help = {
name: 'İsimler Komutu',
};
//--------------------------------------------------//--------------------------------------------------//-------------------------------------------------- | const discow = new Discord.MessageEmbed().setColor('BLACK').setFooter(`${botconfig.footer}`, message.author.avatarURL({ dynamic: true, size: 2048 })).setTimestamp()
const dikkat = client.emojis.cache.get(ayarlar.emojiler.discow_carpi)
const tik = client.emojis.cache.get(ayarlar.emojiler.discow_tik) |
create_seqlib_dialog.py | from __future__ import print_function
import Tkinter as tk
import ttk
import tkSimpleDialog
from collections import OrderedDict
from ..barcode import BarcodeSeqLib
from ..barcodevariant import BcvSeqLib
from ..barcodeid import BcidSeqLib
from ..basic import BasicSeqLib
from ..idonly import IdOnlySeqLib
from ..overlap import OverlapSeqLib
SEQLIB_LABEL_TEXT = OrderedDict(
[
("BcvSeqLib", "Barcoded Variant"),
("BcidSeqLib", "Barcoded Identifier"),
("OverlapSeqLib", "Overlap"),
("BasicSeqLib", "Basic"),
("BarcodeSeqLib", "Barcodes Only"),
("IdOnlySeqLib", "Identifiers Only"),
]
)
#: map class names to class definitions to avoid use of globals()
SEQLIB_CLASSES = {
"BarcodeSeqLib": BarcodeSeqLib,
"BcvSeqLib": BcvSeqLib,
"BcidSeqLib": BcidSeqLib,
"BasicSeqLib": BasicSeqLib,
"IdOnlySeqLib": IdOnlySeqLib,
"OverlapSeqLib": OverlapSeqLib,
}
class CreateSeqLibDialog(tkSimpleDialog.Dialog):
"""
Dialog box for creating a new SeqLib.
"""
def __init__(self, parent_window, title="New SeqLib"):
self.element_tkstring = tk.StringVar()
self.element_type = None
tkSimpleDialog.Dialog.__init__(self, parent_window, title)
def body(self, master):
|
def buttonbox(self):
"""
Display only one button.
"""
box = tk.Frame(self)
w = tk.Button(box, text="OK", width=10, command=self.ok, default="active")
w.pack(side="left", padx=5, pady=5)
self.bind("<Return>", self.ok)
box.pack()
def apply(self):
try:
self.element_type = SEQLIB_CLASSES[self.element_tkstring.get()]
except KeyError:
raise KeyError("Unrecognized element type.")
| message = ttk.Label(master, text="SeqLib type:")
message.grid(column=0, row=0)
for i, k in enumerate(SEQLIB_LABEL_TEXT.keys()):
rb = ttk.Radiobutton(
master,
text=SEQLIB_LABEL_TEXT[k],
variable=self.element_tkstring,
value=k,
)
rb.grid(column=0, row=(i + 1), sticky="w")
if i == 0:
rb.invoke() |
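# Hedged usage sketch (added for illustration, not part of the original
# module): opens the dialog above from a bare Tk root and reads back the
# selected SeqLib class. Assumes CreateSeqLibDialog is importable here;
# root.withdraw() just hides the empty root window.
import Tkinter as tk
root = tk.Tk()
root.withdraw()
dialog = CreateSeqLibDialog(root)
if dialog.element_type is not None:
    seqlib_class = dialog.element_type  # e.g. BcvSeqLib; instantiate as needed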
clean_test_app.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 18 18:54:48 2020
@author: dylanroyston
"""
# -*- coding: utf-8 -*-
# import packages
#import dash_player
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import psycopg2
import os
import pandas as pd
import numpy as np
import plotly
import plotly.express as px
import plotly.graph_objects as go
import librosa
import librosa.display as ld
import IPython.display as ipd
import pylab as pl
import boto3
#import matplotlib as mpl
#import matplotlib.pyplot as plt
#from matplotlib import cm
#from colorspacious import cspace_converter
#from collections import OrderedDict
######
# connect to PSQL and retrieve
psql_usr = os.environ.get('PSQL_USR')
psql_pw = os.environ.get('PSQL_PW')
conn = psycopg2.connect(host = 'ec2-13-58-251-142.us-east-2.compute.amazonaws.com',
dbname = 'spectralize',
user='postgres',
password=psql_pw)
##### read out metadata
metadata = conn.cursor()
metadata.execute("SELECT * FROM clean_metadata WHERE false;")
cols = set(metadata.fetchall())
metadata.execute("SELECT * FROM clean_metadata;")
md = set(metadata.fetchall())
cols = ["s3_key", "song_id", "album", "albumartist", "artist",
"audio_offset", "bitrate", "channels", "comment", "composer",
"disc", "disc_total", "duration", "filesize", "genre",
"samplerate", "title", "track", "track_total", "year"]
tag_df = pd.DataFrame(data=md, columns=cols)
##### s3 access for playing audio files
s3_bucket = 'mdp-spectralize-pal'
number_of_files = 0
s3 = boto3.resource('s3')
bucket = s3.Bucket(s3_bucket)
# placeholders for callback initialization
standin_fp = '/home/dylanroyston/Documents/GIT/spectralize/app/hello.wav'
audio_sd_file = standin_fp
#audio_rawfile, new_sr = librosa.load(standin_fp, sr=None)
standin_data = np.array([[0,0],[0,0]])
standin_df = pd.DataFrame(standin_data, columns=['x','y'])
#audio_fig = px.line(standin_df, x='x', y='y', title='audio data', render_mode='webgl')
spec_fig = px.imshow(standin_df)
def load_audio_data(selected_row):
# read out audio data
#curr_song_id = tag_df.iloc[selected_row]['song_id']
curr_song_id = selected_row
# audiodata = conn.cursor()
# qstring = 'SELECT intensity FROM clean_audio WHERE song_id=' + str(curr_song_id)
# audiodata.execute(qstring)
# ad = np.array(audiodata.fetchall())
# audio_df = pd.DataFrame(data=ad, columns=['I'])
# audio_fig = px.line(audio_df, x=audio_df.index, y='I', title='audio data', render_mode='webgl')
# audio_fig.update_layout(
# height=250,
# margin_r=0,
# margin_l=0,
# margin_t=0,
# yaxis_title='',
# yaxis_fixedrange=True)
s3_key = tag_df.iloc[curr_song_id]['s3_key']
#this_row = tag_df.loc[tag_df['song_id'] == curr_song_id]
#s3_key = tag_df.iloc[this_row]['s3_key']
ext = s3_key[-4:]
audio_sd_file = '/home/dylanroyston/Documents/GIT/spectralize/app/audio_file' + ext
bucket.download_file(s3_key, audio_sd_file)
#audio_rawfile = librosa.load(audio_sd_file)
return audio_sd_file#, audio_fig
def | (selected_row):
curr_song_id = selected_row
specdata = conn.cursor()
qstring = 'SELECT * FROM clean_spec WHERE song_id=' + str(curr_song_id)
specdata.execute(qstring)
sd = np.array(specdata.fetchall())
spec_df = pd.DataFrame(data=sd)
#currtitle = tag_df.iloc[curr_song_id]['title']
#currdur = tag_df.iloc[curr_song_id]['duration']
# numpts = len(sd)
# interval = float(currdur) / numpts
# timeline = np.linspace(0,float(currdur),numpts)
# rt = timeline.round(0)
trim_sd = spec_df.iloc[:,2:]
spec_fig = px.imshow(trim_sd.transpose(),
origin='lower',
#title=currtitle,
#x=timeline
)
spec_fig.update_layout(
height=250,
margin_r=0,
margin_l=0,
margin_t=0,
yaxis_title='Frequency',
xaxis_title='Time',
#colorbar.title='power',
yaxis_fixedrange=True,
#x=str(rt)
#title=currtitle
)
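    # Added note: the transpose above puts frequency bins on the y-axis and
    # time frames on the x-axis, matching the 'Frequency'/'Time' axis titles.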
return spec_fig
#####
# initialize Dash app
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(children=[
# header
html.H1(children='Metadata'),
# metadata table
dash_table.DataTable(
id = 'metadata_table',
data=tag_df.to_dict('rows'),
columns=[{'id': c, 'name': c} for c in tag_df.columns],
style_cell={
'overflowX': 'auto',
'overflow': 'hidden',
'textOverflow': 'ellipsis',
'maxWidth': 10,
'row_selectable': 'single',
'font_family': 'Arial',
'font_size': '1.5rem',
'padding': '.5rem',
'backgroundColor': '#f4f4f2'
},
style_cell_conditional=[
{'textAlign': 'center'}
],
style_header={
'backgroundColor':'#f4f4f2',
'fontWeight': 'bold',
'overflowX': 'auto',
'textOverflow': 'ellipsis'
},
style_table={
'maxHeight':'500px',
'overflowX': 'scroll'
},
tooltip_data=[
{
column: {'value': str(value), 'type': 'markdown'}
for column, value in row.items()
} for row in tag_df.to_dict('rows')
],
tooltip_duration=None,
style_as_list_view=True,
),# end table
# load audio button
html.Br(),
html.Div(
[
dcc.Input(id='input_songnum', value='input song number', type='number'),
html.Button('Load audio',
id='submit-val',
style={'display': 'inline-block'},
n_clicks=0),
html.Div(id='song_input')
],
),
html.Br(),
# html.Audio(id="player", src=audio_sd_file, controls=True, style={
# "width": "100%"
# }),
# dash_player.DashPlayer(
# id='player',
# url='audio_sd_file',
# controls=True
# ),
html.Br(),
#dcc.Graph(id='waveform', figure=audio_fig),
html.Br(),
dcc.Graph(id='spect', figure=spec_fig)
])
##### finish Dash layout
##### callbacks
# load-audio button control
# @app.callback(
# Output('input_songnum', 'value'),
# [Input('submit-val', 'n_clicks')]
# )
# def retrieve_audio(value):
# return load_audio_data(value)
# @app.callback(
# Output('waveform', 'figure'),
# [Input('submit-val', 'n_clicks')]
# )
# def update_A_figure(submit_val):
# audio_fig = load_audio_data(submit_val)
# return audio_fig
## update audio player
# @app.callback(
# Output('player', 'src'),
# [Input('submit-val', 'n_clicks')]
# )
# def update_player(submit_val):
# audio_sd_file = load_audio_data(submit_val)
# return audio_sd_file
## update spect figure on button click
@app.callback(
Output('spect', 'figure'),
[Input('submit-val', 'n_clicks'),
Input('input_songnum', 'value')]
)
def update_S_figure(n_clicks, value):
changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
if 'submit-val' in changed_id:
spec_fig = load_spec_data(value)
return spec_fig
## combined audiofile/spec update
# @app.callback(
# [Output('player', 'src'),
# Output('spect', 'figure')],
# [Input('submit-val', 'n_clicks')]
# )
# def update_figures(submit_val):
# audio_sd_file = load_audio_data(submit_val)
# spec_fig = load_spec_data(submit_val)
# return audio_sd_file, spec_fig
# @app.callback(
# Output('metadata_table', 'derived_virtual_selected_rows'),
# [Input('submit-val', 'n_clicks'),
# State('metadata_table', 'derived_virtual_selected_rows')]
# )
# def update_audio(n_clicks, derived_virtual_selected_rows):
# if derived_virtual_selected_rows is None:
# derived_virtual_selected_rows = []
# return load_audio_data(derived_virtual_selected_rows)
if __name__ == '__main__':
#app.run_server(debug=True, port=8050, host='127.0.0.1')
app.run_server(debug=True, port=8050, host='127.0.0.1')
#app.run_server(debug=True, port=80, host='ec2-18-224-114-72.us-east-2.compute.amazonaws.com')
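# Hedged sketch (added, not part of the original app): the trigger-detection
# idiom used in update_S_figure above. The helper name is an assumption;
# component ids mirror the layout ('submit-val', 'input_songnum'), and the
# result is only meaningful when called from inside a live callback.
def triggering_component():
    ctx = dash.callback_context
    if not ctx.triggered:
        return None
    prop_id = ctx.triggered[0]['prop_id']  # e.g. 'submit-val.n_clicks'
    return prop_id.split('.')[0] or None   # '' when nothing has fired yet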
| load_spec_data |
CreateEmailIdentityPolicyCommand.ts | import { SESv2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../SESv2Client";
import { CreateEmailIdentityPolicyRequest, CreateEmailIdentityPolicyResponse } from "../models/models_0";
import {
deserializeAws_restJson1CreateEmailIdentityPolicyCommand,
serializeAws_restJson1CreateEmailIdentityPolicyCommand,
} from "../protocols/Aws_restJson1";
import { getSerdePlugin } from "@aws-sdk/middleware-serde";
import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http";
import { Command as $Command } from "@aws-sdk/smithy-client";
import {
FinalizeHandlerArguments,
Handler,
HandlerExecutionContext,
MiddlewareStack,
HttpHandlerOptions as __HttpHandlerOptions,
MetadataBearer as __MetadataBearer,
SerdeContext as __SerdeContext,
} from "@aws-sdk/types";
export type CreateEmailIdentityPolicyCommandInput = CreateEmailIdentityPolicyRequest;
export type CreateEmailIdentityPolicyCommandOutput = CreateEmailIdentityPolicyResponse & __MetadataBearer;
export class | extends $Command<
CreateEmailIdentityPolicyCommandInput,
CreateEmailIdentityPolicyCommandOutput,
SESv2ClientResolvedConfig
> {
// Start section: command_properties
// End section: command_properties
constructor(readonly input: CreateEmailIdentityPolicyCommandInput) {
// Start section: command_constructor
super();
// End section: command_constructor
}
resolveMiddleware(
clientStack: MiddlewareStack<ServiceInputTypes, ServiceOutputTypes>,
configuration: SESv2ClientResolvedConfig,
options?: __HttpHandlerOptions
): Handler<CreateEmailIdentityPolicyCommandInput, CreateEmailIdentityPolicyCommandOutput> {
this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize));
const stack = clientStack.concat(this.middlewareStack);
const { logger } = configuration;
const clientName = "SESv2Client";
const commandName = "CreateEmailIdentityPolicyCommand";
const handlerExecutionContext: HandlerExecutionContext = {
logger,
clientName,
commandName,
inputFilterSensitiveLog: CreateEmailIdentityPolicyRequest.filterSensitiveLog,
outputFilterSensitiveLog: CreateEmailIdentityPolicyResponse.filterSensitiveLog,
};
if (typeof logger.info === "function") {
logger.info({
clientName,
commandName,
});
}
const { requestHandler } = configuration;
return stack.resolve(
(request: FinalizeHandlerArguments<any>) =>
requestHandler.handle(request.request as __HttpRequest, options || {}),
handlerExecutionContext
);
}
private serialize(input: CreateEmailIdentityPolicyCommandInput, context: __SerdeContext): Promise<__HttpRequest> {
return serializeAws_restJson1CreateEmailIdentityPolicyCommand(input, context);
}
private deserialize(
output: __HttpResponse,
context: __SerdeContext
): Promise<CreateEmailIdentityPolicyCommandOutput> {
return deserializeAws_restJson1CreateEmailIdentityPolicyCommand(output, context);
}
// Start section: command_body_extra
// End section: command_body_extra
}
| CreateEmailIdentityPolicyCommand |
DolfinPDESolver.py | """
DolfinPDESolver.py
==================
A Python class structure written to interface CellModeller4 with the FEniCS/Dolfin finite element library.
Intended application: hybrid modelling of a microbial biofilm.
- Update: parameter input streamlined. New moving boundary mesh type.
- Update: added in-built test functions.
Created: W. P. J. Smith, 13.01.15
Updated: W. P. J. Smith, 22.03.15
Updated: W. P. J. Smith, 23.03.15
"""
try:
from dolfin import *
except ImportError:
print "Error: could not import dolfin library."
print "Try calling $ source /Applications/FEniCS.app/Contents/Resources/share/fenics/TestFenicsPath.conf "
import numpy
import math
from pyopencl.array import vec
class DolfinSolver:
def __init__(self, solverParams):
"""
Initialise the dolfin solver using a dictionary of params.
"""
# extract fixed params from params dictionary
self.pickleSteps = solverParams['pickleSteps']
self.h = solverParams['h']
self.origin = solverParams['origin']
self.N_x = int(solverParams['N_x'])
self.N_y = int(solverParams['N_y'])
self.L_x = solverParams['L_x']
self.L_y = solverParams['L_y']
self.u0 = solverParams['u0']
self.K = solverParams['K']
self.mu_eff = solverParams['mu_eff']
self.delta = solverParams['delta']
# some params we will have to calculate on the fly: set them to 0 for now
self.N_z = 0 # number of *canonical* elements in z
self.Lz_b = 0.0 # height at which to apply bulk boundary condition
self.W = 0.0 # thickness of mesh buffer layer
# some attributes that we'll update on the fly: set them to None for now
self.boundaryCondition = None
self.mesh = None
self.V = None
self.solution = None
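        # Hedged sketch (added): a minimal solverParams dictionary matching
        # the keys read above; the numeric values are illustrative only:
        #   {'pickleSteps': 10, 'h': 2.0, 'origin': [0.0, 0.0, 0.0],
        #    'N_x': 16, 'N_y': 16, 'L_x': 32.0, 'L_y': 32.0,
        #    'u0': 1.0, 'K': 0.5, 'mu_eff': 0.1, 'delta': 8.0}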
def SolvePDE(self, centers, vols, filename, dir, stepNum=0):
"""
High-level function to be called during the function.
"""
# get height of highest cell in domain
max_height = 0.0
for center in centers:
hz = center[2]
if hz > max_height:
max_height = hz
print 'max height is %f' % max_height
# update mesh, function space and BCs
# TODO: add a better toggle here
self.mesh = self.DoubleBufferedMesh(max_height)
#self.mesh = self.RegularMesh()
self.V = FunctionSpace(self.mesh, "CG", 1)
self.set_bcs()
# Use cell centres to evaluate volume occupancy of mesh
self.GetVolFracs(centers, vols)
# Export a meshfile showing element occupancies
#G = self.VolumeFractionOnElements()
#g = Function(self.V, name = "Volume fraction")
#g.interpolate(G)
#self.WriteFieldToFile(dir+filename+'_VolFracsCheck'+'.pvd', g)
# call a solver and save the solution
self.NewtonIterator()
if stepNum % self.pickleSteps == 0:
self.WriteFieldToFile(dir+filename+'.pvd', self.solution)
# interpolate solution to cell centres
u_local = self.InterpolateToCenters(centers)
return u_local
def NewtonIterator(self):
"""
A Newton iterator for solving non-linear problems.
/!\ Assumes that function space (V), boundaryCondition, vol_fracs are up-to-date.
"""
# Define variational problem
u = Function(self.V, name = "Nutrient")
v = TestFunction(self.V)
F = dot(grad(u), grad(v))*dx - self.MonodNutrientSink(u)*v*dx
# Call built-in Newton solver
#set_log_level(PROGRESS) # very detailed info for testing
set_log_level(WARNING) # near-silent info for simulations
#set_log_active(False) # suppress solver text
solve(F == 0, u, self.boundaryCondition, solver_parameters = {"newton_solver":
{"relative_tolerance": 1e-6}})
self.solution = u
def set_bcs(self):
"""
Initialise boundary conditions on the mesh.
/!\ Assumes that global variable Lz_b is up-to-date.
"""
dbc = TopDirichletBoundary()
self.boundaryCondition = DirichletBC(self.V, Constant(self.u0), dbc)
def DoubleBufferedMesh(self, max_height):
"""
Given a boundary height Lz_b, returns a FEniCS mesh object with
- canonical elements in the bottom of the cell domain (cells are always counted onto the same mesh)
        - buffer elements at the top of the cell domain (so the upper boundary can have an arbitrary height).
Having a double buffer layer avoids generating low-volume elements if Lz_b ~ n*h, but adds the constraint that
delta >= 2*h.
/!\ Exports boundary height Lz_b as a global variable, to be used by TopDirichletBoundary.
"""
global Lz_b
# Read off fixed parameters
L_x = self.L_x; N_x = self.N_x
L_y = self.L_y; N_y = self.N_y
delta = self.delta
h = self.h
# Calculate DBM dimensions
Lz_b = max_height + delta # height in um at which to apply bulk BC
A = int(Lz_b // h) # number of whole cubes that fit under Lz_b
B = Lz_b % h # remainder for this division
W = B + h # thickness of buffer layer in um
# Update mesh variables
self.Lz_b = Lz_b
self.W = W
self.N_z = A-1
self.L_z = (A-1)*h
# Create the node cloud and connectivity
P = N_x+1; Q = N_y+1; R = A+2;
cloud = self.GetDBLNodeCloud(P, Q, R)
TRI = self.GetNodeConnectivity(P, Q, R)
# Reformat the arrays to a datatype that FEniCS likes
cells = numpy.array(TRI, dtype=numpy.uintp)
nodes = numpy.array(cloud, dtype=numpy.double)
# Pass the node and cell arrays to the FEniCS mesh editor
mesh = Mesh(); editor = MeshEditor()
editor.open(mesh, 3, 3); editor.init_vertices(nodes.shape[0]); editor.init_cells(cells.shape[0])
[editor.add_vertex(i,n) for i,n in enumerate(nodes)]
[editor.add_cell(i,n) for i,n in enumerate(cells)]
editor.close()
return mesh
def RegularMesh(self):
"""
/!\ Exports boundary height Lz_b as a global variable, to be used by TopDirichletBoundary.
Assumes that L_z (not user-specified) is the same as L_x.
"""
global Lz_b
# Update mesh variables: choose z coordinates from x coordinates.
# L_z (counting) and Lz_b (boundary) are now the same and equal to L_x.
# We're not building a mesh manually, so we don't need to define W.
Lz_b = self.L_x
self.Lz_b = Lz_b
self.N_z = int(self.N_x)
self.L_z = Lz_b
# use an inbuilt regular mesh
p0 = self.origin
mesh = BoxMesh(p0[0],p0[1],p0[2],self.L_x,self.L_y,self.L_z,self.N_x,self.N_y,self.N_z)
return mesh
def GetDBLNodeCloud(self, P, Q, R):
"""
Compute node locations for a double-buffer-layer mesh
"""
x = numpy.linspace(0.0, self.L_x, num = P)
y = numpy.linspace(0.0, self.L_y, num = Q)
z = numpy.linspace(0.0, (self.N_z+2)*self.h, num = R)
(X, Y, Z) = numpy.meshgrid(x, y, z, indexing ='ij')
# Move the top two layers to make the buffer layer
Z[:,:,-1] = self.Lz_b;
Z[:,:,-2] = self.Lz_b - 0.5*self.W;
# Flatten into a 3-by-(Num_nodes) array
cloud = numpy.vstack((X.flatten('F'), Y.flatten('F'), Z.flatten('F'))).T
return cloud
def GetNodeConnectivity(self, P, Q, R):
"""
Compute the connectivity TRI of a regular grid of points
"""
# Create an P-by-Q-by-R array of integers, numbering along x then y then z
(pp,qq,rr) = numpy.meshgrid(range(0,P),range(0,Q),range(0,R),indexing='ij');
inds = numpy.vstack((pp.flatten('F'), qq.flatten('F'), rr.flatten('F'))).T
# In each direction, remove the last set of nodes (non-origin nodes)
mask = ((inds[:,0]==self.N_x) + (inds[:,1]==self.N_y) + (inds[:,2]==self.N_z+2) == False)
inds_p = inds[mask]
nods_p = inds_p[:,0] + P*inds_p[:,1] + P*Q*inds_p[:,2]
# Compute the stencil defining the 6 tetrahedra associated with a given origin
stencil = self.GetConnectivityStencil(P, Q)
# For each origin node, define the 6 associated elements; compile to list TRI
K = numpy.tile(nods_p.T, (6, 1))
TRI = (numpy.tile(K.flatten('F'), (4,1)) + numpy.tile(stencil, (len(nods_p),1) ).T).T
return TRI
def GetConnectivityStencil(self, P, Q):
"""
Given the vertices of a cube, group these points into 6 identical tetrahedra
"""
stencil = numpy.array([[0, 1, P+1, P*(Q+1)+1], \
[0, 1, P*Q+1, P*(Q+1)+1], \
[0, P*Q, P*Q+1, P*(Q+1)+1], \
[0, P, P+1, P*(Q+1)+1], \
[0, P*Q, P*(Q+1), P*(Q+1)+1], \
[0, P, P*(Q+1), P*(Q+1)+1]])
return stencil
def | (self, Point, cubeOrigin):
"""
        Given a mesh cube, determine which tetrahedron a point is in.
/!\ Assumes tetrahedron is part of a cube.
"""
Origin = cubeOrigin
p_x = Point[0]; p_y = Point[1]; p_z = Point[2]
a_x = Origin[0]; a_y = Origin[1]; a_z = Origin[2]
dx = p_x - a_x
dy = p_y - a_y
dz = p_z - a_z
t = 1*(dy - dz > 0) + 2*(dz - dx > 0) + 4*(dx - dy > 0)
conv_vec = [3,4,5,1,0,2]
return conv_vec[t-1]
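        # Worked example (added): for a unit cube at the origin, the point
        # (0.2, 0.7, 0.4) gives t = 1*(0.7>0.4) + 2*(0.4>0.2) + 4*(0.2>0.7)
        # = 1 + 2 + 0 = 3, and conv_vec[3-1] = 5, i.e. tetrahedron 5 of the
        # 6-way cube decomposition above.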
def GetCubeIndex(self, Point):
"""
        Given the mesh dimensions, determine which cube a point is in.
"""
p_x = Point[0]; p_y = Point[1]; p_z = Point[2]
p = int(numpy.floor(p_x*self.N_x / float(self.L_x))) # index along x
q = int(numpy.floor(p_y*self.N_y / float(self.L_y))) # index along y
r = int(numpy.floor(p_z*self.N_z / float(self.L_z))) # index along z
c = p + q*self.N_x + r*self.N_x*self.N_y # global index of this cube
cubeOrigin = [p*self.L_x/float(self.N_x),\
q*self.L_y/float(self.N_y),\
r*self.L_z/float(self.N_z)] # coordinates of this cube's origin
return int(c), cubeOrigin
def GetElementIndex(self, point):
"""
Get tetrahedron and cube indices and calculate global element index.
"""
[c, cubeOrigin] = self.GetCubeIndex(point)
t = self.GetTetrahedronIndex(point,cubeOrigin)
return t + 6*c
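        # Worked example (added): with N_x = N_y = 2, a point in cube c = 3
        # and tetrahedron t = 5 gets global element index 5 + 6*3 = 23.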
def AssignElementsToData(self, centers):
"""
Sort cell centres into their respective mesh elements.
"""
N = centers.shape[0]
elements = numpy.zeros((N), numpy.int32)
for i in range(0,N):
point = centers[i]
elements[i] = self.GetElementIndex(point)
return elements
def GetVolFracs(self, centers, vols):
"""
Create a global list of the cell volume fractions in mesh elements.
Assumes that self.mesh and self.h are up-to-date.
/!\ Exports the array vol_fracs as a global array, for use by VolumeFraction.
"""
global vol_fracs
# assign elements of cells
elements = self.AssignElementsToData(centers)
# need to define volume fraction for every element in the mesh
# (not just the canonical elements for counting)
num_elements = self.mesh.num_cells()
# sum cell volumes over each element
v = math.pow(self.h, 3) / 6.0
vol_fracs = numpy.bincount(elements,vols,num_elements) / v
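        # Worked example (added): with elements = [0, 0, 2], vols =
        # [1.0, 2.0, 4.0] and num_elements = 4, numpy.bincount returns
        # [3.0, 0.0, 4.0, 0.0]; dividing by the element volume v = h**3/6
        # converts summed cell volume per element into a volume fraction.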
def InterpolateToCenters(self, centers):
"""
Interpolate a solution object u onto a list of cell coordinates
"""
u = self.solution
data_t = tuple(map(tuple, centers)) # Convert to tuple format
u_local = numpy.zeros((len(data_t),),numpy.float64) # preallocate solution array
for i in range(0,len(data_t)): # loop over all cells
            u_local[i] = u(data_t[i][0:3]) # evaluate solution value at cell centre
return u_local
def WriteFieldToFile(self, filename, u):
"""
Export the PDE solution as a pvd mesh.
"""
print "Writing fields..."
File(filename) << u
print 'Done.'
def MonodNutrientSink(self, u):
"""
Monod function with which to build RHS.
"""
a = Constant(self.mu_eff)
b = Constant(self.K)
return -1 * a * u * VolumeFraction() / (b + u)
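        # Added note: this encodes the Monod sink f(u) = -mu_eff*u*phi/(K+u),
        # with phi the local cell volume fraction; for u >> K it saturates
        # to -mu_eff*phi.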
def VolumeFractionOnElements(self):
"""
Monod function with which to build RHS.
"""
return VolumeFraction()
# ==================== In-built test functions ====================== #
def TestProblem_A(self, dir, filename):
"""
Solves the homogenised reaction-diffusion problem on a standard mesh.
Imaginary cells are placed at the centroids of each element, so that vol_fracs should evaluate to 1 everywhere.
You can check this by eye, since we export the volume fraction function too.
"""
        # initiate mesh (regular mesh uses L_z = L_x)
self.mesh = self.RegularMesh()
# set the function space and boundary conditions in this mesh
self.V = FunctionSpace(self.mesh, "CG", 1)
self.set_bcs()
N = self.mesh.num_cells()
print 'We have %i elements in the mesh' % N
print 'Mesh divisions, as seen by counting gear, will be Nx=%i, Ny=%i, Nz=%i.' % (self.N_x, self.N_y, self.N_z)
print 'Mesh dimensions, as seen by counting gear, will be Lx=%i, Ly=%i, Lz=%i.' % (self.L_x, self.L_y, self.L_z)
print 'Finally, mesh parameter h is %f' % self.h
# load some imaginary cells onto the mesh
centers = numpy.zeros((N,), vec.float4)
for cell_no in range(N):
centroid = Cell(self.mesh, cell_no).midpoint()
centers[cell_no][0] = centroid.x()
centers[cell_no][1] = centroid.y()
centers[cell_no][2] = centroid.z()
# Give each cell the volume of the element it's in
element_volume = math.pow(self.h, 3) / 6.0
vols = element_volume * numpy.ones((N,))
# Use cell centres to evaluate volume occupancy of mesh
self.GetVolFracs(centers, vols)
# Export a meshfile showing element occupancies
G = self.VolumeFractionOnElements()
g = Function(self.V, name = "Volume fraction")
g.interpolate(G)
self.WriteFieldToFile(dir+filename+'_VolFracs'+'.pvd', g)
# call a solver and save the solution
self.NewtonIterator()
self.WriteFieldToFile(dir+filename+'_Solution'+'.pvd', self.solution)
def TestProblem_B(self, dir, filename, max_height):
"""
Solves the non-homogenous reaction-diffusion problem on a double-buffered mesh.
Imaginary cells are placed at the centroids of some of the canonical elements ONLY.
You can check this by eye, since we export the volume fraction function too.
"""
        # initiate mesh
self.mesh = self.DoubleBufferedMesh(max_height)
# The following are now defined (as attributes of self):
# o L_z is the canonical height
# o Lz_b is the boundary height
# o N_z is the number of canonical cubes in the z direction
# o W is the thickness of the buffer layer in um.
# set the function space and boundary conditions in this mesh
self.V = FunctionSpace(self.mesh, "CG", 1)
self.set_bcs()
# Number of elements to fill (canonical only, only up to L_z = 10)
N_can = 3*self.N_x*self.N_y*(self.N_z+1)
# load some imaginary cells onto the mesh
centers = numpy.zeros((N_can,), vec.float4)
for cell_no in range(N_can):
centroid = Cell(self.mesh, cell_no).midpoint()
centers[cell_no][0] = centroid.x()
centers[cell_no][1] = centroid.y()
centers[cell_no][2] = centroid.z()
# Give each cell the volume of the element it's in
element_volume = math.pow(self.h, 3) / 6.0
vols = element_volume * numpy.ones((N_can,))
# Use cell centres to evaluate volume occupancy of mesh
# Note that Vol fracs must define an occupancy for EVERY element in the mesh - not just canonical ones.
self.GetVolFracs(centers, vols)
# Export a meshfile showing element occupancies
G = self.VolumeFractionOnElements()
g = Function(self.V, name = "Volume fraction")
g.interpolate(G)
self.WriteFieldToFile(dir+filename+'_VolFracs'+'.pvd', g)
# call a solver and save the solution
self.NewtonIterator()
self.WriteFieldToFile(dir+filename+'_Solution'+'.pvd', self.solution)
def TestProblem_C(self, dir, filename, max_height):
"""
Solves the non-homogenous reaction-diffusion problem on a double-buffered mesh.
Imaginary cells are placed at the centroids of some of the canonical elements ONLY.
You can check this by eye, since we export the volume fraction function too.
/!\ L_x and L_y are exported to XYPeriodicDomain's map function as global variables.
"""
# Make sure that the periodic boundary mapping function can see domain sizes
global L_x
global L_y
L_x = self.L_x
L_y = self.L_y
        # initiate mesh and function space
self.mesh = self.DoubleBufferedMesh(max_height)
# define an X-Y periodic function space
pbc = XYPeriodicDomain()
self.V = FunctionSpace(self.mesh, "CG", 1, constrained_domain=pbc)
# set boundary conditions in this mesh
self.set_bcs()
# Number of elements to fill (canonical only, only up to L_z = 10)
N_can = 3*self.N_x*self.N_y*(self.N_z+1)
# load some imaginary cells onto the mesh
centers = numpy.zeros((N_can,), vec.float4)
for cell_no in range(N_can):
centroid = Cell(self.mesh, cell_no).midpoint()
centers[cell_no][0] = centroid.x()
centers[cell_no][1] = centroid.y()
centers[cell_no][2] = centroid.z()
# Give each cell the volume of the element it's in
element_volume = math.pow(self.h, 3) / 6.0
vols = element_volume * numpy.ones((N_can,))
# Use cell centres to evaluate volume occupancy of mesh
# Note that Vol fracs must define an occupancy for EVERY element in the mesh - not just canonical ones.
self.GetVolFracs(centers, vols)
# Export a meshfile showing element occupancies
G = self.VolumeFractionOnElements()
g = Function(self.V, name = "Volume fraction")
g.interpolate(G)
self.WriteFieldToFile(dir+filename+'_VolFracs'+'.pvd', g)
# call a solver and save the solution
self.NewtonIterator()
self.WriteFieldToFile(dir+filename+'_Solution'+'.pvd', self.solution)
def TestProblem_D(self, dir, filename, centers, vols):
"""
Solves the non-homogenous reaction-diffusion problem on a double-buffered mesh.
This time, we use realistic cell date, imported as centers and vols.
/!\ L_x and L_y are exported to XYPeriodicDomain's map function as global variables.
"""
# Make sure that the periodic boundary mapping function can see domain sizes
global L_x
global L_y
L_x = self.L_x
L_y = self.L_y
# compute max height
max_height = 0.0
for center in centers:
hz = center[2]
if hz > max_height:
max_height = hz
print 'max height is %f' % max_height
        # initiate mesh and function space
self.mesh = self.DoubleBufferedMesh(max_height)
# define a non-periodic function space
self.V = FunctionSpace(self.mesh, "CG", 1)
#pbc = XYPeriodicDomain() # doesn't make sense if cell data are not periodic
#self.V = FunctionSpace(self.mesh, "CG", 1, constrained_domain=pbc)
# set boundary conditions in this mesh
self.set_bcs()
# compute volume fractions
self.GetVolFracs(centers, vols)
# Export a meshfile showing element occupancies
G = self.VolumeFractionOnElements()
g = Function(self.V, name = "Volume fraction")
g.interpolate(G)
self.WriteFieldToFile(dir+filename+'_VolFracs'+'.pvd', g)
# call a solver and save the solution
self.NewtonIterator()
self.WriteFieldToFile(dir+filename+'_Solution'+'.pvd', self.solution)
def TestProblem_A_old(self, num_refinements, dir, filename):
"""
Solves the homogenised reaction-diffusion problem on a standard mesh.
This mesh can be refined as many times as desired.
Imaginary cells are placed at the centroids of each element, so that vol_fracs should evaluate to 1 everywhere.
You can check this by eye, since we export the volume fraction function too.
There's something odd going on with this one: volume fractions are assigned incorrectly if number of refinements > 1.
"""
        # initiate mesh (regular mesh uses L_z = L_x)
self.mesh = self.RegularMesh()
# refine mesh, updating mesh parameter as we go
for i in range(0,num_refinements):
self.mesh = refine(self.mesh)
self.h = 0.5*self.h
self.N_x = 2*self.N_x
self.N_y = 2*self.N_y
self.N_z = 2*self.N_z
# set the function space and boundary conditions in this mesh
self.V = FunctionSpace(self.mesh, "CG", 1)
self.set_bcs()
N = self.mesh.num_cells()
print 'With %i refinement step(s), we have %i elements in the mesh' % (num_refinements, N)
print 'Mesh divisions, as seen by counting gear, will be Nx=%i, Ny=%i, Nz=%i.' % (self.N_x, self.N_y, self.N_z)
print 'Mesh dimensions, as seen by counting gear, will be Lx=%i, Ly=%i, Lz=%i.' % (self.L_x, self.L_y, self.L_z)
print 'Finally, mesh parameter h is %f' % self.h
# load some imaginary cells onto the mesh
centers = numpy.zeros((N,), vec.float4)
for cell_no in range(N):
centroid = Cell(self.mesh, cell_no).midpoint()
centers[cell_no][0] = centroid.x()
centers[cell_no][1] = centroid.y()
centers[cell_no][2] = centroid.z()
# Give each cell the volume of the element it's in
element_volume = math.pow(self.h, 3) / 6.0
vols = element_volume * numpy.ones((N,))
# Use cell centres to evaluate volume occupancy of mesh
self.GetVolFracs(centers, vols)
# Export a meshfile showing element occupancies
G = self.VolumeFractionOnElements()
g = Function(self.V, name = "Volume fraction")
g.interpolate(G)
self.WriteFieldToFile(dir+filename+'_VolFracs'+'.pvd', g)
# call a solver and save the solution
self.NewtonIterator()
self.WriteFieldToFile(dir+filename+'_Solution'+'.pvd', self.solution)
# ============= Supporting classes for defining the PDE ============= #
class TopDirichletBoundary(SubDomain):
def inside(self, x, on_boundary):
"""
        Determine whether point x lies on the Dirichlet Boundary subdomain.
/!\ Assumes Lz_b is supplied as a global variable.
"""
global Lz_b
return bool(near(x[2],Lz_b) and on_boundary)
class XYPeriodicDomain(SubDomain):
def inside(self, x, on_boundary):
"""
Return true if we are on either of the two master boundaries.
/!\ Assumes that origin = [0,0,z]!
"""
return bool((near(x[0], 0.0) or near(x[1], 0.0)) and on_boundary)
def map(self, x, y):
"""
Map points on the slave boundaries to the corresponding master boundaries.
/!\ Takes L_x and L_y as global variables.
"""
global L_x
global L_y
if near(x[0], L_x) and near(x[1], L_y):
y[0] = x[0] - L_x
y[1] = x[1] - L_y
y[2] = x[2]
elif near(x[0], L_x):
y[0] = x[0] - L_x
y[1] = x[1]
y[2] = x[2]
elif near(x[1], L_y):
y[0] = x[0]
y[1] = x[1] - L_y
y[2] = x[2]
else:
y[0] = x[0]
y[1] = x[1]
y[2] = x[2]
class VolumeFraction(Expression):
def eval_cell(self, value, x, ufc_cell):
"""
Evaluate the cell volume fraction for this mesh element.
/!\ Assumes vol_fracs is being supplied as a global variable.
"""
global vol_fracs
value[0] = vol_fracs[ufc_cell.index]
| GetTetrahedronIndex |
payment.py | # -*- coding: utf-8 -*-
from trytond.pool import PoolMeta
from trytond.model import fields, ModelSQL, ModelView
from trytond.transaction import Transaction
__metaclass__ = PoolMeta
__all__ = ['MagentoPaymentGateway', 'Payment']
class MagentoPaymentGateway(ModelSQL, ModelView):
"""
This model maps the available payment gateways from magento to tryton.
"""
__name__ = 'magento.instance.payment_gateway'
_rec_name = 'title'
name = fields.Char("Name", required=True, select=True)
title = fields.Char('Title', required=True, select=True)
gateway = fields.Many2One(
'payment_gateway.gateway', 'Gateway', required=True,
ondelete='RESTRICT', select=True,
)
channel = fields.Many2One(
'sale.channel', 'Magento Channel', readonly=True, select=True, |
@classmethod
def __setup__(cls):
"""
Setup the class before adding to pool
"""
super(MagentoPaymentGateway, cls).__setup__()
cls._sql_constraints += [
(
'name_channel_unique', 'unique(name, channel)',
'Payment gateway already exist for this channel'
)
]
@classmethod
def create_all_using_magento_data(cls, magento_data):
"""
Creates record for list of payment gateways sent by magento.
It creates a new gateway only if one with the same name does not
exist for this channel.
"""
gateways = []
for data in magento_data:
gateway = cls.find_using_magento_data(data)
if gateway:
gateways.append(gateway)
else:
gateways.append(cls.create_using_magento_data(data))
return gateways
@classmethod
def create_using_magento_data(cls, gateway_data):
"""
Create record for gateway data sent by magento
"""
raise NotImplementedError
@classmethod
def find_using_magento_data(cls, gateway_data):
"""
Search for an existing gateway by matching name and channel.
        If found, return its active record, otherwise None
"""
try:
gateway, = cls.search([
('name', '=', gateway_data['name']),
('channel', '=', Transaction().context['current_channel']),
])
except ValueError:
return None
else:
return gateway
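        # Added note: the `gateway, = cls.search(...)` unpacking admits
        # exactly one match; zero or several matches raise ValueError,
        # which this method converts into None.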
class Payment:
__name__ = "sale.payment"
magento_id = fields.Integer('Magento ID', readonly=True)
@classmethod
def __setup__(cls):
"""
Setup the class before adding to pool
"""
super(Payment, cls).__setup__()
# TODO: Add validation to make sure payment magento id per channel
# is unique! | domain=[('source', '=', 'magento')]
) |
hello.ts | //created by Kevin - (https://github.com/Kyukishi)
//simple hello command
import{
Discord,
SimpleCommand,
SimpleCommandMessage,
}from 'discordx';
@Discord()
class | {
@SimpleCommand('hello', {aliases: ['hi']})
hello(command: SimpleCommandMessage){
command.message.reply(`👋 ${command.message.member}`);
}
} | helloCommand |
date.rs | //! This module defines the [DateFlag]. To set it up from [ArgMatches], a [Config] and its
//! [Default] value, use its [configure_from](Configurable::configure_from) method.
use super::Configurable;
use crate::app;
use crate::config_file::Config;
use crate::print_error;
use clap::ArgMatches;
/// The flag showing which kind of time stamps to display.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum DateFlag {
Date,
Relative,
ISO,
Formatted(String),
}
impl DateFlag {
/// Get a value from a date format string
fn from_format_string(value: &str) -> Option<Self> {
match app::validate_time_format(&value) {
Ok(()) => Some(Self::Formatted(value[1..].to_string())),
_ => {
print_error!("Not a valid date format: {}.", value);
None
}
}
}
/// Get a value from a str.
fn from_str(value: &str) -> Option<Self> {
match value {
"date" => Some(Self::Date),
"relative" => Some(Self::Relative),
_ if value.starts_with('+') => Self::from_format_string(&value),
_ => {
print_error!("Not a valid date value: {}.", value);
None
}
}
}
}
impl Configurable<Self> for DateFlag {
/// Get a potential `DateFlag` variant from [ArgMatches].
///
/// If the "classic" argument is passed, then this returns the [DateFlag::Date] variant in a
/// [Some]. Otherwise if the argument is passed, this returns the variant corresponding to its
/// parameter in a [Some]. Otherwise this returns [None].
fn from_arg_matches(matches: &ArgMatches) -> Option<Self> {
if matches.is_present("classic") {
Some(Self::Date)
} else if matches.occurrences_of("date") > 0 {
match matches.value_of("date") {
Some("date") => Some(Self::Date),
Some("relative") => Some(Self::Relative),
Some(format) if format.starts_with('+') => {
Some(Self::Formatted(format[1..].to_owned()))
}
_ => panic!("This should not be reachable!"),
}
} else {
None
}
}
/// Get a potential `DateFlag` variant from a [Config].
///
/// If the `Config::classic` is `true` then this returns the Some(DateFlag::Date),
/// Otherwise if the `Config::date` has value and is one of "date" or "relative",
/// this returns its corresponding variant in a [Some].
/// Otherwise this returns [None].
fn from_config(config: &Config) -> Option<Self> {
if let Some(true) = &config.classic {
return Some(Self::Date);
}
if let Some(date) = &config.date {
Self::from_str(&date)
} else {
None
}
}
/// Get a potential `DateFlag` variant from the environment.
fn from_environment() -> Option<Self> {
if let Ok(value) = std::env::var("TIME_STYLE") {
match value.as_str() {
"full-iso" => Some(Self::Formatted("%F %T.%f %z".into())),
"long-iso" => Some(Self::Formatted("%F %R".into())),
"iso" => Some(Self::ISO),
_ if value.starts_with('+') => Self::from_format_string(&value),
_ => {
print_error!("Not a valid date value: {}.", value);
None
}
}
} else {
None
}
}
}
/// The default value for `DateFlag` is [DateFlag::Date].
impl Default for DateFlag {
fn default() -> Self {
Self::Date
}
}
#[cfg(test)]
mod test {
use super::DateFlag;
use crate::app;
use crate::config_file::Config;
use crate::flags::Configurable;
#[test]
fn test_from_arg_matches_none() {
let argv = vec!["lsd"];
let matches = app::build().get_matches_from_safe(argv).unwrap();
assert_eq!(None, DateFlag::from_arg_matches(&matches));
}
#[test]
fn test_from_arg_matches_date() {
let argv = vec!["lsd", "--date", "date"];
let matches = app::build().get_matches_from_safe(argv).unwrap();
assert_eq!(Some(DateFlag::Date), DateFlag::from_arg_matches(&matches));
}
#[test]
fn test_from_arg_matches_relative() {
let argv = vec!["lsd", "--date", "relative"];
let matches = app::build().get_matches_from_safe(argv).unwrap();
assert_eq!(
Some(DateFlag::Relative),
DateFlag::from_arg_matches(&matches)
);
}
#[test]
fn test_from_arg_matches_format() {
let argv = vec!["lsd", "--date", "+%F"];
let matches = app::build().get_matches_from_safe(argv).unwrap();
assert_eq!(
Some(DateFlag::Formatted("%F".to_string())),
DateFlag::from_arg_matches(&matches)
);
}
#[test]
#[should_panic(expected = "invalid format specifier: %J")]
fn test_from_arg_matches_format_invalid() {
let argv = vec!["lsd", "--date", "+%J"];
let matches = app::build().get_matches_from_safe(argv).unwrap();
DateFlag::from_arg_matches(&matches);
}
#[test]
fn test_from_arg_matches_classic_mode() {
let argv = vec!["lsd", "--date", "date", "--classic"];
let matches = app::build().get_matches_from_safe(argv).unwrap();
assert_eq!(Some(DateFlag::Date), DateFlag::from_arg_matches(&matches));
}
#[test]
fn test_from_config_none() {
assert_eq!(None, DateFlag::from_config(&Config::with_none()));
}
#[test]
fn test_from_config_date() {
let mut c = Config::with_none();
c.date = Some("date".into());
assert_eq!(Some(DateFlag::Date), DateFlag::from_config(&c));
}
#[test]
fn test_from_config_relative() {
let mut c = Config::with_none();
c.date = Some("relative".into());
assert_eq!(Some(DateFlag::Relative), DateFlag::from_config(&c));
}
#[test]
fn test_from_config_format() {
let mut c = Config::with_none();
c.date = Some("+%F".into());
assert_eq!(
Some(DateFlag::Formatted("%F".to_string())),
DateFlag::from_config(&c)
);
}
#[test]
fn test_from_config_format_invalid() {
let mut c = Config::with_none();
c.date = Some("+%J".into());
assert_eq!(None, DateFlag::from_config(&c));
}
#[test]
fn | () {
let mut c = Config::with_none();
c.date = Some("relative".into());
c.classic = Some(true);
assert_eq!(Some(DateFlag::Date), DateFlag::from_config(&c));
}
#[test]
#[serial_test::serial]
fn test_from_environment_none() {
std::env::set_var("TIME_STYLE", "");
assert_eq!(None, DateFlag::from_environment());
}
#[test]
#[serial_test::serial]
fn test_from_environment_full_iso() {
std::env::set_var("TIME_STYLE", "full-iso");
assert_eq!(
Some(DateFlag::Formatted("%F %T.%f %z".into())),
DateFlag::from_environment()
);
}
#[test]
#[serial_test::serial]
fn test_from_environment_long_iso() {
std::env::set_var("TIME_STYLE", "long-iso");
assert_eq!(
Some(DateFlag::Formatted("%F %R".into())),
DateFlag::from_environment()
);
}
#[test]
#[serial_test::serial]
fn test_from_environment_iso() {
std::env::set_var("TIME_STYLE", "iso");
assert_eq!(Some(DateFlag::ISO), DateFlag::from_environment());
}
#[test]
#[serial_test::serial]
fn test_from_environment_format() {
std::env::set_var("TIME_STYLE", "+%F");
assert_eq!(
Some(DateFlag::Formatted("%F".into())),
DateFlag::from_environment()
);
}
#[test]
#[serial_test::serial]
fn test_parsing_order_arg() {
std::env::set_var("TIME_STYLE", "+%R");
let argv = vec!["lsd", "--date", "+%F"];
let matches = app::build().get_matches_from_safe(argv).unwrap();
let mut config = Config::with_none();
config.date = Some("+%c".into());
assert_eq!(
DateFlag::Formatted("%F".into()),
DateFlag::configure_from(&matches, &config)
);
}
#[test]
#[serial_test::serial]
fn test_parsing_order_env() {
std::env::set_var("TIME_STYLE", "+%R");
let argv = vec!["lsd"];
let matches = app::build().get_matches_from_safe(argv).unwrap();
let mut config = Config::with_none();
config.date = Some("+%c".into());
assert_eq!(
DateFlag::Formatted("%R".into()),
DateFlag::configure_from(&matches, &config)
);
}
#[test]
#[serial_test::serial]
fn test_parsing_order_config() {
std::env::set_var("TIME_STYLE", "");
let argv = vec!["lsd"];
let matches = app::build().get_matches_from_safe(argv).unwrap();
let mut config = Config::with_none();
config.date = Some("+%c".into());
assert_eq!(
DateFlag::Formatted("%c".into()),
DateFlag::configure_from(&matches, &config)
);
}
}
| test_from_config_classic_mode |
alleyoop_utrrates.py | """Run Alleyoop utrrates tool on Slamdunk results."""
import os
from plumbum import TEE
from resolwe.process import (
Cmd,
DataField,
FileField,
IntegerField,
Process,
StringField,
)
class AlleyoopUtrRates(Process):
"""Run Alleyoop utrrates."""
slug = "alleyoop-utr-rates"
process_type = "data:alleyoop:utrrates"
name = "Alleyoop UTR Rates"
requirements = {
"expression-engine": "jinja",
"executor": {
"docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/slamdunk:2.0.0"},
},
"resources": {
"cores": 1,
"memory": 16384,
},
}
entity = {
"type": "sample",
}
category = "Slamdunk"
data_name = '{{ slamdunk|sample_name|default("?") }}'
version = "1.2.1"
class Input:
"""Input fields for AlleyoopUtrRates."""
ref_seq = DataField(
"seq:nucleotide", label="FASTA file containig sequences for aligning"
)
regions = DataField(
"bed", label="BED file with coordinates of regions of interest"
)
slamdunk = DataField("alignment:bam:slamdunk", label="Slamdunk results")
read_length = IntegerField(
label="Maximum read length",
description="Maximum length of reads in the input FASTQ file",
default=150,
)
class Output:
"""Output fields to process AlleyoopUtrRates."""
report = FileField(
label="Tab-separated file containing conversion rates on each region of interest"
)
plot = FileField(label="Region of interest conversion rate plot")
species = StringField(label="Species")
build = StringField(label="Build")
def run(self, inputs, outputs):
"""Run analysis."""
basename = os.path.basename(inputs.slamdunk.output.bam.path)
assert basename.endswith(".bam")
name = basename[:-4]
args = [
"-o",
"utrrates",
"-r",
inputs.ref_seq.output.fasta.path,
"-b",
inputs.regions.output.bed.path,
"-l",
inputs.read_length,
]
return_code, _, _ = Cmd["alleyoop"]["utrrates"][args][
inputs.slamdunk.output.bam.path
] & TEE(retcode=None)
if return_code:
self.error("Alleyoop utrrates analysis failed.")
rates_file = os.path.join("utrrates", f"{name}_mutationrates_utr.csv")
rates_file_renamed = os.path.join("utrrates", f"{name}_mutationrates.txt")
os.rename(rates_file, rates_file_renamed)
outputs.report = rates_file_renamed
outputs.plot = os.path.join("utrrates", f"{name}_mutationrates_utr.pdf")
outputs.species = inputs.slamdunk.output.species
outputs.build = inputs.slamdunk.output.build | ||
aggregator.go | /*
* Copyright 2020-2021, Offchain Labs, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package aggregator
import (
"context"
"math/big"
"github.com/offchainlabs/arbitrum/packages/arb-rpc-node/batcher"
"github.com/offchainlabs/arbitrum/packages/arb-rpc-node/snapshot"
"github.com/offchainlabs/arbitrum/packages/arb-rpc-node/txdb"
"github.com/offchainlabs/arbitrum/packages/arb-util/core"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
ethcommon "github.com/ethereum/go-ethereum/common"
ethcore "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/rpc"
"github.com/offchainlabs/arbitrum/packages/arb-evm/evm"
"github.com/offchainlabs/arbitrum/packages/arb-util/common"
"github.com/offchainlabs/arbitrum/packages/arb-util/machine"
)
var logger = log.With().Caller().Str("component", "aggregator").Logger()
type Server struct {
chain common.Address
chainId *big.Int
batch batcher.TransactionBatcher
db *txdb.TxDB
scope event.SubscriptionScope
}
// NewServer returns a new instance of the Server class
func NewServer(
batch batcher.TransactionBatcher,
rollupAddress common.Address,
chainId *big.Int,
db *txdb.TxDB,
) *Server {
return &Server{
chain: rollupAddress,
chainId: chainId,
batch: batch,
db: db,
}
}
// SendTransaction takes a request signed transaction l2message from a Client
// and puts it in a queue to be included in the next transaction batch
func (m *Server) SendTransaction(ctx context.Context, tx *types.Transaction) error {
return m.batch.SendTransaction(ctx, tx)
}
func (m *Server) GetBlockCount() (uint64, error) {
latest, err := m.db.BlockCount()
if err != nil {
return 0, err
}
return latest, nil
}
func (m *Server) BlockNum(block *rpc.BlockNumber) (uint64, error) {
if *block == rpc.LatestBlockNumber || *block == rpc.PendingBlockNumber {
latest, err := m.db.LatestBlock()
if err != nil {
return 0, err
}
return latest.Header.Number.Uint64(), nil
} else if *block >= 0 {
return uint64(*block), nil
} else {
return 0, errors.Errorf("unsupported BlockNumber: %v", block.Int64())
}
}
func (m *Server) LatestBlockHeader() (*types.Header, error) {
latest, err := m.db.LatestBlock()
if err != nil || latest == nil {
return nil, err
}
return latest.Header, nil
}
// GetMessageResult returns the value output by the VM in response to the
// l2message with the given hash
func (m *Server) GetRequestResult(requestId common.Hash) (*evm.TxResult, error) {
return m.db.GetRequest(requestId)
}
func (m *Server) GetL2ToL1Proof(batchNumber *big.Int, index uint64) (*evm.MerkleRootProof, error) {
batch, err := m.db.GetMessageBatch(batchNumber)
if err != nil {
return nil, err
}
if batch == nil {
return nil, errors.New("batch doesn't exist")
}
return batch.GenerateProof(index)
}
func (m *Server) GetChainAddress() ethcommon.Address {
return m.chain.ToEthAddress()
}
func (m *Server) ChainId() *big.Int {
return m.chainId
}
func (m *Server) BlockInfoByNumber(height uint64) (*machine.BlockInfo, error) {
return m.db.GetBlock(height)
}
func (m *Server) BlockLogFromInfo(block *machine.BlockInfo) (*evm.BlockInfo, error) {
return m.db.GetL2Block(block)
}
func (m *Server) BlockInfoByHash(hash common.Hash) (*machine.BlockInfo, error) {
return m.db.GetBlockWithHash(hash)
}
func (m *Server) GetMachineBlockResults(block *machine.BlockInfo) (*evm.BlockInfo, []*evm.TxResult, error) {
return m.db.GetBlockResults(block)
}
func (m *Server) GetTxInBlockAtIndexResults(res *machine.BlockInfo, index uint64) (*evm.TxResult, error) {
avmLog, err := core.GetZeroOrOneLog(m.db.Lookup, new(big.Int).SetUint64(res.InitialLogIndex()+index))
if err != nil || avmLog == nil {
return nil, err
}
evmRes, err := evm.NewTxResultFromValue(avmLog)
if err != nil {
return nil, err
}
if evmRes.IncomingRequest.L2BlockNumber.Cmp(res.Header.Number) != 0 {
return nil, nil
}
return evmRes, nil
}
func (m *Server) GetSnapshot(blockHeight uint64) (*snapshot.Snapshot, error) {
return m.db.GetSnapshot(blockHeight)
}
func (m *Server) LatestSnapshot() (*snapshot.Snapshot, error) {
return m.db.LatestSnapshot()
}
func (m *Server) PendingSnapshot() (*snapshot.Snapshot, error) {
pending, err := m.batch.PendingSnapshot()
if err != nil {
return nil, err
}
if pending == nil {
return m.LatestSnapshot()
}
return pending, nil
}
func (m *Server) Aggregator() *common.Address {
return m.batch.Aggregator()
}
func (m *Server) PendingTransactionCount(ctx context.Context, account common.Address) *uint64 {
return m.batch.PendingTransactionCount(ctx, account)
}
func (m *Server) ChainDb() ethdb.Database {
return nil
}
func (m *Server) HeaderByNumber(_ context.Context, blockNumber rpc.BlockNumber) (*types.Header, error) {
height, err := m.BlockNum(&blockNumber)
if err != nil {
return nil, err
}
info, err := m.db.GetBlock(height)
if err != nil || info == nil {
return nil, err
}
return info.Header, nil
}
func (m *Server) HeaderByHash(_ context.Context, blockHash ethcommon.Hash) (*types.Header, error) {
info, err := m.BlockInfoByHash(common.NewHashFromEth(blockHash))
if err != nil || info == nil {
return nil, err
}
return info.Header, nil
}
func (m *Server) GetReceipts(_ context.Context, blockHash ethcommon.Hash) (types.Receipts, error) {
info, err := m.db.GetBlockWithHash(common.NewHashFromEth(blockHash))
if err != nil || info == nil {
return nil, err
}
_, results, err := m.GetMachineBlockResults(info)
if err != nil || results == nil {
return nil, err
}
receipts := make(types.Receipts, 0, len(results))
for _, res := range results {
receipts = append(receipts, res.ToEthReceipt(common.NewHashFromEth(blockHash)))
}
return receipts, nil
}
func (m *Server) GetLogs(_ context.Context, blockHash ethcommon.Hash) ([][]*types.Log, error) {
info, err := m.db.GetBlockWithHash(common.NewHashFromEth(blockHash))
if err != nil || info == nil {
return nil, err
}
_, results, err := m.GetMachineBlockResults(info)
if err != nil || results == nil {
return nil, err
}
logs := make([][]*types.Log, 0, len(results))
for _, res := range results {
logs = append(logs, res.EthLogs(common.NewHashFromEth(blockHash))) | func (m *Server) BloomStatus() (uint64, uint64) {
return 0, 0
}
func (m *Server) ServiceFilter(_ context.Context, _ *bloombits.MatcherSession) {
// Currently not implemented
}
func (m *Server) SubscribeNewTxsEvent(ch chan<- ethcore.NewTxsEvent) event.Subscription {
return m.scope.Track(m.batch.SubscribeNewTxsEvent(ch))
}
func (m *Server) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {
return m.scope.Track(m.db.SubscribePendingLogsEvent(ch))
}
func (m *Server) SubscribeChainEvent(ch chan<- ethcore.ChainEvent) event.Subscription {
return m.scope.Track(m.db.SubscribeChainEvent(ch))
}
func (m *Server) SubscribeChainHeadEvent(ch chan<- ethcore.ChainEvent) event.Subscription {
return m.scope.Track(m.db.SubscribeChainHeadEvent(ch))
}
func (m *Server) SubscribeChainSideEvent(ch chan<- ethcore.ChainEvent) event.Subscription {
return m.scope.Track(m.db.SubscribeChainSideEvent(ch))
}
func (m *Server) SubscribeRemovedLogsEvent(ch chan<- ethcore.RemovedLogsEvent) event.Subscription {
return m.scope.Track(m.db.SubscribeRemovedLogsEvent(ch))
}
func (m *Server) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
return m.scope.Track(m.db.SubscribeLogsEvent(ch))
}
func (m *Server) SubscribeBlockProcessingEvent(ch chan<- []*types.Log) event.Subscription {
return m.scope.Track(m.db.SubscribeBlockProcessingEvent(ch))
} | }
return logs, nil
}
|
secrets.go | // Package local implements Secrets plugin.
package local
import (
"errors"
"fmt"
"io/ioutil"
"reflect"
"strings"
"time"
"github.com/aws/aws-k8s-tester/eks/secrets"
eks_tester "github.com/aws/aws-k8s-tester/eks/tester"
"github.com/aws/aws-k8s-tester/eksconfig"
k8s_client "github.com/aws/aws-k8s-tester/pkg/k8s-client"
"github.com/aws/aws-k8s-tester/pkg/timeutil"
"go.uber.org/zap"
)
// Config defines secrets local tester configuration.
type Config struct {
Logger *zap.Logger
Stopc chan struct{}
EKSConfig *eksconfig.Config
K8SClient k8s_client.EKS
}
func New(cfg Config) eks_tester.Tester |
type tester struct {
cfg Config
}
func (ts *tester) Create() (err error) {
if !ts.cfg.EKSConfig.IsEnabledAddOnSecretsLocal() {
ts.cfg.Logger.Info("skipping tester.Create", zap.String("tester", reflect.TypeOf(tester{}).PkgPath()))
return nil
}
if ts.cfg.EKSConfig.AddOnSecretsLocal.Created {
ts.cfg.Logger.Info("skipping tester.Create", zap.String("tester", reflect.TypeOf(tester{}).PkgPath()))
return nil
}
ts.cfg.Logger.Info("starting tester.Create", zap.String("tester", reflect.TypeOf(tester{}).PkgPath()))
ts.cfg.EKSConfig.AddOnSecretsLocal.Created = true
ts.cfg.EKSConfig.Sync()
createStart := time.Now()
defer func() {
createEnd := time.Now()
ts.cfg.EKSConfig.AddOnSecretsLocal.TimeFrameCreate = timeutil.NewTimeFrame(createStart, createEnd)
ts.cfg.EKSConfig.Sync()
}()
if err := k8s_client.CreateNamespace(
ts.cfg.Logger,
ts.cfg.K8SClient.KubernetesClientSet(),
ts.cfg.EKSConfig.AddOnSecretsLocal.Namespace,
); err != nil {
return err
}
loader := secrets.New(secrets.Config{
Logger: ts.cfg.Logger,
Stopc: ts.cfg.Stopc,
Client: ts.cfg.K8SClient,
ClientTimeout: ts.cfg.EKSConfig.ClientTimeout,
Namespace: ts.cfg.EKSConfig.AddOnSecretsLocal.Namespace,
NamePrefix: ts.cfg.EKSConfig.AddOnSecretsLocal.NamePrefix,
Objects: ts.cfg.EKSConfig.AddOnSecretsLocal.Objects,
ObjectSize: ts.cfg.EKSConfig.AddOnSecretsLocal.ObjectSize,
WritesJSONPath: ts.cfg.EKSConfig.AddOnSecretsLocal.RequestsWritesJSONPath,
ReadsJSONPath: ts.cfg.EKSConfig.AddOnSecretsLocal.RequestsReadsJSONPath,
})
loader.Start()
loader.Stop()
ts.cfg.Logger.Info("completing secrets local tester")
ts.cfg.EKSConfig.AddOnSecretsLocal.RequestsWritesSummary, ts.cfg.EKSConfig.AddOnSecretsLocal.RequestsReadsSummary, err = loader.CollectMetrics()
ts.cfg.EKSConfig.Sync()
if err != nil {
ts.cfg.Logger.Warn("failed to get metrics", zap.Error(err))
} else {
err = ioutil.WriteFile(ts.cfg.EKSConfig.AddOnSecretsLocal.RequestsWritesSummaryJSONPath, []byte(ts.cfg.EKSConfig.AddOnSecretsLocal.RequestsWritesSummary.JSON()), 0600)
if err != nil {
ts.cfg.Logger.Warn("failed to write file", zap.Error(err))
return err
}
err = ioutil.WriteFile(ts.cfg.EKSConfig.AddOnSecretsLocal.RequestsWritesSummaryTablePath, []byte(ts.cfg.EKSConfig.AddOnSecretsLocal.RequestsWritesSummary.Table()), 0600)
if err != nil {
ts.cfg.Logger.Warn("failed to write file", zap.Error(err))
return err
}
fmt.Printf("\n\nAddOnSecretsLocal.RequestsWritesSummary:\n%s\n", ts.cfg.EKSConfig.AddOnSecretsLocal.RequestsWritesSummary.Table())
err = ioutil.WriteFile(ts.cfg.EKSConfig.AddOnSecretsLocal.RequestsReadsSummaryJSONPath, []byte(ts.cfg.EKSConfig.AddOnSecretsLocal.RequestsReadsSummary.JSON()), 0600)
if err != nil {
ts.cfg.Logger.Warn("failed to write file", zap.Error(err))
return err
}
err = ioutil.WriteFile(ts.cfg.EKSConfig.AddOnSecretsLocal.RequestsReadsSummaryTablePath, []byte(ts.cfg.EKSConfig.AddOnSecretsLocal.RequestsReadsSummary.Table()), 0600)
if err != nil {
ts.cfg.Logger.Warn("failed to write file", zap.Error(err))
return err
}
fmt.Printf("\n\nAddOnSecretsLocal.RequestsReadsSummary:\n%s\n", ts.cfg.EKSConfig.AddOnSecretsLocal.RequestsReadsSummary.Table())
}
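	// Poll cluster health for up to 5 minutes; transient API-server
	// disruption from the load test should not immediately fail the tester.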
waitDur, retryStart := 5*time.Minute, time.Now()
	for time.Since(retryStart) < waitDur {
select {
case <-ts.cfg.Stopc:
ts.cfg.Logger.Warn("health check aborted")
return nil
case <-time.After(5 * time.Second):
}
err = ts.cfg.K8SClient.CheckHealth()
if err == nil {
break
}
ts.cfg.Logger.Warn("health check failed", zap.Error(err))
}
ts.cfg.EKSConfig.Sync()
if err == nil {
ts.cfg.Logger.Info("health check success after secrets local tester")
} else {
ts.cfg.Logger.Warn("health check failed after secrets local tester", zap.Error(err))
}
return err
}
func (ts *tester) Delete() error {
if !ts.cfg.EKSConfig.IsEnabledAddOnSecretsLocal() {
ts.cfg.Logger.Info("skipping tester.Delete", zap.String("tester", reflect.TypeOf(tester{}).PkgPath()))
return nil
}
if !ts.cfg.EKSConfig.AddOnSecretsLocal.Created {
ts.cfg.Logger.Info("skipping tester.Delete", zap.String("tester", reflect.TypeOf(tester{}).PkgPath()))
return nil
}
ts.cfg.Logger.Info("starting tester.Delete", zap.String("tester", reflect.TypeOf(tester{}).PkgPath()))
deleteStart := time.Now()
defer func() {
deleteEnd := time.Now()
ts.cfg.EKSConfig.AddOnSecretsLocal.TimeFrameDelete = timeutil.NewTimeFrame(deleteStart, deleteEnd)
ts.cfg.EKSConfig.Sync()
}()
var errs []string
if err := k8s_client.DeleteNamespaceAndWait(
ts.cfg.Logger,
ts.cfg.K8SClient.KubernetesClientSet(),
ts.cfg.EKSConfig.AddOnSecretsLocal.Namespace,
k8s_client.DefaultNamespaceDeletionInterval,
k8s_client.DefaultNamespaceDeletionTimeout,
); err != nil {
return fmt.Errorf("failed to delete secrets local tester namespace (%v)", err)
}
if len(errs) > 0 {
return errors.New(strings.Join(errs, ", "))
}
ts.cfg.EKSConfig.AddOnSecretsLocal.Created = false
return ts.cfg.EKSConfig.Sync()
}
func (ts *tester) AggregateResults() (err error) {
if !ts.cfg.EKSConfig.IsEnabledAddOnSecretsLocal() {
ts.cfg.Logger.Info("skipping tester.AggregateResults", zap.String("tester", reflect.TypeOf(tester{}).PkgPath()))
return nil
}
if !ts.cfg.EKSConfig.AddOnSecretsLocal.Created {
ts.cfg.Logger.Info("skipping tester.AggregateResults", zap.String("tester", reflect.TypeOf(tester{}).PkgPath()))
return nil
}
ts.cfg.Logger.Info("starting tester.AggregateResults", zap.String("tester", reflect.TypeOf(tester{}).PkgPath()))
return nil
}
| {
cfg.Logger.Info("creating tester", zap.String("tester", reflect.TypeOf(tester{}).PkgPath()))
return &tester{cfg: cfg}
} |
get_api_validation.py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetApiValidationResult',
'AwaitableGetApiValidationResult',
'get_api_validation',
]
@pulumi.output_type
class GetApiValidationResult: | """
def __init__(__self__, api_id=None, id=None, validations=None):
if api_id and not isinstance(api_id, str):
raise TypeError("Expected argument 'api_id' to be a str")
pulumi.set(__self__, "api_id", api_id)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if validations and not isinstance(validations, list):
raise TypeError("Expected argument 'validations' to be a list")
pulumi.set(__self__, "validations", validations)
@property
@pulumi.getter(name="apiId")
def api_id(self) -> str:
return pulumi.get(self, "api_id")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def validations(self) -> Sequence['outputs.GetApiValidationValidationResult']:
"""
API validation results.
"""
return pulumi.get(self, "validations")
class AwaitableGetApiValidationResult(GetApiValidationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetApiValidationResult(
api_id=self.api_id,
id=self.id,
validations=self.validations)
def get_api_validation(api_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApiValidationResult:
"""
This data source provides details about a specific Api Validation resource in Oracle Cloud Infrastructure API Gateway service.
Gets the API validation results.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_api_validation = oci.apigateway.get_api_validation(api_id=oci_apigateway_api["test_api"]["id"])
```
:param str api_id: The ocid of the API.
"""
__args__ = dict()
__args__['apiId'] = api_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:apigateway/getApiValidation:getApiValidation', __args__, opts=opts, typ=GetApiValidationResult).value
return AwaitableGetApiValidationResult(
api_id=__ret__.api_id,
id=__ret__.id,
validations=__ret__.validations) | """
A collection of values returned by getApiValidation. |
redis_action_ch05.py | """
@author: magician
@file: redis_action_ch05.py
@date: 2021/11/22
"""
import bisect
import contextlib
import csv
import functools
import json
import logging
import random
import threading
import time
import unittest
import uuid
import redis
from datetime import datetime
QUIT = False
SAMPLE_COUNT = 100
config_connection = None
SEVERITY = {
logging.DEBUG: 'debug',
logging.INFO: 'info',
    logging.WARNING: 'warning',
logging.ERROR: 'error',
logging.CRITICAL: 'critical',
}
SEVERITY.update((name, name) for name in list(SEVERITY.values()))
PRECISION = [1, 5, 60, 300, 3600, 18000, 86400]
LAST_CHECKED = None
IS_UNDER_MAINTENANCE = False
CONFIGS = {}
CHECKED = {}
REDIS_CONNECTIONS = {}
def to_bytes(x):
"""
to_bytes
@param x:
@return:
"""
return x.encode() if isinstance(x, str) else x
def to_str(x):
"""
to_str
@param x:
@return:
"""
return x.decode() if isinstance(x, bytes) else x
def log_recent(conn, name, message, severity=logging.INFO, pipe=None):
"""
log_recent
@param conn:
@param name:
@param message:
@param severity:
@param pipe:
@return:
"""
severity = str(SEVERITY.get(severity, severity)).lower()
destination = 'recent:%s:%s' % (name, severity)
message = time.asctime() + ' ' + message
pipe = pipe or conn.pipeline()
pipe.lpush(destination, message)
pipe.ltrim(destination, 0, 99)
pipe.execute()
def log_common(conn, name, message, severity=logging.INFO, timeout=5):
"""
log_common
@param conn:
@param name:
@param message:
@param severity:
@param timeout:
@return:
"""
severity = str(SEVERITY.get(severity, severity)).lower()
destination = 'common:%s:%s' % (name, severity)
start_key = destination + ':start'
pipe = conn.pipeline()
end = time.time() + timeout
while time.time() < end:
try:
pipe.watch(start_key)
now = datetime.utcnow().timetuple()
hour_start = datetime(*now[:4]).isoformat()
existing = pipe.get(start_key)
pipe.multi()
if existing and existing < to_bytes(hour_start):
pipe.rename(destination, destination + ':last')
                pipe.rename(start_key, destination + ':pstart')
pipe.set(start_key, hour_start)
elif not existing:
pipe.set(start_key, hour_start)
pipe.zincrby(destination, 1, message)
log_recent(pipe, name, message, severity, pipe)
return
except redis.exceptions.WatchError:
continue
def update_counter(conn, name, count=1, now=None):
"""
update_counter
@param conn:
@param name:
@param count:
@param now:
@return:
"""
now = now or time.time()
pipe = conn.pipeline()
for prec in PRECISION:
pnow = int(now / prec) * prec
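        # pnow is the start of the current bucket for this precision; e.g.
        # (hypothetical numbers) now=1000003 with prec=300 buckets to 999900,
        # so every event in that 5-minute window increments the same field.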
hash = '%s:%s' % (prec, name)
pipe.zadd('known:', {hash: 0})
        pipe.hincrby('count:' + hash, pnow, count)
pipe.execute()
def get_counter(conn, name, precision):
"""
get_counter
@param conn:
@param name:
@param precision:
@return:
"""
hash = "%s:%s" % (precision, name)
data = conn.hgetall('count:' + hash)
to_return = []
for key, value in data.items():
to_return.append((int(key), int(value)))
to_return.sort()
return to_return
def clean_counters(conn):
"""
clean_counters
@param conn:
@return:
"""
pipe = conn.pipeline(True)
passes = 0
while not QUIT:
start = time.time()
index = 0
while index < conn.zcard('known:'):
            hash = conn.zrange('known:', index, index)
index += 1
if not hash:
break
hash = hash[0]
prec = int(hash.partition(b':')[0])
bprec = int(prec // 60) or 1
if passes % bprec:
continue
hkey = 'count:' + to_str(hash)
cutoff = time.time() - SAMPLE_COUNT * prec
samples = list(map(int, conn.hkeys(hkey)))
samples.sort()
remove = bisect.bisect_right(samples, cutoff)
if remove:
conn.hdel(hkey, *samples[:remove])
if remove == len(samples):
try:
pipe.watch(hkey)
if not pipe.hlen(hkey):
pipe.multi()
pipe.zrem('known:', hash)
pipe.execute()
index -= 1
else:
pipe.unwatch()
except redis.exceptions.WatchError:
pass
passes += 1
duration = min(int(time.time() - start) + 1, 60)
time.sleep(max(60 - duration, 1))
def update_stats(conn, context, type, value, timeout=5):
"""
update_stats
@param conn:
@param context:
@param type:
@param value:
@param timeout:
@return:
"""
destination = 'stats:%s:%s' % (context, type)
start_key = destination + ':start'
pipe = conn.pipeline(True)
end = time.time() + timeout
while time.time() < end:
try:
pipe.watch(start_key)
now = datetime.utcnow().timetuple()
hour_start = datetime(*now[:4]).isoformat()
existing = pipe.get(start_key)
pipe.multi()
if not existing:
pipe.set(start_key, hour_start)
elif to_str(existing) < hour_start:
pipe.rename(destination, destination + ':last')
pipe.rename(start_key, destination + ':pstart')
pipe.set(start_key, hour_start)
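            # A running min/max can't be read back inside MULTI, so write the
            # new value into throwaway zsets and fold them in with ZUNIONSTORE
            # using 'min'/'max' aggregation.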
tkey1 = str(uuid.uuid4())
tkey2 = str(uuid.uuid4())
pipe.zadd(tkey1, {'min': value})
            pipe.zadd(tkey2, {'max': value})
pipe.zunionstore(destination, [destination, tkey1], aggregate='min')
pipe.zunionstore(destination, [destination, tkey2], aggregate='max')
pipe.delete(tkey1, tkey2)
pipe.zincrby(destination, 1, 'count')
pipe.zincrby(destination, value, 'sum')
pipe.zincrby(destination, value * value, 'sumsq')
return pipe.execute()[-3:]
except redis.exceptions.WatchError:
continue
def get_stats(conn, context, type):
"""
get_stats
@param conn:
@param context:
@param type:
@return:
"""
key = 'stats:%s:%s' % (context, type)
data = dict(conn.zrange(key, 0, -1, withscores=True))
data[b'average'] = data[b'sum'] / data[b'count']
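    # Sample standard deviation from the running sums, using the identity
    # variance = (sumsq - sum**2 / count) / (count - 1).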
numerator = data[b'sumsq'] - data[b'sum'] ** 2 / data[b'count']
data[b'stddev'] = (numerator / (data[b'count'] - 1 or 1)) ** 0.5
return data
@contextlib.contextmanager
def access_time(conn, context):
"""
access_time
@param conn:
@param context:
@return:
"""
start = time.time()
yield
delta = time.time() - start
stats = update_stats(conn, context, 'AccessTime', delta)
average = stats[1] / stats[0]
pipe = conn.pipeline(True)
pipe.zadd('slowest:AccessTime', {context: average})
pipe.zremrangebyrank('slowest:AccessTime', 0, -101)
pipe.execute()
def process_view(conn, callback):
"""
process_view
@param conn:
@param callback:
@return:
"""
with access_time(conn, request.path):
return callback()
def ip_to_score(ip_address):
"""
ip_to_score
@param ip_address:
@return:
"""
score = 0
for v in ip_address.split('.'):
score = score * 256 + int(v, 10)
return score
def import_ips_to_redis(conn, filename):
"""
import_ips_to_redis
@param conn:
@param filename:
@return:
"""
    csv_file = csv.reader(open(filename, 'r', encoding='latin-1'))
for count, row in enumerate(csv_file):
start_ip = row[0] if row else ''
if 'i' in start_ip.lower():
continue
if '.' in start_ip:
start_ip = ip_to_score(start_ip)
elif start_ip.isdigit():
start_ip = int(start_ip, 10)
else:
continue
city_id = row[2] + '_' + str(count)
conn.zadd('ip2cityid:', {city_id: start_ip})
def import_cities_to_redis(conn, filename):
"""
import_cities_to_redis
@param conn:
@param filename:
@return:
"""
    for row in csv.reader(open(filename, 'r', encoding='latin-1')):
        if len(row) < 4 or not row[0].isdigit():
            continue
city_id = row[0]
country = row[1]
region = row[2]
city = row[3]
        conn.hset('cityid2city:', city_id, json.dumps([city, region, country]))
def find_city_by_ip(conn, ip_address):
"""
find_city_by_ip
@param conn:
@param ip_address:
@return:
"""
if isinstance(ip_address, str):
ip_address = ip_to_score(ip_address)
city_id = conn.zrevrangebyscore('ip2cityid:', ip_address, 0, start=0, num=1)
if not city_id:
return None
    city_id = to_str(city_id[0]).partition('_')[0]
return json.loads(conn.hget('cityid2city:', city_id))
def is_under_maintenance(conn):
"""
is_under_maintenance
@param conn:
@return:
"""
global LAST_CHECKED, IS_UNDER_MAINTENANCE
if (not LAST_CHECKED) or LAST_CHECKED < time.time() - 1:
LAST_CHECKED = time.time()
IS_UNDER_MAINTENANCE = bool(conn.get('is-under-maintenance'))
return IS_UNDER_MAINTENANCE
def set_config(conn, type, component, config):
"""
set_config
@param conn:
@param type:
@param component:
@param config:
@return:
"""
conn.set('config:%s:%s' % (type, component), json.dumps(config))
def get_config(conn, type, component, wait=1):
"""
get_config
@param conn:
@param type:
@param component:
@param wait:
@return:
"""
key = 'config:%s:%s' % (type, component)
ch = CHECKED.get(key)
if (not ch) or ch < time.time() - wait:
CHECKED[key] = time.time()
config = json.loads(conn.get(key) or '{}')
config = dict((str(k), config[k]) for k in config)
old_config = CONFIGS.get(key)
if config != old_config:
CONFIGS[key] = config
return CONFIGS.get(key)
def redis_connection(component, wait=1):
"""
redis_connection
@param component:
@param wait:
@return:
"""
key = 'config:redis:' + component
def wrapper(function):
@functools.wraps(function)
def call(*args, **kwargs):
old_config = CONFIGS.get(key, object())
config = get_config(config_connection, 'redis', component, wait)
if config != old_config:
REDIS_CONNECTIONS[key] = redis.Redis(**config)
return function(REDIS_CONNECTIONS.get(key), *args, **kwargs)
return call
return wrapper
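# Hypothetical usage sketch for the decorator above: a function declared as
#
#     @redis_connection('logs')
#     def log_event(conn, message):
#         conn.rpush('events', message)
#
# is then called simply as log_event('hello'); the wrapper injects a
# connection built from the JSON stored at 'config:redis:logs' and rebuilds
# it whenever that configuration changes.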
# --------------- Below this line are helpers to test the code ----------------
class request:
pass
# # a faster version with pipelines for actual testing
# def import_ips_to_redis(conn, filename):
#     csv_file = csv.reader(open(filename, 'r', encoding='latin-1'))
# pipe = conn.pipeline(False)
# for count, row in enumerate(csv_file):
# start_ip = row[0] if row else ''
# if 'i' in start_ip.lower():
# continue
# if '.' in start_ip:
# start_ip = ip_to_score(start_ip)
# elif start_ip.isdigit():
# start_ip = int(start_ip, 10)
# else:
# continue
#
# city_id = row[2] + '_' + str(count)
# pipe.zadd('ip2cityid:', {city_id: start_ip})
# if not (count + 1) % 1000:
# pipe.execute()
# pipe.execute()
#
#
# def import_cities_to_redis(conn, filename):
# pipe = conn.pipeline(False)
#     for count, row in enumerate(csv.reader(open(filename, 'r', encoding='latin-1'))):
#         if len(row) < 4 or not row[0].isdigit():
#             continue
# city_id = row[0]
# country = row[1]
# region = row[2]
# city = row[3]
# pipe.hset('cityid2city:', city_id,
# json.dumps([city, region, country]))
# if not (count + 1) % 1000:
# pipe.execute()
# pipe.execute()
class TestCh05(unittest.TestCase):
def setUp(self):
global config_connection
import redis
self.conn = config_connection = redis.Redis(db=15, password='123456')
self.conn.flushdb()
def tearDown(self):
|
def test_log_recent(self):
import pprint
conn = self.conn
print("Let's write a few logs to the recent log")
for msg in range(5):
log_recent(conn, 'test', 'this is message %s' % msg)
recent = conn.lrange('recent:test:info', 0, -1)
print("The current recent message log has this many messages:", len(recent))
print("Those messages include:")
pprint.pprint(recent[:10])
self.assertTrue(len(recent) >= 5)
def test_log_common(self):
import pprint
conn = self.conn
print("Let's write some items to the common log")
for count in range(1, 6):
for i in range(count):
log_common(conn, 'test', "message-%s" % count)
common = conn.zrevrange('common:test:info', 0, -1, withscores=True)
print("The current number of common messages is:", len(common))
print("Those common messages are:")
pprint.pprint(common)
self.assertTrue(len(common) >= 5)
def test_counters(self):
import pprint
global QUIT, SAMPLE_COUNT
conn = self.conn
print("Let's update some counters for now and a little in the future")
now = time.time()
for delta in range(10):
update_counter(conn, 'test', count=random.randrange(1, 5), now=now + delta)
counter = get_counter(conn, 'test', 1)
print("We have some per-second counters:", len(counter))
self.assertTrue(len(counter) >= 10)
counter = get_counter(conn, 'test', 5)
print("We have some per-5-second counters:", len(counter))
print("These counters include:")
pprint.pprint(counter[:10])
self.assertTrue(len(counter) >= 2)
print()
tt = time.time
def new_tt():
return tt() + 2 * 86400
time.time = new_tt
print("Let's clean out some counters by setting our sample count to 0")
SAMPLE_COUNT = 0
t = threading.Thread(target=clean_counters, args=(conn,))
        t.daemon = True  # to make sure it dies if we ctrl+C quit
t.start()
time.sleep(1)
QUIT = True
time.time = tt
counter = get_counter(conn, 'test', 86400)
print("Did we clean out all of the counters?", not counter)
self.assertFalse(counter)
def test_stats(self):
import pprint
conn = self.conn
print("Let's add some data for our statistics!")
for i in range(5):
r = update_stats(conn, 'temp', 'example', random.randrange(5, 15))
print("We have some aggregate statistics:", r)
rr = get_stats(conn, 'temp', 'example')
print("Which we can also fetch manually:")
pprint.pprint(rr)
self.assertTrue(rr[b'count'] >= 5)
def test_access_time(self):
import pprint
conn = self.conn
print("Let's calculate some access times...")
for i in range(10):
with access_time(conn, "req-%s" % i):
time.sleep(.5 + random.random())
print("The slowest access times are:")
atimes = conn.zrevrange('slowest:AccessTime', 0, -1, withscores=True)
pprint.pprint(atimes[:10])
self.assertTrue(len(atimes) >= 10)
print()
def cb():
time.sleep(1 + random.random())
print("Let's use the callback version...")
for i in range(5):
request.path = 'cbreq-%s' % i
process_view(conn, cb)
print("The slowest access times are:")
atimes = conn.zrevrange('slowest:AccessTime', 0, -1, withscores=True)
pprint.pprint(atimes[:10])
self.assertTrue(len(atimes) >= 10)
def test_ip_lookup(self):
conn = self.conn
try:
open('GeoLiteCity-Blocks.csv', 'rb')
open('GeoLiteCity-Location.csv', 'rb')
except:
print("********")
print("You do not have the GeoLiteCity database available, aborting test")
print("Please have the following two files in the current path:")
print("GeoLiteCity-Blocks.csv")
print("GeoLiteCity-Location.csv")
print("********")
return
print("Importing IP addresses to Redis... (this may take a while)")
import_ips_to_redis(conn, 'GeoLiteCity-Blocks.csv')
ranges = conn.zcard('ip2cityid:')
print("Loaded ranges into Redis:", ranges)
self.assertTrue(ranges > 1000)
print()
print("Importing Location lookups to Redis... (this may take a while)")
import_cities_to_redis(conn, 'GeoLiteCity-Location.csv')
cities = conn.hlen('cityid2city:')
print("Loaded city lookups into Redis:", cities)
self.assertTrue(cities > 1000)
print()
print("Let's lookup some locations!")
rr = random.randrange
for i in range(5):
print(find_city_by_ip(conn, '%s.%s.%s.%s' % (rr(1, 255), rr(256), rr(256), rr(256))))
def test_is_under_maintenance(self):
print("Are we under maintenance (we shouldn't be)?", is_under_maintenance(self.conn))
self.conn.set('is-under-maintenance', 'yes')
print("We cached this, so it should be the same:", is_under_maintenance(self.conn))
time.sleep(1)
print("But after a sleep, it should change:", is_under_maintenance(self.conn))
print("Cleaning up...")
self.conn.delete('is-under-maintenance')
time.sleep(1)
print("Should be False again:", is_under_maintenance(self.conn))
def test_config(self):
print("Let's set a config and then get a connection from that config...")
set_config(self.conn, 'redis', 'test', {'db': 15})
@redis_connection('test')
def test(conn2):
return bool(conn2.info())
print("We can run commands from the configured connection:", test())
if __name__ == '__main__':
unittest.main()
| self.conn.flushdb()
del self.conn
global config_connection, QUIT, SAMPLE_COUNT
config_connection = None
QUIT = False
SAMPLE_COUNT = 100
print()
print() |
venues.rs | //! Tools for grabbing venue data, both the basic data available from the API, as well as the svg files that will tell us where homeplate is.
//! The svg data is critical, since it will help us map the hit_data to actual coordinates on the field. It will also allow us to properly measure feet, since we'll be able to measure it against
//! the field's stated dimensions. Specifically, we'll look at the data points down the left and right field lines and see the distance in (x, y) that correlates to the HR distance down the lines.
//! This will allow us to convert the pixel distance to actual feet at the ballpark level (a hypothetical sketch of this conversion follows `get_svg` below).
//!
//! We may also want to compute travel distance (flight time) from location to location, but this will be a feature to add later.
//!
use serde::{Deserialize, Serialize};
// use crate::utils::*;
use isahc::prelude::*;
use crate::cache;
use std::collections::BTreeSet;
use tree_buf::{Read, Write};
/// Link to all the venues used by the MLB Stats API. "Hydrated" fields include the location, field dimension and
/// cross reference IDs that can be used to link to retrosheet.
const VENUES: &str = "https://statsapi.mlb.com/api/v1/venues/?hydrate=location,fieldInfo,timezone,xrefId";
pub fn test_venues () {
let venues = isahc::get(VENUES).unwrap().text().unwrap();
let venue_data: Venues = serde_json::from_str(&venues).unwrap();
dbg!(&venue_data.venues[138]);
let mut venues_x_y = cache::load_venue_x_y();
let venues_cached: BTreeSet<u32> = venues_x_y
.iter()
.map(|venue| venue.id)
.collect()
;
let venues_x_y_new: Vec<VenueXY> = venue_data.venues
.iter()
.filter(|venue| !venues_cached.contains(&venue.id))
.map(|venue|
{
let id = venue.id;
let (x,y) = get_svg(id);
VenueXY {
id, x, y
}
}
)
.collect()
;
venues_x_y.extend(venues_x_y_new);
cache::cache_venue_x_y(&venues_x_y);
}
pub fn get_svg (id: u32) -> (Option<f32>, Option<f32>) {
let link = format!("http://mlb.mlb.com/images/gameday/fields/svg/{}.svg", id);
let svg_data = isahc::get(link).unwrap().text().unwrap();
if svg_data.contains("Page Not Found") {
return (None, None);
}
// The last <polyline> tag in the svg represents the baselines. The middle element is where the fair lines meet, which is the ideal
// point to set the (x,y) coordinates
let result = svg_data
.split("<polyline").last().unwrap()
.split("points=").nth(1).unwrap_or("")
.split(" ").nth(1).unwrap_or("")
.to_owned();
if !result.contains(",") {return (None, None)};
let split:Vec<&str> = result.split(",").collect();
(split[0].parse::<f32>().ok(), split[1].parse::<f32>().ok())
}
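// A minimal, hypothetical sketch (not part of the original code) of the
// pixel-to-feet calibration described in the module docs: given home plate
// and a foul-pole point in svg pixel space, plus the park's stated line
// distance in feet, derive a feet-per-pixel scale for the venue. The name
// and signature here are illustrative assumptions.
#[allow(dead_code)]
fn feet_per_pixel(home: (f32, f32), pole: (f32, f32), line_feet: f32) -> Option<f32> {
    // Pixel distance from home plate to the foul pole down the line.
    let (dx, dy) = (pole.0 - home.0, pole.1 - home.1);
    let pixels = (dx * dx + dy * dy).sqrt();
    if pixels > 0.0 {
        Some(line_feet / pixels)
    } else {
        None
    }
}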
// pub fn venue_svg() {
// let svg_links: Vec<String> = (0 ..= 6_000)
// .map(|id| format!("http://mlb.mlb.com/images/gameday/fields/svg/{}.svg", id))
// .collect()
// ;
// let svg_data = stream_chunked(svg_links);
// dbg!(&svg_data.len());
// let _svgs: Vec<String> = svg_data
// .into_iter()
// .map(|svg| svg.unwrap())
// .filter(|svg| !svg.contains("Page Not Found"))
// .inspect(|svg| println!("{}", svg.split("<g id=").nth(1).unwrap_or(svg)))
// .map(|svg| svg
// .split(r#"<g id="Base"#).nth(1).unwrap_or("")
// .split("<polygon").nth(1).unwrap_or("")
// .split("points=").nth(1).unwrap_or("")
// .split(" ").nth(2).unwrap_or("")
// .to_owned())
// .inspect(|svg| println!("{}", svg))
// .collect()
// ;
// }
#[derive(Deserialize, Serialize, Debug, Copy, Clone)]
pub struct VenueXY {
pub id: u32,
pub x: Option<f32>,
pub y: Option<f32>,
}
#[derive(Deserialize, Debug)]
pub (crate) struct Venues {
pub (crate) venues: Vec<VenueDe>,
}
impl From<VenueDe> for Venue {
fn from (v: VenueDe) -> Venue {
let venue_retrosheet_id = v.xref_ids
.into_iter()
.filter(|id| id.xref_type == Some("retrosheet".to_string()))
.map(|id| id.xref_id.unwrap())
.nth(0)
.unwrap_or("".to_string())
;
let (venue_latitude, venue_longitude) = match v.location.default_coordinates {
Some (loc) => (Some(loc.latitude), Some(loc.longitude)),
_ => (None, None)
};
Venue {
id: v.id,
venue_name: v.name,
venue_city: v.location.city,
venue_state: v.location.state.unwrap_or("".to_string()),
venue_state_abbr: v.location.state_abbrev.unwrap_or("".to_string()),
venue_time_zone: v.time_zone.id,
venue_time_zone_offset: v.time_zone.id.into(),
venue_capacity: v.field_info.capacity,
venue_surface: v.field_info.turf_type,
venue_roof: v.field_info.roof_type,
venue_retrosheet_id,
venue_left_line: v.field_info.left_line,
venue_left: v.field_info.left,
venue_left_center: v.field_info.left_center,
venue_center: v.field_info.center,
venue_right_center: v.field_info.right_center,
venue_right_line: v.field_info.right_line,
venue_right: v.field_info.right,
venue_latitude,
venue_longitude,
}
}
}
impl From<TimeZone> for i8 {
fn from (t: TimeZone) -> i8 {
match t {
TimeZone::HST => -10,
TimeZone::PST => -8,
TimeZone::MST => -7,
TimeZone::CST => -6,
TimeZone::EST => -5,
TimeZone::VET | TimeZone::AST => -4,
TimeZone::ASIA => 9,
TimeZone::AUSTRALIA => 11,
TimeZone::EUROPE => 0,
}
}
}
#[derive(Deserialize, Serialize, Debug, Clone)]
pub struct VenueData {
pub year: u16,
pub venue: Venue,
}
#[derive(Deserialize, Serialize, Debug, Clone)]
pub struct Venue {
pub id: u32,
pub venue_name: String,
pub venue_city: String,
pub venue_state: String,
pub venue_state_abbr: String,
pub venue_time_zone: TimeZone,
pub venue_time_zone_offset: i8,
pub venue_capacity: Option<u32>,
pub venue_surface: Option<SurfaceType>,
pub venue_roof: Option<RoofType>,
pub venue_left_line: Option<u16>,
pub venue_left: Option<u16>,
pub venue_left_center: Option<u16>,
pub venue_center: Option<u16>,
pub venue_right_center: Option<u16>,
pub venue_right: Option<u16>,
pub venue_right_line: Option<u16>,
pub venue_retrosheet_id: String,
pub venue_latitude: Option<f32>,
pub venue_longitude: Option<f32>,
}
impl Default for Venue {
fn default() -> Self
{
Venue {
id: 401,
venue_name: "".to_string(),
venue_city: "".to_string(),
venue_state: "".to_string(),
venue_state_abbr: "".to_string(),
venue_time_zone: TimeZone::EST,
venue_time_zone_offset: 0,
venue_capacity: None,
venue_surface: None,
venue_roof: None,
venue_left_line: None,
venue_left: None,
venue_left_center: None,
venue_center: None,
venue_right_center: None,
venue_right: None,
venue_right_line: None,
venue_retrosheet_id: "".to_string(),
venue_latitude: None,
venue_longitude: None,
}
}
}
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all="camelCase")]
pub(crate) struct VenueDe {
pub(crate) id: u32,
pub(crate) name: String,
pub(crate) location: Location,
pub(crate) time_zone: TimeZoneData,
pub(crate) field_info: FieldInfo,
#[serde(default="default_xref_ids")]
pub(crate) xref_ids: Vec<XRefID>,
}
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all="camelCase")]
pub(crate) struct Location {
pub(crate) city: String,
pub(crate) state: Option<String>,
pub(crate) state_abbrev: Option<String>,
pub(crate) default_coordinates: Option<Coords>,
}
#[derive(Deserialize, Debug, Clone)]
pub (crate) struct Coords {
pub(crate) latitude: f32,
pub(crate) longitude: f32,
}
#[derive(Deserialize, Debug, Clone)]
pub(crate) struct TimeZoneData {
pub(crate) id: TimeZone,
}
| pub(crate) capacity: Option<u32>,
pub(crate) turf_type: Option<SurfaceType>,
pub(crate) roof_type: Option<RoofType>,
pub(crate) left_line: Option<u16>,
pub(crate) left: Option<u16>,
pub(crate) left_center: Option<u16>,
pub(crate) center: Option<u16>,
pub(crate) right_center: Option<u16>,
pub(crate) right: Option<u16>,
pub(crate) right_line: Option<u16>,
}
#[serde(rename_all="camelCase")]
#[derive(Deserialize, Debug, Clone)]
pub(crate) struct XRefID {
pub(crate) xref_id: Option<String>,
pub(crate) xref_type: Option<String>,
}
fn default_xref_ids() -> Vec<XRefID> {
vec![]
}
// #[serde(field_identifier)]
#[derive(Deserialize, Serialize, Debug, Copy, Clone)]
pub enum TimeZone {
/// ### GMT -10
/// * Pacific/Honolulu
#[serde(alias="Pacific/Honolulu")]
HST,
/// ### GMT -8
/// * America/Los_Angeles
/// * America/Vancouver
/// * America/Tijuana
#[serde(alias="America/Los_Angeles", alias="America/Vancouver", alias="America/Tijuana")]
PST,
/// ### GMT -7 Includes the following time zones:
/// * America/Hermosillo
/// * America/Denver
/// * America/Phoenix
/// * America/Boise
/// * America/Edmonton
#[serde(alias="America/Hermosillo", alias="America/Denver", alias="America/Phoenix", alias="America/Boise", alias="America/Edmonton")]
MST,
/// ### GMT -6
/// * America/Chicago
/// * America/Monterrey
/// * America/Cancun
/// * America/Mexico_City
/// * America/Winnipeg
/// * America/Merida
/// * America/Mazatlan
/// * America/Havana
/// * America/Matamoros
/// * America/Guatemala
#[serde(alias="America/Chicago", alias="America/Monterrey", alias="America/Cancun", alias="America/Mexico_City",
alias="America/Winnipeg", alias="America/Merida", alias="America/Mazatlan", alias = "America/Havana",
alias="America/Matamoros", alias="America/Guatemala")]
CST,
/// ### GMT -5
/// * America/New_York
/// * America/Panama
/// * America/Toronto
/// * America/Detroit
/// * America/Kentucky/Louisville
/// * America/Indiana/Indianapolis
#[serde(alias="America/New_York", alias="America/Panama", alias="America/Toronto", alias="America/Detroit", alias="America/Kentucky/Louisville", alias="America/Indiana/Indianapolis")]
EST,
/// ### GMT -4
/// * America/Caracas
#[serde(alias="America/Caracas")]
VET,
/// ### GMT -4
/// * America/Puerto_Rico
/// * America/Santo_Domingo
/// * America/Halifax
#[serde(alias="America/Puerto_Rico", alias="America/Santo_Domingo", alias="America/Halifax")]
AST,
/// ### GMT +9
/// All of China/Taiwan/Japan lumped together here
/// * Asia/Tokyo
/// * Asia/Taipei
/// * Asia/Seoul
/// * Asia/Shanghai
#[serde(alias="Asia/Tokyo", alias="Asia/Taipei", alias="Asia/Seoul", alias="Asia/Shanghai")]
ASIA,
/// ### GMT +11
    /// Grouping all the Australia locations together, even though this is slightly inaccurate; the difference doesn't really matter here.
/// * Australia/Sydney
/// * Australia/Perth
/// * Australia/Brisbane
/// * Australia/Melbourne
/// * Australia/Adelaide
#[serde(alias="Australia/Sydney", alias="Australia/Perth", alias="Australia/Brisbane", alias="Australia/Melbourne", alias="Australia/Adelaide")]
AUSTRALIA,
/// ### GMT ??
/// All of Europe lumped into here
/// * Europe/Helsinki
/// * Europe/Stockholm
/// * Europe/London
/// * Europe/Moscow
/// * Europe/Rome
/// * Europe/Berlin
/// * Asia/Jerusalem
/// * Europe/Amsterdam
/// * Europe/Prague
#[serde( alias="Europe/Helsinki", alias="Europe/Stockholm", alias="Europe/London", alias="Europe/Moscow", alias="Europe/Rome",
alias= "Europe/Berlin", alias="Asia/Jerusalem", alias="Europe/Amsterdam", alias="Europe/Prague")]
EUROPE
}
#[derive(Deserialize, Serialize, Debug, Copy, Clone, Read, Write, PartialEq)]
pub enum SurfaceType {
Artificial,
Grass,
Indoor,
}
#[derive(Deserialize, Serialize, Debug, Copy, Clone, Read, Write, PartialEq)]
pub enum RoofType {
Dome,
Open,
Retractable,
Indoor,
} | #[serde(rename_all="camelCase")]
#[derive(Deserialize, Debug, Clone)]
pub(crate) struct FieldInfo { |
agent_availability_message.rs | /*
* Watson Assistant v2
*
* The IBM Watson™ Assistant service combines machine learning, natural language understanding, and an integrated dialog editor to create conversation flows between your apps and your users. The Assistant v2 API provides runtime methods your client application can use to send user input to an assistant and receive a response.
*
* The version of the OpenAPI document: 2.0
*
* Generated by: https://openapi-generator.tech
*/
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct | {
/// The text of the message.
#[serde(rename = "message", skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
}
impl AgentAvailabilityMessage {
pub fn new() -> AgentAvailabilityMessage {
AgentAvailabilityMessage {
message: None,
}
}
}
| AgentAvailabilityMessage |
even-or-odd.rs | fn even_or_odd(i: i32) -> &'static str {
    if i % 2 == 0 {
        "Even"
    } else {
        "Odd"
    }
}
#[test]
fn returns_expected() {
assert_eq!(even_or_odd(0), "Even");
assert_eq!(even_or_odd(2), "Even");
assert_eq!(even_or_odd(1), "Odd"); | assert_eq!(even_or_odd(-1), "Odd");
} | assert_eq!(even_or_odd(7), "Odd"); |
config.rs | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub struct Config {
pub(crate) endpoint_resolver: ::std::sync::Arc<dyn aws_endpoint::ResolveAwsEndpoint>,
pub(crate) region: Option<aws_types::region::Region>,
pub(crate) credentials_provider: aws_types::credentials::SharedCredentialsProvider,
}
impl std::fmt::Debug for Config {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut config = f.debug_struct("Config");
config.finish()
}
}
impl Config {
pub fn builder() -> Builder {
Builder::default()
}
pub fn new(config: &aws_types::config::Config) -> Self {
Builder::from(config).build()
}
/// The signature version 4 service signing name to use in the credential scope when signing requests.
///
/// The signing service may be overridden by the `Endpoint`, or by specifying a custom
/// [`SigningService`](aws_types::SigningService) during operation construction
pub fn signing_service(&self) -> &'static str {
"route53-recovery-cluster"
}
}
#[derive(Default)]
pub struct Builder {
endpoint_resolver: Option<::std::sync::Arc<dyn aws_endpoint::ResolveAwsEndpoint>>,
region: Option<aws_types::region::Region>,
credentials_provider: Option<aws_types::credentials::SharedCredentialsProvider>,
}
impl Builder {
pub fn new() -> Self {
Self::default()
}
pub fn endpoint_resolver(
mut self,
endpoint_resolver: impl aws_endpoint::ResolveAwsEndpoint + 'static,
) -> Self {
self.endpoint_resolver = Some(::std::sync::Arc::new(endpoint_resolver));
self
}
pub fn | (mut self, region: impl Into<Option<aws_types::region::Region>>) -> Self {
self.region = region.into();
self
}
/// Set the credentials provider for this service
pub fn credentials_provider(
mut self,
credentials_provider: impl aws_types::credentials::ProvideCredentials + 'static,
) -> Self {
self.credentials_provider = Some(aws_types::credentials::SharedCredentialsProvider::new(
credentials_provider,
));
self
}
pub fn set_credentials_provider(
&mut self,
credentials_provider: Option<aws_types::credentials::SharedCredentialsProvider>,
) -> &mut Self {
self.credentials_provider = credentials_provider;
self
}
pub fn build(self) -> Config {
Config {
endpoint_resolver: self
.endpoint_resolver
.unwrap_or_else(|| ::std::sync::Arc::new(crate::aws_endpoint::endpoint_resolver())),
region: self.region,
credentials_provider: self.credentials_provider.unwrap_or_else(|| {
aws_types::credentials::SharedCredentialsProvider::new(
crate::no_credentials::NoCredentials,
)
}),
}
}
}
impl From<&aws_types::config::Config> for Builder {
fn from(input: &aws_types::config::Config) -> Self {
let mut builder = Builder::default();
builder = builder.region(input.region().cloned());
builder.set_credentials_provider(input.credentials_provider().cloned());
builder
}
}
impl From<&aws_types::config::Config> for Config {
fn from(config: &aws_types::config::Config) -> Self {
Builder::from(config).build()
}
}
| region |
error.rs | //! Error types
use {
num_derive::FromPrimitive,
solana_program::{decode_error::DecodeError, program_error::ProgramError},
thiserror::Error,
};
/// Errors that may be returned by the program.
#[derive(Clone, Debug, Eq, Error, FromPrimitive, PartialEq)]
pub enum GatewayError {
/// Incorrect authority provided
#[error("Incorrect authority provided")]
IncorrectAuthority,
}
impl From<GatewayError> for ProgramError {
fn from(e: GatewayError) -> Self {
ProgramError::Custom(e as u32)
}
}
impl<T> DecodeError<T> for GatewayError {
fn | () -> &'static str {
"Gateway Error"
}
}
| type_of |