index.js | import React from 'react'
import ReactDOM from 'react-dom'
import App from './App.js'

ReactDOM.render(
  <React.StrictMode>
    <App />
  </React.StrictMode>,
  document.getElementById('root')
)
chain_of_logic_advanced.py | """ generate random strings of logic
v2 - randomly adds 'not' before values
TODO: 1. add random parentheses, 2. add expressions like 'i==1' or 'print()' for values
TODO: Make it more explicit as to which True or False value it is evaluating to.
The cycle:
1. Start with a True
a. keep doing 'and True' to the end
b. if you hit an 'or' before then, stop there (before the 'or', with the current True value)
c. if you hit 'and False' look for the next 'or'
i. if you find one, start the cycle again from there (after the 'or')
ii. if you don't then stop there (on that False value)
2. Start with a False
a. look for the next 'or'
i. if you find one, start the cycle again from there (after the 'or')
ii. if you don't then stop there (on that False value)
"""
import random
def r2():
return random.randint(0, 1)
def r7():
return random.randint(0,6)
tv = true_values = ["'a'", "'b'", "'c'", "'d'", "'e'", "'f'", "'g'"]
fv = false_values = ["''", 0, (), [], {}, set(), None]
lv = logic_values = ['and', 'or']
nv = ['', 'not ']
vals = [tv, fv]
n = 5
cont = ''
while cont == '':
expr = "{}{}".format(nv[r2()], vals[r2()][r7()])
for i in range(n):
item = " {} {}{}".format(lv[r2()], nv[r2()], vals[r2()][r7()])
expr += item
print('\n' + expr + '\n')
resp = input("Enter for answer...")
ans = eval(expr)
if isinstance(ans, str): ans = ans or "''"
print(f"result: {ans}")
    cont = input("\nEnter to continue... ")
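# A minimal, hypothetical sketch (not part of the original script) of the
# cycle the docstring describes: Python's 'and'/'or' return the operand
# that decides the result, so the deciding truthy/falsy value can be
# reported directly (this addresses the second TODO above).
def trace_eval(expr):
    result = eval(expr)  # same trust model as the toy expressions above
    print("{!r} short-circuits to {!r} ({})".format(
        expr, result, 'truthy' if result else 'falsy'))
    return result

trace_eval("'a' and '' or 'c'")  # -> 'c' (truthy)
trace_eval("'' or 0 or None")    # -> None (falsy)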
util.py | # engine/util.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
from .. import exc
from .. import util
try:
from sqlalchemy.cyextension.util import _distill_params_20 # noqa
from sqlalchemy.cyextension.util import _distill_raw_params # noqa
except ImportError:
from ._py_util import _distill_params_20 # noqa
from ._py_util import _distill_raw_params # noqa
def connection_memoize(key):
"""Decorator, memoize a function in a connection.info stash.
Only applicable to functions which take no arguments other than a
connection. The memo will be stored in ``connection.info[key]``.
"""
@util.decorator
def decorated(fn, self, connection):
connection = connection.connect()
try:
return connection.info[key]
except KeyError:
connection.info[key] = val = fn(self, connection)
return val
return decorated
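# A hypothetical usage sketch of connection_memoize (not part of this
# module); the method name and query are assumptions for illustration:
#
#     @connection_memoize("_server_version")
#     def server_version(self, connection):
#         # computed once, then served from connection.info["_server_version"]
#         return connection.exec_driver_sql("SELECT version()").scalar()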
class TransactionalContext:
"""Apply Python context manager behavior to transaction objects.
Performs validation to ensure the subject of the transaction is not
used if the transaction were ended prematurely.
"""
__slots__ = ("_outer_trans_ctx", "_trans_subject", "__weakref__")
def _transaction_is_active(self):
raise NotImplementedError()
def _transaction_is_closed(self):
raise NotImplementedError()
def _rollback_can_be_called(self):
"""indicates the object is in a state that is known to be acceptable
for rollback() to be called.
This does not necessarily mean rollback() will succeed or not raise
an error, just that there is currently no state detected that indicates
rollback() would fail or emit warnings.
It also does not mean that there's a transaction in progress, as
it is usually safe to call rollback() even if no transaction is
present.
.. versionadded:: 1.4.28
"""
raise NotImplementedError()
def _get_subject(self):
raise NotImplementedError()
@classmethod
def _trans_ctx_check(cls, subject):
        trans_context = subject._trans_context_manager
        if trans_context:
            if not trans_context._transaction_is_active():
                raise exc.InvalidRequestError(
                    "Can't operate on closed transaction inside context "
                    "manager. Please complete the context manager "
                    "before emitting further commands."
                )
def __enter__(self):
subject = self._get_subject()
# none for outer transaction, may be non-None for nested
# savepoint, legacy nesting cases
trans_context = subject._trans_context_manager
self._outer_trans_ctx = trans_context
self._trans_subject = subject
subject._trans_context_manager = self
return self
def __exit__(self, type_, value, traceback):
subject = getattr(self, "_trans_subject", None)
# simplistically we could assume that
# "subject._trans_context_manager is self". However, any calling
# code that is manipulating __exit__ directly would break this
# assumption. alembic context manager
# is an example of partial use that just calls __exit__ and
# not __enter__ at the moment. it's safe to assume this is being done
# in the wild also
out_of_band_exit = (
subject is None or subject._trans_context_manager is not self
)
if type_ is None and self._transaction_is_active():
try:
self.commit()
except:
with util.safe_reraise():
if self._rollback_can_be_called():
self.rollback()
finally:
if not out_of_band_exit:
subject._trans_context_manager = self._outer_trans_ctx
self._trans_subject = self._outer_trans_ctx = None
else:
try:
if not self._transaction_is_active():
if not self._transaction_is_closed():
self.close()
else:
if self._rollback_can_be_called():
self.rollback()
finally:
if not out_of_band_exit:
subject._trans_context_manager = self._outer_trans_ctx
self._trans_subject = self._outer_trans_ctx = None
task_random.py | import numpy as np
from environment import POMDPEnvironment
from rand.random_controller import RandomController
class VoiceTask_random:
avg_rewards = []
def __init__(self, env_file, prior):
self.environment = POMDPEnvironment(env_file)
self.prior = self.belief = prior
self.next_action = np.random.choice(len(self.environment.actions))
self.totalTurn = 0
self.totalReward = 0
self.totalEpisode = 0
self.stepInEpisode = 0
self.controller = RandomController(self.environment.states,
self.environment.actions,
self.belief, self.next_action)
self.init_episode()
pass
def init_episode(self):
self.environment.init_episode()
self.belief = self.prior
return self.belief
def do_steps(self, n=100):
for i in range(n):
episode_end = self.do_step()
if episode_end:
self.init_episode() # reset belief to initial belief [0.65, 0.35]
avg_reward = float(np.round((self.totalReward / self.totalEpisode), 3))
print 'avg reward: %.3f' % avg_reward
self.avg_rewards.append(tuple((self.totalEpisode, avg_reward)))
self.stepInEpisode = 0
def do_step(self):
print '\nturn: %d' % self.totalTurn
episode_end = False
old_belief = self.belief
old_action = self.next_action
action_str = self.get_action_str(old_action)
reward = self.environment.observe_reward(old_action)
if action_str == 'ask':
pass
else:
# terminal step
episode_end = True
self.totalEpisode += 1
pass
# new belief s'
observation_num = self.environment.get_observation(old_action)
new_belief = self.environment.update_belief(
old_belief, old_action, observation_num)
# new action a'
new_action = self.controller.get_best_action(new_belief)
self.controller.observe_step(old_belief, old_action, reward, new_belief, new_action, True)
# save belief & action for next turn
self.belief = new_belief
self.next_action = new_action
# counting turn & reward
self.totalTurn += 1
self.totalReward += reward
# self.stepInEpisode += 1
# if self.stepInEpisode == 10:
# episode_end = True
# self.totalEpisode += 1
return episode_end
def do_episodes(self, n=100):
while True:
if self.totalEpisode == n:
break
episode_end = self.do_step()
if episode_end:
self.init_episode() # reset belief to initial belief [0.65, 0.35]
avg_reward = float(np.round((self.totalReward / self.totalEpisode), 3))
print 'avg reward: %.3f' % avg_reward
self.avg_rewards.append(tuple((self.totalEpisode, avg_reward)))
self.stepInEpisode = 0
pass
def print_summary(self):
self.controller.end()
print 'random policy - total steps: %d' % self.totalTurn
print '\n-------summary-------------'
print 'Total Episodes: %d' % self.totalEpisode
print 'Total Rewards: %d' % self.totalReward
print 'Avg Reward per Episode: %f' % (self.totalReward / self.totalEpisode)
print '---------------------------'
def get_reward_data(self):
return self.avg_rewards
def save_results(self, filenm):
import csv
avg_rewards = self.get_reward_data()
with open(filenm, 'wb') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=['episode', 'avg_reward'])
writer.writeheader()
for (episode, avg_reward) in avg_rewards:
writer.writerow({'episode': episode, 'avg_reward': avg_reward})
def get_action_str(self, action_num):
return self.environment.actions[action_num]
    def get_observation_str(self, observation_num):
        return self.environment.observations[observation_num]
def test_get_best_action(self):
        self.controller.get_best_action(self.belief)
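# Hypothetical driver sketch (not part of the class); the env file name is
# an assumption, and the [0.65, 0.35] prior mirrors the comments above:
#
#     task = VoiceTask_random('voicemail.pomdp', prior=[0.65, 0.35])
#     task.do_episodes(100)
#     task.print_summary()
#     task.save_results('random_rewards.csv')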
batch_spec_test.go | package types
import (
"testing"
)
func TestComputeBatchSpecState(t *testing.T) {
uploadedSpec := &BatchSpec{CreatedFromRaw: false}
createdFromRawSpec := &BatchSpec{CreatedFromRaw: true}
tests := []struct {
stats BatchSpecStats
spec *BatchSpec
want BatchSpecState
}{
{
stats: BatchSpecStats{ResolutionDone: false},
spec: uploadedSpec,
want: BatchSpecStateCompleted,
},
{
stats: BatchSpecStats{ResolutionDone: false},
spec: createdFromRawSpec,
want: BatchSpecStatePending,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5},
spec: createdFromRawSpec,
want: BatchSpecStatePending,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Queued: 3},
spec: createdFromRawSpec,
want: BatchSpecStateQueued,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Queued: 2, Processing: 1},
spec: createdFromRawSpec,
want: BatchSpecStateProcessing,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Queued: 1, Processing: 1, Completed: 1},
spec: createdFromRawSpec,
want: BatchSpecStateProcessing,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Queued: 1, Processing: 0, Completed: 2},
spec: createdFromRawSpec,
want: BatchSpecStateProcessing,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Queued: 0, Processing: 0, Completed: 3},
spec: createdFromRawSpec,
want: BatchSpecStateCompleted,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Queued: 1, Processing: 1, Failed: 1},
spec: createdFromRawSpec,
want: BatchSpecStateProcessing,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Queued: 1, Processing: 0, Failed: 2},
spec: createdFromRawSpec,
want: BatchSpecStateProcessing,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Queued: 0, Processing: 0, Failed: 3},
spec: createdFromRawSpec,
want: BatchSpecStateFailed,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Queued: 0, Completed: 1, Failed: 2},
spec: createdFromRawSpec,
want: BatchSpecStateFailed,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceling: 3},
spec: createdFromRawSpec,
want: BatchSpecStateCanceling,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceling: 2, Completed: 1},
spec: createdFromRawSpec,
want: BatchSpecStateCanceling,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceling: 2, Failed: 1},
spec: createdFromRawSpec,
want: BatchSpecStateCanceling,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceling: 1, Queued: 2},
spec: createdFromRawSpec,
want: BatchSpecStateProcessing,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceling: 1, Processing: 2},
spec: createdFromRawSpec,
want: BatchSpecStateProcessing,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceled: 3},
spec: createdFromRawSpec,
want: BatchSpecStateCanceled,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceled: 1, Failed: 2},
spec: createdFromRawSpec,
want: BatchSpecStateCanceled,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceled: 1, Completed: 2},
spec: createdFromRawSpec,
want: BatchSpecStateCanceled,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceled: 1, Canceling: 2},
spec: createdFromRawSpec,
want: BatchSpecStateCanceling,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceled: 1, Canceling: 1, Queued: 1},
spec: createdFromRawSpec,
want: BatchSpecStateProcessing,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceled: 1, Processing: 2},
spec: createdFromRawSpec,
want: BatchSpecStateProcessing,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceled: 1, Canceling: 1, Processing: 1},
spec: createdFromRawSpec,
want: BatchSpecStateProcessing,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 5, Executions: 3, Canceled: 1, Queued: 2},
spec: createdFromRawSpec,
want: BatchSpecStateProcessing,
},
{
stats: BatchSpecStats{ResolutionDone: true, Workspaces: 0, Executions: 0},
spec: createdFromRawSpec,
want: BatchSpecStateCompleted,
},
}
for idx, tt := range tests {
have := ComputeBatchSpecState(tt.spec, tt.stats)
if have != tt.want {
t.Errorf("test %d/%d: unexpected batch spec state. want=%s, have=%s", idx+1, len(tests), tt.want, have)
}
}
}
padleft.spec.ts | import { padLeft } from "../padLeft";
import { testCases } from "./padleft-cases";

describe("padLeft operation", () => {
  for (const t of testCases) {
    it(`padLeft "${t[0]}" to length "${t[1]}" using "${t[2]}" should equal "${t[3]}"`, () => {
      expect(padLeft(t[0], t[1], t[2])).toBe(t[3]);
    });
  }
});
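// Hypothetical shape of a testCases entry, inferred from its use above
// (the concrete values are assumptions for illustration):
//
//   const testCases: Array<[string, number, string, string]> = [
//     ["7", 3, "0", "007"], // pad "7" to length 3 with "0"
//   ];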
model_billing_address_1.go | /*
* HyperOne
*
* HyperOne API
*
* API version: 0.1.0
*/
// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT.
package h1
import (
"encoding/json"
)
// BillingAddress1 struct for BillingAddress1
type BillingAddress1 struct {
City *string `json:"city,omitempty"`
Zipcode *string `json:"zipcode,omitempty"`
Street *string `json:"street,omitempty"`
}
// NewBillingAddress1 instantiates a new BillingAddress1 object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewBillingAddress1() *BillingAddress1 {
this := BillingAddress1{}
return &this
}
// NewBillingAddress1WithDefaults instantiates a new BillingAddress1 object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewBillingAddress1WithDefaults() *BillingAddress1 {
this := BillingAddress1{}
return &this
}
// GetCity returns the City field value if set, zero value otherwise.
func (o *BillingAddress1) GetCity() string {
if o == nil || o.City == nil {
var ret string
return ret
}
return *o.City
}
// GetCityOk returns a tuple with the City field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BillingAddress1) GetCityOk() (*string, bool) {
if o == nil || o.City == nil {
return nil, false
}
return o.City, true
}
// HasCity returns a boolean if a field has been set.
func (o *BillingAddress1) HasCity() bool {
if o != nil && o.City != nil {
return true
}
return false
}
// SetCity gets a reference to the given string and assigns it to the City field.
func (o *BillingAddress1) SetCity(v string) {
o.City = &v
}
// GetZipcode returns the Zipcode field value if set, zero value otherwise.
func (o *BillingAddress1) GetZipcode() string {
if o == nil || o.Zipcode == nil {
var ret string
return ret
}
return *o.Zipcode
}
// GetZipcodeOk returns a tuple with the Zipcode field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BillingAddress1) GetZipcodeOk() (*string, bool) {
if o == nil || o.Zipcode == nil {
return nil, false
}
return o.Zipcode, true
}
// HasZipcode returns a boolean if a field has been set.
func (o *BillingAddress1) HasZipcode() bool {
if o != nil && o.Zipcode != nil {
return true
}
return false
}
// SetZipcode gets a reference to the given string and assigns it to the Zipcode field.
func (o *BillingAddress1) SetZipcode(v string) {
o.Zipcode = &v
}
// GetStreet returns the Street field value if set, zero value otherwise.
func (o *BillingAddress1) GetStreet() string {
if o == nil || o.Street == nil {
var ret string
return ret
}
return *o.Street
}
// GetStreetOk returns a tuple with the Street field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BillingAddress1) GetStreetOk() (*string, bool) {
if o == nil || o.Street == nil {
return nil, false
}
return o.Street, true
}
// HasStreet returns a boolean if a field has been set.
func (o *BillingAddress1) HasStreet() bool {
if o != nil && o.Street != nil {
return true
}
return false
}
// SetStreet gets a reference to the given string and assigns it to the Street field.
func (o *BillingAddress1) SetStreet(v string) {
o.Street = &v
}
func (o BillingAddress1) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
if o.City != nil {
toSerialize["city"] = o.City
}
if o.Zipcode != nil {
toSerialize["zipcode"] = o.Zipcode
}
if o.Street != nil {
toSerialize["street"] = o.Street
}
return json.Marshal(toSerialize)
}
type NullableBillingAddress1 struct {
value *BillingAddress1
isSet bool
}
func (v NullableBillingAddress1) Get() *BillingAddress1 {
return v.value
}
func (v *NullableBillingAddress1) Set(val *BillingAddress1) {
v.value = val
v.isSet = true
}
func (v NullableBillingAddress1) IsSet() bool {
return v.isSet
}
func (v *NullableBillingAddress1) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableBillingAddress1(val *BillingAddress1) *NullableBillingAddress1 {
return &NullableBillingAddress1{value: val, isSet: true}
}
func (v NullableBillingAddress1) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableBillingAddress1) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
}
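// Hypothetical usage sketch (not part of the generated file): unset
// optional fields are omitted by MarshalJSON above.
//
//	addr := NewBillingAddress1WithDefaults()
//	addr.SetCity("Warsaw")
//	b, _ := json.Marshal(addr) // -> {"city":"Warsaw"}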
job.go | /*
Copyright 2018 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package job
import (
"context"
"crypto/tls"
"fmt"
"time"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"github.com/heptio/sonobuoy/pkg/errlog"
"github.com/heptio/sonobuoy/pkg/plugin"
"github.com/heptio/sonobuoy/pkg/plugin/driver"
"github.com/heptio/sonobuoy/pkg/plugin/driver/utils"
"github.com/heptio/sonobuoy/pkg/plugin/manifest"
sonotime "github.com/heptio/sonobuoy/pkg/time"
)
const (
// pollingInterval is the time between polls when monitoring the job status.
pollingInterval = 10 * time.Second
)
// Plugin is a plugin driver that dispatches a single pod to the given
// kubernetes cluster.
type Plugin struct {
driver.Base
}
// Ensure Plugin implements plugin.Interface
var _ plugin.Interface = &Plugin{}
// NewPlugin creates a new Job plugin from the given Plugin Definition
// and sonobuoy aggregator address.
func NewPlugin(dfn manifest.Manifest, namespace, sonobuoyImage, imagePullPolicy, imagePullSecrets string, customAnnotations map[string]string) *Plugin {
return &Plugin{
driver.Base{
Definition: dfn,
SessionID: utils.GetSessionID(),
Namespace: namespace,
SonobuoyImage: sonobuoyImage,
ImagePullPolicy: imagePullPolicy,
ImagePullSecrets: imagePullSecrets,
CustomAnnotations: customAnnotations,
CleanedUp: false, // be explicit
},
}
}
// ExpectedResults returns the list of results expected for this plugin. Since
// a Job only launches one pod, only one result type is expected.
func (p *Plugin) ExpectedResults(nodes []v1.Node) []plugin.ExpectedResult {
return []plugin.ExpectedResult{
{ResultType: p.GetName(), NodeName: plugin.GlobalResult},
}
}
func (p *Plugin) createPodDefinition(hostname string, cert *tls.Certificate, ownerPod *v1.Pod) v1.Pod {
pod := v1.Pod{}
annotations := map[string]string{
"sonobuoy-driver": p.GetDriver(),
"sonobuoy-plugin": p.GetName(),
}
for k, v := range p.CustomAnnotations {
annotations[k] = v
}
labels := map[string]string{
"component": "sonobuoy",
"tier": "analysis",
"sonobuoy-run": p.SessionID,
"sonobuoy-plugin": p.GetName(),
}
pod.ObjectMeta = metav1.ObjectMeta{
Name: fmt.Sprintf("sonobuoy-%s-job-%s", p.GetName(), p.SessionID),
Namespace: p.Namespace,
Labels: labels,
Annotations: annotations,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "v1",
Kind: "Pod",
Name: ownerPod.GetName(),
UID: ownerPod.GetUID(),
},
},
}
var podSpec v1.PodSpec
if p.Definition.PodSpec != nil {
podSpec = p.Definition.PodSpec.PodSpec
} else {
podSpec = driver.DefaultPodSpec(p.GetDriver())
}
podSpec.Containers = append(podSpec.Containers,
p.Definition.Spec.Container,
p.CreateWorkerContainerDefintion(hostname, cert, []string{"/sonobuoy"}, []string{"worker", "global", "-v", "5", "--logtostderr"}),
)
if len(p.ImagePullSecrets) > 0 {
podSpec.ImagePullSecrets = append(podSpec.ImagePullSecrets, v1.LocalObjectReference{
Name: p.ImagePullSecrets,
})
}
podSpec.Volumes = append(podSpec.Volumes, v1.Volume{
Name: "results",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
})
for _, v := range p.Definition.ExtraVolumes {
podSpec.Volumes = append(podSpec.Volumes, v.Volume)
}
pod.Spec = podSpec
return pod
}
// Run dispatches worker pods according to the Job's configuration.
func (p *Plugin) Run(kubeclient kubernetes.Interface, hostname string, cert *tls.Certificate, ownerPod *v1.Pod) error {
job := p.createPodDefinition(fmt.Sprintf("https://%s", hostname), cert, ownerPod)
secret, err := p.MakeTLSSecret(cert)
if err != nil {
return errors.Wrapf(err, "couldn't make secret for Job plugin %v", p.GetName())
}
if _, err := kubeclient.CoreV1().Secrets(p.Namespace).Create(secret); err != nil {
return errors.Wrapf(err, "couldn't create TLS secret for job plugin %v", p.GetName())
}
if _, err := kubeclient.CoreV1().Pods(p.Namespace).Create(&job); err != nil {
return errors.Wrapf(err, "could not create Job resource for Job plugin %v", p.GetName())
}
return nil
}
// Monitor adheres to plugin.Interface by ensuring the pod created by the job
// doesn't have any unrecoverable failures. It closes the results channel when
// it is done.
func (p *Plugin) Monitor(ctx context.Context, kubeclient kubernetes.Interface, _ []v1.Node, resultsCh chan<- *plugin.Result) {
defer close(resultsCh)
for {
// Sleep between each poll, which should give the Job
// enough time to create a Pod.
// TODO: maybe use a watcher instead of polling.
select {
case <-ctx.Done():
return
case <-sonotime.After(pollingInterval):
}
done, errResult := p.monitorOnce(kubeclient, nil)
if errResult != nil {
resultsCh <- errResult
}
if done {
return
}
}
}
func (p *Plugin) monitorOnce(kubeclient kubernetes.Interface, _ []v1.Node) (done bool, errResult *plugin.Result) {
// If we've cleaned up after ourselves, stop monitoring
if p.CleanedUp {
return true, nil
}
// Make sure there's a pod
pod, err := p.findPod(kubeclient)
if err != nil {
return true, utils.MakeErrorResult(p.GetName(), map[string]interface{}{"error": err.Error()}, plugin.GlobalResult)
}
// Make sure the pod isn't failing
if isFailing, reason := utils.IsPodFailing(pod); isFailing {
return true, utils.MakeErrorResult(p.GetName(), map[string]interface{}{
"error": reason,
"pod": pod,
}, plugin.GlobalResult)
}
return false, nil
}
// Cleanup cleans up the k8s Job and ConfigMap created by this plugin instance
func (p *Plugin) Cleanup(kubeclient kubernetes.Interface) {
p.CleanedUp = true
gracePeriod := int64(plugin.GracefulShutdownPeriod)
deletionPolicy := metav1.DeletePropagationBackground
listOptions := metav1.ListOptions{
LabelSelector: "sonobuoy-run=" + p.GetSessionID(),
}
deleteOptions := metav1.DeleteOptions{
GracePeriodSeconds: &gracePeriod,
PropagationPolicy: &deletionPolicy,
}
// Delete the Pod created by the job manually (just deleting the Job
// doesn't kill the pod, it still lets it finish.)
// TODO: for now we're not actually creating a Job at all, just a
// single Pod, to get the restart semantics we want. But later if we
// want to make this a real Job, we still need to delete pods manually
// after deleting the job.
err := kubeclient.CoreV1().Pods(p.Namespace).DeleteCollection(
&deleteOptions,
listOptions,
)
if err != nil {
errlog.LogError(errors.Wrapf(err, "error deleting pods for Job-%v", p.GetSessionID()))
}
}
func (p *Plugin) listOptions() metav1.ListOptions {
return metav1.ListOptions{
LabelSelector: "sonobuoy-run=" + p.GetSessionID(),
}
}
// findPod finds the pod created by this plugin, using a kubernetes label
// search. If no pod is found, or if multiple pods are found, returns an
// error.
func (p *Plugin) findPod(kubeclient kubernetes.Interface) (*v1.Pod, error) {
pods, err := kubeclient.CoreV1().Pods(p.Namespace).List(p.listOptions())
	if err != nil {
		return nil, errors.WithStack(err)
	}
	if len(pods.Items) != 1 {
		return nil, errors.Errorf("expected one pod created by plugin %v, found %d", p.GetName(), len(pods.Items))
	}
return &pods.Items[0], nil
}
abi.rs | // Generated by gir (https://github.com/gtk-rs/gir @ 1bef39f)
// from gir-files (https://github.com/gtk-rs/gir-files @ 7d95377)
// from gst-gir-files (https://gitlab.freedesktop.org/gstreamer/gir-files-rs.git @ 831b444)
// DO NOT EDIT
use gstreamer_base_sys::*;
use std::env;
use std::error::Error;
use std::ffi::OsString;
use std::mem::{align_of, size_of};
use std::path::Path;
use std::process::Command;
use std::str;
use tempfile::Builder;
static PACKAGES: &[&str] = &["gstreamer-base-1.0"];
#[derive(Clone, Debug)]
struct Compiler {
pub args: Vec<String>,
}
impl Compiler {
pub fn new() -> Result<Self, Box<dyn Error>> {
let mut args = get_var("CC", "cc")?;
args.push("-Wno-deprecated-declarations".to_owned());
// For _Generic
args.push("-std=c11".to_owned());
// For %z support in printf when using MinGW.
args.push("-D__USE_MINGW_ANSI_STDIO".to_owned());
args.extend(get_var("CFLAGS", "")?);
args.extend(get_var("CPPFLAGS", "")?);
args.extend(pkg_config_cflags(PACKAGES)?);
Ok(Self { args })
}
    pub fn compile(&self, src: &Path, out: &Path) -> Result<(), Box<dyn Error>> {
let mut cmd = self.to_command();
cmd.arg(src);
cmd.arg("-o");
cmd.arg(out);
let status = cmd.spawn()?.wait()?;
if !status.success() {
return Err(format!("compilation command {:?} failed, {}", &cmd, status).into());
}
Ok(())
}
fn to_command(&self) -> Command {
let mut cmd = Command::new(&self.args[0]);
cmd.args(&self.args[1..]);
cmd
}
}
fn get_var(name: &str, default: &str) -> Result<Vec<String>, Box<dyn Error>> {
match env::var(name) {
Ok(value) => Ok(shell_words::split(&value)?),
Err(env::VarError::NotPresent) => Ok(shell_words::split(default)?),
Err(err) => Err(format!("{} {}", name, err).into()),
}
}
fn pkg_config_cflags(packages: &[&str]) -> Result<Vec<String>, Box<dyn Error>> {
if packages.is_empty() {
return Ok(Vec::new());
}
let pkg_config = env::var_os("PKG_CONFIG").unwrap_or_else(|| OsString::from("pkg-config"));
let mut cmd = Command::new(pkg_config);
cmd.arg("--cflags");
cmd.args(packages);
let out = cmd.output()?;
if !out.status.success() {
return Err(format!("command {:?} returned {}", &cmd, out.status).into());
}
let stdout = str::from_utf8(&out.stdout)?;
Ok(shell_words::split(stdout.trim())?)
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
struct Layout {
size: usize,
alignment: usize,
}
#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
struct Results {
/// Number of successfully completed tests.
passed: usize,
/// Total number of failed tests (including those that failed to compile).
failed: usize,
}
impl Results {
fn record_passed(&mut self) {
self.passed += 1;
}
fn record_failed(&mut self) {
self.failed += 1;
}
fn summary(&self) -> String {
format!("{} passed; {} failed", self.passed, self.failed)
}
fn expect_total_success(&self) {
if self.failed == 0 {
println!("OK: {}", self.summary());
} else {
panic!("FAILED: {}", self.summary());
};
}
}
#[test]
fn cross_validate_constants_with_c() {
let mut c_constants: Vec<(String, String)> = Vec::new();
for l in get_c_output("constant").unwrap().lines() {
let mut words = l.trim().split(';');
let name = words.next().expect("Failed to parse name").to_owned();
let value = words
.next()
.and_then(|s| s.parse().ok())
.expect("Failed to parse value");
c_constants.push((name, value));
}
let mut results = Results::default();
for ((rust_name, rust_value), (c_name, c_value)) in
RUST_CONSTANTS.iter().zip(c_constants.iter())
{
if rust_name != c_name {
results.record_failed();
eprintln!("Name mismatch:\nRust: {:?}\nC: {:?}", rust_name, c_name,);
continue;
}
if rust_value != c_value {
results.record_failed();
eprintln!(
"Constant value mismatch for {}\nRust: {:?}\nC: {:?}",
rust_name, rust_value, &c_value
);
continue;
}
results.record_passed();
}
results.expect_total_success();
}
#[test]
fn cross_validate_layout_with_c() {
let mut c_layouts = Vec::new();
for l in get_c_output("layout").unwrap().lines() {
let mut words = l.trim().split(';');
let name = words.next().expect("Failed to parse name").to_owned();
let size = words
.next()
.and_then(|s| s.parse().ok())
.expect("Failed to parse size");
let alignment = words
.next()
.and_then(|s| s.parse().ok())
.expect("Failed to parse alignment");
c_layouts.push((name, Layout { size, alignment }));
}
let mut results = Results::default();
for ((rust_name, rust_layout), (c_name, c_layout)) in RUST_LAYOUTS.iter().zip(c_layouts.iter())
{
if rust_name != c_name {
results.record_failed();
eprintln!("Name mismatch:\nRust: {:?}\nC: {:?}", rust_name, c_name,);
continue;
}
if rust_layout != c_layout {
results.record_failed();
eprintln!(
"Layout mismatch for {}\nRust: {:?}\nC: {:?}",
rust_name, rust_layout, &c_layout
);
continue;
}
results.record_passed();
}
results.expect_total_success();
}
fn get_c_output(name: &str) -> Result<String, Box<dyn Error>> {
let tmpdir = Builder::new().prefix("abi").tempdir()?;
let exe = tmpdir.path().join(name);
let c_file = Path::new("tests").join(name).with_extension("c");
let cc = Compiler::new().expect("configured compiler");
cc.compile(&c_file, &exe)?;
let mut abi_cmd = Command::new(exe);
let output = abi_cmd.output()?;
if !output.status.success() {
return Err(format!("command {:?} failed, {:?}", &abi_cmd, &output).into());
}
Ok(String::from_utf8(output.stdout)?)
}
const RUST_LAYOUTS: &[(&str, Layout)] = &[
(
"GstAggregator",
Layout {
size: size_of::<GstAggregator>(),
alignment: align_of::<GstAggregator>(),
},
),
(
"GstAggregatorClass",
Layout {
size: size_of::<GstAggregatorClass>(),
alignment: align_of::<GstAggregatorClass>(),
},
),
(
"GstAggregatorPad",
Layout {
size: size_of::<GstAggregatorPad>(),
alignment: align_of::<GstAggregatorPad>(),
},
),
(
"GstAggregatorPadClass",
Layout {
size: size_of::<GstAggregatorPadClass>(),
alignment: align_of::<GstAggregatorPadClass>(),
},
),
(
"GstAggregatorStartTimeSelection",
Layout {
size: size_of::<GstAggregatorStartTimeSelection>(),
alignment: align_of::<GstAggregatorStartTimeSelection>(),
},
),
(
"GstBaseParse",
Layout {
size: size_of::<GstBaseParse>(),
alignment: align_of::<GstBaseParse>(),
},
),
(
"GstBaseParseClass",
Layout {
size: size_of::<GstBaseParseClass>(),
alignment: align_of::<GstBaseParseClass>(),
},
),
(
"GstBaseParseFrame",
Layout {
size: size_of::<GstBaseParseFrame>(),
alignment: align_of::<GstBaseParseFrame>(),
},
),
(
"GstBaseParseFrameFlags",
Layout {
size: size_of::<GstBaseParseFrameFlags>(),
alignment: align_of::<GstBaseParseFrameFlags>(),
},
),
(
"GstBaseSink",
Layout {
size: size_of::<GstBaseSink>(),
alignment: align_of::<GstBaseSink>(),
},
),
(
"GstBaseSinkClass",
Layout {
size: size_of::<GstBaseSinkClass>(),
alignment: align_of::<GstBaseSinkClass>(),
},
),
(
"GstBaseSrc",
Layout {
size: size_of::<GstBaseSrc>(),
alignment: align_of::<GstBaseSrc>(),
},
),
(
"GstBaseSrcClass",
Layout {
size: size_of::<GstBaseSrcClass>(),
alignment: align_of::<GstBaseSrcClass>(),
},
),
(
"GstBaseSrcFlags",
Layout {
size: size_of::<GstBaseSrcFlags>(),
alignment: align_of::<GstBaseSrcFlags>(),
},
),
(
"GstBaseTransform",
Layout {
size: size_of::<GstBaseTransform>(),
alignment: align_of::<GstBaseTransform>(),
},
),
(
"GstBaseTransformClass",
Layout {
size: size_of::<GstBaseTransformClass>(),
alignment: align_of::<GstBaseTransformClass>(),
},
),
(
"GstBitReader",
Layout {
size: size_of::<GstBitReader>(),
alignment: align_of::<GstBitReader>(),
},
),
(
"GstBitWriter",
Layout {
size: size_of::<GstBitWriter>(),
alignment: align_of::<GstBitWriter>(),
},
),
(
"GstByteReader",
Layout {
size: size_of::<GstByteReader>(),
alignment: align_of::<GstByteReader>(),
},
),
(
"GstByteWriter",
Layout {
size: size_of::<GstByteWriter>(),
alignment: align_of::<GstByteWriter>(),
},
),
(
"GstCollectData",
Layout {
size: size_of::<GstCollectData>(),
alignment: align_of::<GstCollectData>(),
},
),
(
"GstCollectPads",
Layout {
size: size_of::<GstCollectPads>(),
alignment: align_of::<GstCollectPads>(),
},
),
(
"GstCollectPadsClass",
Layout {
size: size_of::<GstCollectPadsClass>(),
alignment: align_of::<GstCollectPadsClass>(),
},
),
(
"GstCollectPadsStateFlags",
Layout {
size: size_of::<GstCollectPadsStateFlags>(),
alignment: align_of::<GstCollectPadsStateFlags>(),
},
),
(
"GstDataQueue",
Layout {
size: size_of::<GstDataQueue>(),
alignment: align_of::<GstDataQueue>(),
},
),
(
"GstDataQueueClass",
Layout {
size: size_of::<GstDataQueueClass>(),
alignment: align_of::<GstDataQueueClass>(),
},
),
(
"GstDataQueueItem",
Layout {
size: size_of::<GstDataQueueItem>(),
alignment: align_of::<GstDataQueueItem>(),
},
),
(
"GstDataQueueSize",
Layout {
size: size_of::<GstDataQueueSize>(),
alignment: align_of::<GstDataQueueSize>(),
},
),
(
"GstPushSrc",
Layout {
size: size_of::<GstPushSrc>(),
alignment: align_of::<GstPushSrc>(),
},
),
(
"GstPushSrcClass",
Layout {
size: size_of::<GstPushSrcClass>(),
alignment: align_of::<GstPushSrcClass>(),
},
),
];
const RUST_CONSTANTS: &[(&str, &str)] = &[
("(gint) GST_AGGREGATOR_START_TIME_SELECTION_FIRST", "1"),
("(gint) GST_AGGREGATOR_START_TIME_SELECTION_SET", "2"),
("(gint) GST_AGGREGATOR_START_TIME_SELECTION_ZERO", "0"),
("GST_BASE_PARSE_FLAG_DRAINING", "2"),
("GST_BASE_PARSE_FLAG_LOST_SYNC", "1"),
("(guint) GST_BASE_PARSE_FRAME_FLAG_CLIP", "4"),
("(guint) GST_BASE_PARSE_FRAME_FLAG_DROP", "8"),
("(guint) GST_BASE_PARSE_FRAME_FLAG_NEW_FRAME", "1"),
("(guint) GST_BASE_PARSE_FRAME_FLAG_NONE", "0"),
("(guint) GST_BASE_PARSE_FRAME_FLAG_NO_FRAME", "2"),
("(guint) GST_BASE_PARSE_FRAME_FLAG_QUEUE", "16"),
("(guint) GST_BASE_SRC_FLAG_LAST", "1048576"),
("(guint) GST_BASE_SRC_FLAG_STARTED", "32768"),
("(guint) GST_BASE_SRC_FLAG_STARTING", "16384"),
("GST_BASE_TRANSFORM_SINK_NAME", "sink"),
("GST_BASE_TRANSFORM_SRC_NAME", "src"),
("(guint) GST_COLLECT_PADS_STATE_EOS", "1"),
("(guint) GST_COLLECT_PADS_STATE_FLUSHING", "2"),
("(guint) GST_COLLECT_PADS_STATE_LOCKED", "16"),
("(guint) GST_COLLECT_PADS_STATE_NEW_SEGMENT", "4"),
("(guint) GST_COLLECT_PADS_STATE_WAITING", "8"),
];
response.go | // Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package http
import (
"encoding/json"
"fmt"
"html/template"
"net/http"
"strings"
"time"
"github.com/ie310mu/ie310go/forks/github.com/ethereum/go-ethereum/log"
"github.com/ie310mu/ie310go/forks/github.com/ethereum/go-ethereum/metrics"
"github.com/ie310mu/ie310go/forks/github.com/ethereum/go-ethereum/swarm/api"
)
var (
htmlCounter = metrics.NewRegisteredCounter("api.http.errorpage.html.count", nil)
jsonCounter = metrics.NewRegisteredCounter("api.http.errorpage.json.count", nil)
plaintextCounter = metrics.NewRegisteredCounter("api.http.errorpage.plaintext.count", nil)
)
type ResponseParams struct {
Msg template.HTML
Code int
Timestamp string
template *template.Template
Details template.HTML
}
// ShowMultipleChoices is used when a user requests a resource in a manifest which results
// in ambiguous results. It returns an HTML page with clickable links to each of the entries
// in the manifest which fit the request URI ambiguity.
// For example, if the user requests bzz:/<hash>/read and that manifest contains entries
// "readme.md" and "readinglist.txt", an HTML page is returned with these two links.
// This only applies if the manifest has no default entry.
func ShowMultipleChoices(w http.ResponseWriter, r *http.Request, list api.ManifestList) {
log.Debug("ShowMultipleChoices", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context()))
msg := ""
if list.Entries == nil {
respondError(w, r, "Could not resolve", http.StatusInternalServerError)
return
}
requestUri := strings.TrimPrefix(r.RequestURI, "/")
uri, err := api.Parse(requestUri)
if err != nil {
respondError(w, r, "Bad Request", http.StatusBadRequest)
}
uri.Scheme = "bzz-list"
msg += fmt.Sprintf("Disambiguation:<br/>Your request may refer to multiple choices.<br/>Click <a class=\"orange\" href='"+"/"+uri.String()+"'>here</a> if your browser does not redirect you within 5 seconds.<script>setTimeout(\"location.href='%s';\",5000);</script><br/>", "/"+uri.String())
respondTemplate(w, r, "error", msg, http.StatusMultipleChoices)
}
func respondTemplate(w http.ResponseWriter, r *http.Request, templateName, msg string, code int) {
log.Debug("respondTemplate", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context()))
respond(w, r, &ResponseParams{
Code: code,
Msg: template.HTML(msg),
Timestamp: time.Now().Format(time.RFC1123),
template: TemplatesMap[templateName],
})
}
func respondError(w http.ResponseWriter, r *http.Request, msg string, code int) {
log.Info("respondError", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context()), "code", code)
respondTemplate(w, r, "error", msg, code)
}
func respond(w http.ResponseWriter, r *http.Request, params *ResponseParams) {
	if params.Code >= 400 {
		// headers must be adjusted before WriteHeader flushes them
		w.Header().Del("Cache-Control")
		w.Header().Del("ETag")
	}
	w.WriteHeader(params.Code)
acceptHeader := r.Header.Get("Accept")
// this cannot be in a switch since an Accept header can have multiple values: "Accept: */*, text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8"
	if strings.Contains(acceptHeader, "application/json") {
		if err := respondJSON(w, r, params); err != nil {
			respondError(w, r, "Internal server error", http.StatusInternalServerError)
		}
	} else if strings.Contains(acceptHeader, "text/html") {
respondHTML(w, r, params)
} else {
respondPlaintext(w, r, params) //returns nice errors for curl
}
}
func respondHTML(w http.ResponseWriter, r *http.Request, params *ResponseParams) {
htmlCounter.Inc(1)
log.Info("respondHTML", "ruid", GetRUID(r.Context()), "code", params.Code)
err := params.template.Execute(w, params)
if err != nil {
log.Error(err.Error())
}
}
func respondJSON(w http.ResponseWriter, r *http.Request, params *ResponseParams) error {
jsonCounter.Inc(1)
log.Info("respondJSON", "ruid", GetRUID(r.Context()), "code", params.Code)
w.Header().Set("Content-Type", "application/json")
return json.NewEncoder(w).Encode(params)
}
func respondPlaintext(w http.ResponseWriter, r *http.Request, params *ResponseParams) error {
plaintextCounter.Inc(1)
log.Info("respondPlaintext", "ruid", GetRUID(r.Context()), "code", params.Code)
w.Header().Set("Content-Type", "text/plain")
strToWrite := "Code: " + fmt.Sprintf("%d", params.Code) + "\n"
strToWrite += "Message: " + string(params.Msg) + "\n"
strToWrite += "Timestamp: " + params.Timestamp + "\n"
_, err := w.Write([]byte(strToWrite))
return err
}
list_case_labels_response.py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListCaseLabelsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'case_label_list': 'list[CaseLabelInfo]'
}
attribute_map = {
'case_label_list': 'case_label_list'
}
def __init__(self, case_label_list=None):
"""ListCaseLabelsResponse - a model defined in huaweicloud sdk"""
super(ListCaseLabelsResponse, self).__init__()
self._case_label_list = None
self.discriminator = None
if case_label_list is not None:
self.case_label_list = case_label_list
@property
def case_label_list(self):
"""Gets the case_label_list of this ListCaseLabelsResponse.
        The list of labels associated with the case (work order)
:return: The case_label_list of this ListCaseLabelsResponse.
:rtype: list[CaseLabelInfo]
"""
return self._case_label_list
@case_label_list.setter
def case_label_list(self, case_label_list):
"""Sets the case_label_list of this ListCaseLabelsResponse.
        The list of labels associated with the case (work order)
:param case_label_list: The case_label_list of this ListCaseLabelsResponse.
:type: list[CaseLabelInfo]
"""
self._case_label_list = case_label_list
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListCaseLabelsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Retu | ue if both objects are not equal"""
return not self == other
| rns tr |
MarketingRedefiningGetactlist.py | from aliexpress.api.base import RestApi
class AliexpressMarketingRedefiningGetactlistRequest(RestApi):
def __init__(self, domain="gw.api.taobao.com", port=80):
RestApi.__init__(self, domain, port)
self.param_seller_coupon_activity_api_query = None
def getapiname(self):
return "aliexpress.marketing.redefining.getactlist"
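# Hypothetical usage sketch (not part of the file); the response call is an
# assumption based on Taobao-style RestApi clients:
#
#     req = AliexpressMarketingRedefiningGetactlistRequest()
#     req.param_seller_coupon_activity_api_query = {...}  # query payload
#     resp = req.getResponse()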
destroy.go | // Copyright 2016-2018, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"fmt"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/pulumi/pulumi/pkg/v2/backend"
"github.com/pulumi/pulumi/pkg/v2/backend/display"
"github.com/pulumi/pulumi/pkg/v2/engine"
"github.com/pulumi/pulumi/sdk/v2/go/common/resource"
"github.com/pulumi/pulumi/sdk/v2/go/common/util/cmdutil"
"github.com/pulumi/pulumi/sdk/v2/go/common/util/result"
)
func newDestroyCmd() *cobra.Command {
var debug bool
var stack string
var message string
var execKind string
// Flags for engine.UpdateOptions.
var diffDisplay bool
var eventLogPath string
var parallel int
var refresh bool
var showConfig bool
var showReplacementSteps bool
var showSames bool
var skipPreview bool
var suppressOutputs bool
var suppressPermaLink bool
var yes bool
var targets *[]string
var targetDependents bool
var cmd = &cobra.Command{
Use: "destroy",
SuggestFor: []string{"delete", "down", "kill", "remove", "rm", "stop"},
Short: "Destroy an existing stack and its resources",
Long: "Destroy an existing stack and its resources\n" +
"\n" +
"This command deletes an entire existing stack by name. The current state is\n" +
"loaded from the associated state file in the workspace. After running to completion,\n" +
"all of this stack's resources and associated state will be gone.\n" +
"\n" +
"Warning: this command is generally irreversible and should be used with great care.",
Args: cmdutil.NoArgs,
Run: cmdutil.RunResultFunc(func(cmd *cobra.Command, args []string) result.Result {
yes = yes || skipConfirmations()
interactive := cmdutil.Interactive()
if !interactive && !yes {
return result.FromError(errors.New("--yes must be passed in to proceed when running in non-interactive mode"))
}
opts, err := updateFlagsToOptions(interactive, skipPreview, yes)
if err != nil {
return result.FromError(err)
}
var displayType = display.DisplayProgress
if diffDisplay {
displayType = display.DisplayDiff
}
opts.Display = display.Options{
Color: cmdutil.GetGlobalColorization(),
ShowConfig: showConfig,
ShowReplacementSteps: showReplacementSteps,
ShowSameResources: showSames,
SuppressOutputs: suppressOutputs,
SuppressPermaLink: suppressPermaLink,
IsInteractive: interactive,
Type: displayType,
EventLogPath: eventLogPath,
Debug: debug,
}
s, err := requireStack(stack, false, opts.Display, true /*setCurrent*/)
if err != nil {
return result.FromError(err)
}
proj, root, err := readProject()
if err != nil {
return result.FromError(err)
}
m, err := getUpdateMetadata(message, root, execKind)
if err != nil {
return result.FromError(errors.Wrap(err, "gathering environment metadata"))
}
sm, err := getStackSecretsManager(s)
if err != nil {
return result.FromError(errors.Wrap(err, "getting secrets manager"))
}
cfg, err := getStackConfiguration(s, sm)
if err != nil {
return result.FromError(errors.Wrap(err, "getting stack configuration"))
}
targetUrns := []resource.URN{}
for _, t := range *targets {
targetUrns = append(targetUrns, resource.URN(t))
}
opts.Engine = engine.UpdateOptions{
Parallel: parallel,
Debug: debug,
Refresh: refresh,
DestroyTargets: targetUrns,
TargetDependents: targetDependents,
UseLegacyDiff: useLegacyDiff(),
DisableProviderPreview: disableProviderPreview(),
}
_, res := s.Destroy(commandContext(), backend.UpdateOperation{
Proj: proj,
Root: root,
M: m,
Opts: opts,
StackConfiguration: cfg,
SecretsManager: sm,
Scopes: cancellationScopes, | fmt.Printf("The resources in the stack have been deleted, but the history and configuration "+
"associated with the stack are still maintained. \nIf you want to remove the stack "+
"completely, run 'pulumi stack rm %s'.\n", s.Ref())
} else if res != nil && res.Error() == context.Canceled {
return result.FromError(errors.New("destroy cancelled"))
}
return PrintEngineResult(res)
}),
}
cmd.PersistentFlags().BoolVarP(
&debug, "debug", "d", false,
"Print detailed debugging output during resource operations")
cmd.PersistentFlags().StringVarP(
&stack, "stack", "s", "",
"The name of the stack to operate on. Defaults to the current stack")
cmd.PersistentFlags().StringVar(
&stackConfigFile, "config-file", "",
"Use the configuration values in the specified file rather than detecting the file name")
cmd.PersistentFlags().StringVarP(
&message, "message", "m", "",
"Optional message to associate with the destroy operation")
targets = cmd.PersistentFlags().StringArrayP(
"target", "t", []string{},
"Specify a single resource URN to destroy. All resources necessary to destroy this target will also be destroyed."+
" Multiple resources can be specified using: --target urn1 --target urn2")
cmd.PersistentFlags().BoolVar(
&targetDependents, "target-dependents", false,
"Allows destroying of dependent targets discovered but not specified in --target list")
// Flags for engine.UpdateOptions.
cmd.PersistentFlags().BoolVar(
&diffDisplay, "diff", false,
"Display operation as a rich diff showing the overall change")
cmd.PersistentFlags().IntVarP(
¶llel, "parallel", "p", defaultParallel,
"Allow P resource operations to run in parallel at once (1 for no parallelism). Defaults to unbounded.")
cmd.PersistentFlags().BoolVarP(
&refresh, "refresh", "r", false,
"Refresh the state of the stack's resources before this update")
cmd.PersistentFlags().BoolVar(
&showConfig, "show-config", false,
"Show configuration keys and variables")
cmd.PersistentFlags().BoolVar(
&showReplacementSteps, "show-replacement-steps", false,
"Show detailed resource replacement creates and deletes instead of a single step")
cmd.PersistentFlags().BoolVar(
&showSames, "show-sames", false,
"Show resources that don't need to be updated because they haven't changed, alongside those that do")
cmd.PersistentFlags().BoolVar(
&skipPreview, "skip-preview", false,
"Do not perform a preview before performing the destroy")
cmd.PersistentFlags().BoolVar(
&suppressOutputs, "suppress-outputs", false,
"Suppress display of stack outputs (in case they contain sensitive values)")
cmd.PersistentFlags().BoolVar(
&suppressPermaLink, "suppress-permalink", false,
"Suppress display of the state permalink")
cmd.PersistentFlags().BoolVarP(
&yes, "yes", "y", false,
"Automatically approve and perform the destroy after previewing it")
if hasDebugCommands() {
cmd.PersistentFlags().StringVar(
&eventLogPath, "event-log", "",
"Log events to a file at this path")
}
// internal flag
cmd.PersistentFlags().StringVar(&execKind, "exec-kind", "", "")
// ignore err, only happens if flag does not exist
_ = cmd.PersistentFlags().MarkHidden("exec-kind")
return cmd
} | })
if res == nil && len(*targets) == 0 { |
15.2.3.9-2-d-7.js | /// Copyright (c) 2012 Ecma International. All rights reserved.
/// Ecma International makes this code available under the terms and conditions set
/// forth on http://hg.ecmascript.org/tests/test262/raw-file/tip/LICENSE (the
/// "Use Terms"). Any redistribution of this code must retain the above | /**
* @path ch15/15.2/15.2.3/15.2.3.9/15.2.3.9-2-d-7.js
* @description Object.freeze - 'O' is a RegExp object
*/
function testcase() {
var regObj = new RegExp();
Object.freeze(regObj);
return Object.isFrozen(regObj);
}
runTestCase(testcase);
bdist_rpm.py | import distutils.command.bdist_rpm as orig
class bdist_rpm(orig.bdist_rpm):
"""
Override the default bdist_rpm behavior to do the following:
1. Run egg_info to ensure the name and version are properly calculated.
    2. Always run 'install' using --single-version-externally-managed to
       disable eggs in RPM distributions.
    """

    def run(self):
# ensure distro name is up-to-date
self.run_command("egg_info")
orig.bdist_rpm.run(self)
def _make_spec_file(self):
spec = orig.bdist_rpm._make_spec_file(self)
spec = [
line.replace(
"setup.py install ",
"setup.py install --single-version-externally-managed ",
).replace("%setup", "%setup -n %{name}-%{unmangled_version}")
for line in spec
]
        return spec
FVC_utilities.py | import os
import re
import io
import numpy as np
import PIL.Image
import typing
from pynger.types import Image, Mask, Field
from pynger.fingerprint.tuning_lro import LROEstimator
from pynger.fingerprint.sampling import convert_to_full, subsample
from pynger.field.manipulation import polar2cart
from pynger.misc import recursively_scan_dir_gen, recursively_scan_dir, random_combination
from itertools import combinations, starmap
class Proxy:
def write(self, path: str):
raise NotImplementedError("Derived classes must reimplement this method")
def read(self, path: str):
raise NotImplementedError("Derived classes must reimplement this method")
class MaskProxy(Proxy):
def __init__(self, *args):
if len(args) == 1:
if isinstance(args[0], np.ndarray):
self.mask = args[0]
elif isinstance(args[0], str):
self.read(args[0])
else:
raise TypeError("Arguments not recognized")
else:
self.mask = None
def read(self, path: str, full: bool = True):
""" Reads the mask, according to FVC-OnGoing specs.
Args:
path: The input file path (generally with .fg extension)
full: Whether the full output should be returned (not implemented yet)
Return:
The boolean mask represented in the given file.
"""
if not os.path.exists(path):
raise RuntimeError("The input file does not exist")
with open(path, 'r') as f:
shape = tuple([int(n) for n in f.readline().split()])
mask = np.empty(shape, dtype=bool)
for row_n, line in enumerate(f):
mask[row_n,:] = [bool(int(n)) for n in line.split()]
self.mask = mask
return mask
    def write(self, path: str):
        """ Writes the mask, according to FVC-OnGoing specs.
        Args:
            path: The output file path (generally with .fg extension)
        """
        with open(path, 'w') as f:
            # mirror read(): first line is "rows cols", then one row of 0/1 values per line
            print('{} {}'.format(*self.mask.shape), file=f)
            for line in self.mask.astype(int):
                print(' '.join(str(v) for v in line), file=f)
class FieldProxy(Proxy):
def __init__(self, *args):
if len(args) == 2 and isinstance(args[0], np.ndarray) and isinstance(args[1], np.ndarray):
self.angle, self.mask = args[0].copy(), args[1].copy()
elif len(args) == 1 and isinstance(args[0], str):
self.read(args[0])
else:
self.angle, self.mask = None, None
def read(self, path: str, full: bool = True):
""" Reads the field, according to FVC-OnGoing specs.
Args:
path: The input file path (generally with .gt extension)
full: Whether the full output should be returned
Return:
The field represented in the given file.
"""
if not os.path.exists(path):
raise RuntimeError("The input file does not exist")
with open(path, 'rb') as f:
# Read and discard the header. To visualize -> print(f.read(8).decode('ascii'))
f.read(8)
# Read the field specifications
get_next_int = lambda: int.from_bytes(f.read(4), byteorder='little', signed=True)
self.border_x = get_next_int()
self.border_y = get_next_int()
self.step_x = get_next_int()
self.step_y = get_next_int()
cols = get_next_int()
rows = get_next_int()
# Read the values
get_next_uint8 = lambda: int.from_bytes(f.read(1), byteorder='little', signed=False)
content = [(get_next_uint8(), get_next_uint8()) for _ in range(cols*rows)]
angle, mask = zip(*content)
angle = np.array(angle, dtype=float).reshape((rows, cols))
angle *= np.pi / 255.0
mask = np.array(mask, dtype=bool).reshape((rows, cols))
# Optionally convert to full matrix
if full:
self.angle = convert_to_full(angle, border_x=self.border_x, border_y=self.border_y, step_x=self.step_x, step_y=self.step_y, mode='constant')
self.mask = convert_to_full(mask, border_x=self.border_x, border_y=self.border_y, step_x=self.step_x, step_y=self.step_y, mode='constant')
else:
self.angle = angle
self.mask = mask
return self.angle, self.mask
def write(self, path: str, **kwargs):
""" Writes the field, according to FVC-OnGoing specs.
Args:
path: The output file path (generally with .gt extension)
Keyword Args:
border_x (int): Horizontal border used to sample the field (defaults to 14)
border_y (int): Vertical border used to sample the field (defaults to 14)
            step_x (int): Horizontal distance between two consecutive sample points (defaults to 8)
            step_y (int): Vertical distance between two consecutive sample points (defaults to 8)
subsample (bool): Whether the input shall be sub-sampled before saving it
Note:
The field is subsampled in the process. To avoid this behaviour, set border parameters to 0 and step parameters to 1.
"""
# Read parameters
bx = kwargs.get('border_x', 14)
by = kwargs.get('border_y', 14)
sx = kwargs.get('step_x', 8)
sy = kwargs.get('step_y', 8)
needSubsample = kwargs.pop('subsample', True)
# Sample the field
if self.angle.shape != self.mask.shape:
raise RuntimeError('angle and mask sizes mismatch')
if needSubsample:
angle = subsample(self.angle, is_field=False, smooth=False, **kwargs)
mask = subsample(self.mask, is_field=False, smooth=False, **kwargs)
else:
angle = self.angle
mask = self.mask
with open(path, 'wb') as f:
f.write("DIRIMG00".encode('ascii'))
# Write the field specifications
put_int = lambda n: f.write(int(n).to_bytes(4, byteorder='little', signed=True))
put_int(bx)
put_int(by)
put_int(sx)
put_int(sy)
rows, cols = angle.shape
put_int(cols)
put_int(rows)
# Values conversion (angle is scaled without in-place ops: when subsample=False, `angle` aliases self.angle)
angle = (angle * (255.0 / np.pi)).astype(int)
mask = mask.astype(int)
mask *= int(255 / mask.max()) if mask.max() > 0 else 0
# Write the values
put_uint8 = lambda n: f.write(int(n).to_bytes(1, byteorder='little', signed=False))
for a, m in zip(angle.ravel(), mask.ravel()):
put_uint8(a)
put_uint8(m)
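# Illustrative usage sketch (hypothetical path): write a field subsampled every 8 pixels with a
# 14-pixel border, then reload it; read(..., full=True) re-expands the grid via convert_to_full.
# FieldProxy(angle, mask).write('/tmp/example.gt', border_x=14, border_y=14, step_x=8, step_y=8)
# angle2, mask2 = FieldProxy().read('/tmp/example.gt', full=True)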
def loadDataset(path: str, loadGT: bool = True):
""" Loads the FVC-TEST dataset.
Args:
path: Directory with the FVC-TEST dataset.
loadGT: whether to load the ground truth information or not.
Return:
A generator of pairs (X, y) where X has the original image, its mask and its border specifications, and y is the corresponding orientation field ground truth.
"""
with open(path, 'r') as f:
_ = int(f.readline())
for line in f:
name, step, bd = line.split()
step = int(step)
bd = int(bd)
# Load image
image_path = os.path.join(os.path.dirname(path), name)
image = np.array(PIL.Image.open(image_path).convert('L')).astype(float)
# Load mask
mask_path = os.path.splitext(image_path)[0]+'.fg'
mask = MaskProxy().read(mask_path)
# Set specifications
specs = [bd, bd, step, step]
# Adjust image shape
_mask = convert_to_full(mask, border_x=bd, border_y=bd, step_x=step, step_y=step, mode='constant')
image = image[:_mask.shape[0], :_mask.shape[1]]
# Load the ground truth field
if loadGT:
field_path = os.path.splitext(image_path)[0]+'.gt'
lro, _ = FieldProxy().read(field_path, full=False)
field = polar2cart(lro, 1, retField=True)
# Serialize input data and append to X and the ground truth information
yield (LROEstimator.serialize_Xrow(image, mask, specs), LROEstimator.serialize_yrow(field))
else:
yield (LROEstimator.serialize_Xrow(image, mask, specs), image_path)
def countDatasetElements(path):
with open(path, 'r') as f:
return int(f.readline())
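# Illustrative iteration sketch (hypothetical index path): the first line of the index holds the
# element count, so countDatasetElements can size a progress bar before consuming the generator.
# n = countDatasetElements('/data/FVC-TEST/index.txt')
# for X_row, y_row in loadDataset('/data/FVC-TEST/index.txt', loadGT=True):
#     ...  # n pairs in total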
def loadSegmentationDataset(sdir: str, odir: str):
""" Loads the dataset for segmentation evaluation.
Args:
sdir: Path to the segmented images; all the images shall be direct children of this directory.
odir: Path to the original images; this folder shall contain as direct children the folder of the databases FVC2000, FVC2002, FVC2004 (from DB1a, DB1b, to DB4a, DB4b) - e.g. the main root of the DVD shipped with Handbook of Fingerprint Recognition.
Note:
If some DB is not available a warning will be issued, but the other images will be loaded anyway.
Return:
A generator of pairs (X, y) where X is the original image, and y the corresponding ground truth segmentation image.
"""
pattern = re.compile('(FVC\\d+)_(\\w+)_\\w+_(\\d+)_(\\d+)')
sfiles = recursively_scan_dir_gen(sdir, '.png')
for sfile in sfiles:
basename = os.path.basename(sfile)
match = pattern.match(basename)
if match:
ofile = os.path.join(
odir,
match[1], # FVCxxxx
'Dbs',
# converts DB1 to Db1, then appends an 'a' for the first 100 images, and a 'b' otherwise
match[2].title() + '_' + ('a' if int(match[3])<=100 else 'b'),
'{}_{}.tif'.format(match[3], match[4]) # append the filename
)
yield (ofile, sfile)
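# Example of the mapping performed above (hypothetical file name): a segmented image named
# 'FVC2000_DB1_XX_12_3.png' under sdir is paired with odir/FVC2000/Dbs/Db1_a/12_3.tif,
# since image number 12 is <= 100 and therefore belongs to the 'a' set.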
def loadMatchingDatasetFVC(path: str):
""" Loads the FVC-TEST dataset.
Args:
path: Directory with the FVC-TEST dataset.
Return:
A dictionary whose keys are pairs of:
- tuples containing a reference to the database and competition where the images belong, and values are lists of pairs (X, y) where X has the pair of image filenames, and y is the corresponding ground truth label, i.e. a 0 for reject or 1 for accept;
- the list of all images found in the given folder.
"""
_, all_image_files = recursively_scan_dir(path, '.tif')
_, index_files = recursively_scan_dir(path, '.MFA')
comp_pattern = re.compile('(FVC\\d+)')
competitions = {}
# Loop over the four possible databases
for db_n in range(1, 5):
for MFA in index_files:
# Derive the companion index from the .MFA one: pairs listed in .MFA are labeled 0 (reject) below, pairs in .MFR are labeled 1 (accept)
MFR = MFA[:-1]+'R'
# Retrieve competition
match = comp_pattern.search(MFA)
if match:
competition = match[1]
else:
competition = 'NULL'
# Retrieve database type (a or b)
db_type = MFA[-5].lower()
# Create a new key for this competition
comp_key = (competition, db_n, db_type)
competitions[comp_key] = []
# Generate database name
db_name = 'Db{}_{}'.format(db_n, db_type)
# Take the subset of images related to this dataset
image_files = [name for name in all_image_files if os.path.basename(os.path.dirname(name)) == db_name]
# Load all the pairs that will be matched
challenge_pairs = []
for ifile, gt in zip([MFA, MFR], [0, 1]):
dir_ = os.path.dirname(ifile)
with open(ifile, 'r') as file_:
for line in file_:
file1, file2 = line.split()
path1 = os.path.join(dir_, db_name, file1)
path2 = os.path.join(dir_, db_name, file2)
challenge_pairs.append( ((path1, path2), gt) )
# Update the competition dictionary
competitions[comp_key] = (challenge_pairs, image_files)
return competitions
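# Shape of the returned dictionary (illustrative values):
# {
#   ('FVC2000', 1, 'a'): (
#       [(('.../FVC2000/Db1_a/1_1.tif', '.../FVC2000/Db1_a/2_1.tif'), 0), ...],  # challenge pairs with labels
#       ['.../FVC2000/Db1_a/1_1.tif', ...]                                       # all images of that database
#   ),
#   ...
# }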
def loadMatchingDatasetNIST(path: str, ratio: float = 2.0, verbose: bool = True):
""" Load NIST SD04 for matching.
Args:
path: Path to the folder containing the images.
ratio: Ratio between the number of impostor and genuine matches.
verbose: whether to print some basic information about the dataset.
Return:
A dictionary with the single key ('NIST', 'SD04', '_') whose value is a pair (challenge_pairs, image_files), where challenge_pairs is a list of pairs ((file1, file2), y) - y being 0 for a non-match and 1 for a match - and image_files is the list of all images found.
"""
# Load all images
_, image_files = recursively_scan_dir(path, ['.jpg', '.jpeg', '.png', '.bmp', '.tif', '.tiff'])
# Split between first and second impression
f_image_files = list(filter(lambda s: os.path.basename(s)[0]=='f', image_files))
# Collect the genuine matches
challenge_pairs = []
for ffile in f_image_files:
basename = os.path.basename(ffile)
basename = 's'+basename[1:]
sfile = os.path.join( os.path.dirname(ffile), basename )
challenge_pairs.append( ((ffile, sfile), 1) )
# Get the total number of impostor and genuine matches
genuine_matches = len(challenge_pairs)
impostor_matches = int(genuine_matches * ratio)
total_matches = genuine_matches + impostor_matches
if verbose:
|
# Collect the impostor matches:
while True:
pair = random_combination(image_files, 2)
left_bname = os.path.basename(pair[0])
right_bname = os.path.basename(pair[1])
if left_bname[1:] == right_bname[1:]:
continue # genuine or the same image
else:
challenge_pairs.append( (pair, 0) )
if len(challenge_pairs) >= total_matches:
break
competitions = {
('NIST', 'SD04', '_'): (challenge_pairs, image_files)
}
return competitions
| print('{} genuine matches and {} impostor matches will be selected'.format(genuine_matches, impostor_matches)) |
vuejs-datepicker.esm.js | function _typeof(obj) {
if (typeof Symbol === "function" && typeof Symbol.iterator === "symbol") {
_typeof = function (obj) {
return typeof obj;
};
} else {
_typeof = function (obj) {
return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj;
};
}
return _typeof(obj);
}
function _classCallCheck(instance, Constructor) {
if (!(instance instanceof Constructor)) {
throw new TypeError("Cannot call a class as a function");
}
}
function _defineProperties(target, props) {
for (var i = 0; i < props.length; i++) {
var descriptor = props[i];
descriptor.enumerable = descriptor.enumerable || false;
descriptor.configurable = true;
if ("value" in descriptor) descriptor.writable = true;
Object.defineProperty(target, descriptor.key, descriptor);
}
}
function | (Constructor, protoProps, staticProps) {
if (protoProps) _defineProperties(Constructor.prototype, protoProps);
if (staticProps) _defineProperties(Constructor, staticProps);
return Constructor;
}
function _defineProperty(obj, key, value) {
if (key in obj) {
Object.defineProperty(obj, key, {
value: value,
enumerable: true,
configurable: true,
writable: true
});
} else {
obj[key] = value;
}
return obj;
}
function _objectSpread(target) {
for (var i = 1; i < arguments.length; i++) {
var source = arguments[i] != null ? arguments[i] : {};
var ownKeys = Object.keys(source);
if (typeof Object.getOwnPropertySymbols === 'function') {
ownKeys = ownKeys.concat(Object.getOwnPropertySymbols(source).filter(function (sym) {
return Object.getOwnPropertyDescriptor(source, sym).enumerable;
}));
}
ownKeys.forEach(function (key) {
_defineProperty(target, key, source[key]);
});
}
return target;
}
var Language =
/*#__PURE__*/
function () {
function Language(language, months, monthsAbbr, days) {
_classCallCheck(this, Language);
this.language = language;
this.months = months;
this.monthsAbbr = monthsAbbr;
this.days = days;
this.rtl = false;
this.ymd = false;
this.yearSuffix = '';
}
_createClass(Language, [{
key: "language",
get: function get() {
return this._language;
},
set: function set(language) {
if (typeof language !== 'string') {
throw new TypeError('Language must be a string');
}
this._language = language;
}
}, {
key: "months",
get: function get() {
return this._months;
},
set: function set(months) {
if (months.length !== 12) {
throw new RangeError("There must be 12 months for ".concat(this.language, " language"));
}
this._months = months;
}
}, {
key: "monthsAbbr",
get: function get() {
return this._monthsAbbr;
},
set: function set(monthsAbbr) {
if (monthsAbbr.length !== 12) {
throw new RangeError("There must be 12 abbreviated months for ".concat(this.language, " language"));
}
this._monthsAbbr = monthsAbbr;
}
}, {
key: "days",
get: function get() {
return this._days;
},
set: function set(days) {
if (days.length !== 7) {
throw new RangeError("There must be 7 days for ".concat(this.language, " language"));
}
this._days = days;
}
}]);
return Language;
}(); // eslint-disable-next-line
var en = new Language('English', ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'], ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'], ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']) // eslint-disable-next-line
;
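// Building a custom locale (illustrative): the setters above validate the array lengths and throw a
// RangeError unless exactly 12 month names, 12 abbreviations and 7 day names are supplied, e.g.
//   var pseudo = new Language('Pseudo', twelveMonths, twelveAbbrs, sevenDays); // hypothetical arrays
//   pseudo.rtl = true; // right-to-left flag read by the picker components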
var utils = {
/**
* @type {Boolean}
*/
useUtc: false,
/**
* Returns the full year, using UTC or not
* @param {Date} date
*/
getFullYear: function getFullYear(date) {
return this.useUtc ? date.getUTCFullYear() : date.getFullYear();
},
/**
* Returns the month, using UTC or not
* @param {Date} date
*/
getMonth: function getMonth(date) {
return this.useUtc ? date.getUTCMonth() : date.getMonth();
},
/**
* Returns the date, using UTC or not
* @param {Date} date
*/
getDate: function getDate(date) {
return this.useUtc ? date.getUTCDate() : date.getDate();
},
/**
* Returns the day, using UTC or not
* @param {Date} date
*/
getDay: function getDay(date) {
return this.useUtc ? date.getUTCDay() : date.getDay();
},
/**
* Returns the hours, using UTC or not
* @param {Date} date
*/
getHours: function getHours(date) {
return this.useUtc ? date.getUTCHours() : date.getHours();
},
/**
* Returns the minutes, using UTC or not
* @param {Date} date
*/
getMinutes: function getMinutes(date) {
return this.useUtc ? date.getUTCMinutes() : date.getMinutes();
},
/**
* Sets the full year, using UTC or not
* @param {Date} date
*/
setFullYear: function setFullYear(date, value, useUtc) {
return this.useUtc ? date.setUTCFullYear(value) : date.setFullYear(value);
},
/**
* Sets the month, using UTC or not
* @param {Date} date
*/
setMonth: function setMonth(date, value, useUtc) {
return this.useUtc ? date.setUTCMonth(value) : date.setMonth(value);
},
/**
* Sets the date, using UTC or not
* @param {Date} date
* @param {Number} value
*/
setDate: function setDate(date, value, useUtc) {
return this.useUtc ? date.setUTCDate(value) : date.setDate(value);
},
/**
* Check if date1 is equivalent to date2, without comparing the time
* @see https://stackoverflow.com/a/6202196/4455925
* @param {Date} date1
* @param {Date} date2
*/
compareDates: function compareDates(date1, date2) {
var d1 = new Date(date1.getTime());
var d2 = new Date(date2.getTime());
if (this.useUtc) {
d1.setUTCHours(0, 0, 0, 0);
d2.setUTCHours(0, 0, 0, 0);
} else {
d1.setHours(0, 0, 0, 0);
d2.setHours(0, 0, 0, 0);
}
return d1.getTime() === d2.getTime();
},
/**
* Validates a date object
* @param {Date} date - an object instantiated with the new Date constructor
* @return {Boolean}
*/
isValidDate: function isValidDate(date) {
if (Object.prototype.toString.call(date) !== '[object Date]') {
return false;
}
return !isNaN(date.getTime());
},
/**
* Return abbreviated week day name
* @param {Date}
* @param {Array}
* @return {String}
*/
getDayNameAbbr: function getDayNameAbbr(date, days) {
if (_typeof(date) !== 'object') {
throw TypeError('Invalid Type');
}
return days[this.getDay(date)];
},
/**
* Return name of the month
* @param {Number|Date}
* @param {Array}
* @return {String}
*/
getMonthName: function getMonthName(month, months) {
if (!months) {
throw Error('missing 2nd parameter Months array');
}
if (_typeof(month) === 'object') {
return months[this.getMonth(month)];
}
if (typeof month === 'number') {
return months[month];
}
throw TypeError('Invalid type');
},
/**
* Return an abbreviated version of the month
* @param {Number|Date}
* @return {String}
*/
getMonthNameAbbr: function getMonthNameAbbr(month, monthsAbbr) {
if (!monthsAbbr) {
throw Error('missing 2nd parameter Months array');
}
if (_typeof(month) === 'object') {
return monthsAbbr[this.getMonth(month)];
}
if (typeof month === 'number') {
return monthsAbbr[month];
}
throw TypeError('Invalid type');
},
/**
* Get the total number of days in a month
* (months are zero-based: April, June, September and November - indices 3, 5, 8 and 10 - have 30 days)
* @param {Number} year
* @param {Number} month
* @return {Number}
*/
daysInMonth: function daysInMonth(year, month) {
return /8|3|5|10/.test(month) ? 30 : month === 1 ? !(year % 4) && year % 100 || !(year % 400) ? 29 : 28 : 31;
},
/**
* Get nth suffix for date
* @param {Number} day
* @return {String}
*/
getNthSuffix: function getNthSuffix(day) {
switch (day) {
case 1:
case 21:
case 31:
return 'st';
case 2:
case 22:
return 'nd';
case 3:
case 23:
return 'rd';
default:
return 'th';
}
},
/**
* Formats date object
* @param {Date}
* @param {String}
* @param {Object}
* @return {String}
*/
formatDate: function formatDate(date, format, translation) {
translation = !translation ? en : translation;
var year = this.getFullYear(date);
var month = this.getMonth(date) + 1;
var day = this.getDate(date);
var str = format.replace(/dd/, ('0' + day).slice(-2)).replace(/d/, day).replace(/yyyy/, year).replace(/yy/, String(year).slice(2)).replace(/MMMM/, this.getMonthName(this.getMonth(date), translation.months)).replace(/MMM/, this.getMonthNameAbbr(this.getMonth(date), translation.monthsAbbr)).replace(/MM/, ('0' + month).slice(-2)).replace(/M(?!a|ä|e)/, month).replace(/su/, this.getNthSuffix(this.getDate(date))).replace(/D(?!e|é|i)/, this.getDayNameAbbr(date, translation.days));
return str;
},
/**
* Creates an array of dates for each day in between two dates.
* @param {Date} start
* @param {Date} end
* @return {Array}
*/
createDateArray: function createDateArray(start, end) {
var dates = [];
while (start <= end) {
dates.push(new Date(start));
start = this.setDate(new Date(start), this.getDate(new Date(start)) + 1);
}
return dates;
},
/**
* method used as a prop validator for input values
* @param {*} val
* @return {Boolean}
*/
validateDateInput: function validateDateInput(val) {
return val === null || val instanceof Date || typeof val === 'string' || typeof val === 'number';
}
};
var makeDateUtils = function makeDateUtils(useUtc) {
return _objectSpread({}, utils, {
useUtc: useUtc
});
};
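// Illustrative usage of the helpers above (not part of the library build):
//   utils.formatDate(new Date(2016, 8, 1), 'dd MMM yyyy') // -> '01 Sep 2016' (default English translation)
//   var utcUtils = makeDateUtils(true); // same API, but every getter/setter uses the UTC variants
//   utcUtils.getFullYear(new Date(Date.UTC(2020, 0, 1))) // -> 2020 regardless of the local timezone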
var utils$1 = _objectSpread({}, utils) // eslint-disable-next-line
;
var script = {
props: {
selectedDate: Date,
resetTypedDate: [Date],
format: [String, Function],
translation: Object,
inline: Boolean,
id: String,
name: String,
refName: String,
openDate: Date,
placeholder: String,
inputClass: [String, Object, Array],
clearButton: Boolean,
clearButtonIcon: String,
calendarButton: Boolean,
calendarButtonIcon: String,
calendarButtonIconContent: String,
disabled: Boolean,
required: Boolean,
typeable: Boolean,
bootstrapStyling: Boolean,
useUtc: Boolean
},
data: function data() {
var constructedDateUtils = makeDateUtils(this.useUtc);
return {
input: null,
typedDate: false,
utils: constructedDateUtils
};
},
computed: {
formattedValue: function formattedValue() {
if (!this.selectedDate) {
return null;
}
if (this.typedDate) {
return this.typedDate;
}
return typeof this.format === 'function' ? this.format(this.selectedDate) : this.utils.formatDate(new Date(this.selectedDate), this.format, this.translation);
},
computedInputClass: function computedInputClass() {
if (this.bootstrapStyling) {
if (typeof this.inputClass === 'string') {
return [this.inputClass, 'form-control'].join(' ');
}
return _objectSpread({
'form-control': true
}, this.inputClass);
}
return this.inputClass;
}
},
watch: {
resetTypedDate: function resetTypedDate() {
this.typedDate = false;
}
},
methods: {
showCalendar: function showCalendar() {
this.$emit('showCalendar');
},
/**
* Attempt to parse a typed date
* @param {Event} event
*/
parseTypedDate: function parseTypedDate(event) {
// close calendar if escape or enter are pressed
if ([27, // escape
13 // enter
].includes(event.keyCode)) {
this.input.blur();
}
if (this.typeable) {
var typedDate = Date.parse(this.input.value);
if (!isNaN(typedDate)) {
this.typedDate = this.input.value;
this.$emit('typedDate', new Date(this.typedDate));
}
}
},
/**
* nullify the typed date to defer to regular formatting
* called once the input is blurred
*/
inputBlurred: function inputBlurred() {
if (this.typeable && isNaN(Date.parse(this.input.value))) {
this.clearDate();
this.input.value = null;
this.typedDate = null;
}
this.$emit('closeCalendar');
},
/**
* emit a clearDate event
*/
clearDate: function clearDate() {
this.$emit('clearDate');
}
},
mounted: function mounted() {
this.input = this.$el.querySelector('input');
}
} // eslint-disable-next-line
;
function normalizeComponent(template, style, script, scopeId, isFunctionalTemplate, moduleIdentifier
/* server only */
, shadowMode, createInjector, createInjectorSSR, createInjectorShadow) {
if (typeof shadowMode !== 'boolean') {
createInjectorSSR = createInjector;
createInjector = shadowMode;
shadowMode = false;
} // Vue.extend constructor export interop.
var options = typeof script === 'function' ? script.options : script; // render functions
if (template && template.render) {
options.render = template.render;
options.staticRenderFns = template.staticRenderFns;
options._compiled = true; // functional template
if (isFunctionalTemplate) {
options.functional = true;
}
} // scopedId
if (scopeId) {
options._scopeId = scopeId;
}
var hook;
if (moduleIdentifier) {
// server build
hook = function hook(context) {
// 2.3 injection
context = context || // cached call
this.$vnode && this.$vnode.ssrContext || // stateful
this.parent && this.parent.$vnode && this.parent.$vnode.ssrContext; // functional
// 2.2 with runInNewContext: true
if (!context && typeof __VUE_SSR_CONTEXT__ !== 'undefined') {
context = __VUE_SSR_CONTEXT__;
} // inject component styles
if (style) {
style.call(this, createInjectorSSR(context));
} // register component module identifier for async chunk inference
if (context && context._registeredComponents) {
context._registeredComponents.add(moduleIdentifier);
}
}; // used by ssr in case component is cached and beforeCreate
// never gets called
options._ssrRegister = hook;
} else if (style) {
hook = shadowMode ? function () {
style.call(this, createInjectorShadow(this.$root.$options.shadowRoot));
} : function (context) {
style.call(this, createInjector(context));
};
}
if (hook) {
if (options.functional) {
// register for functional component in vue file
var originalRender = options.render;
options.render = function renderWithStyleInjection(h, context) {
hook.call(context);
return originalRender(h, context);
};
} else {
// inject component registration as beforeCreate hook
var existing = options.beforeCreate;
options.beforeCreate = existing ? [].concat(existing, hook) : [hook];
}
}
return script;
}
var normalizeComponent_1 = normalizeComponent;
/* script */
const __vue_script__ = script;
/* template */
var __vue_render__ = function() {
var _vm = this;
var _h = _vm.$createElement;
var _c = _vm._self._c || _h;
return _c(
"div",
{ class: { "input-group": _vm.bootstrapStyling } },
[
_vm.calendarButton
? _c(
"span",
{
staticClass: "vdp-datepicker__calendar-button",
class: { "input-group-prepend": _vm.bootstrapStyling },
style: { "cursor:not-allowed;": _vm.disabled },
on: { click: _vm.showCalendar }
},
[
_c(
"span",
{ class: { "input-group-text": _vm.bootstrapStyling } },
[
_c("i", { class: _vm.calendarButtonIcon }, [
_vm._v(
"\n " +
_vm._s(_vm.calendarButtonIconContent) +
"\n "
),
!_vm.calendarButtonIcon
? _c("span", [_vm._v("…")])
: _vm._e()
])
]
)
]
)
: _vm._e(),
_vm._v(" "),
_c("input", {
ref: _vm.refName,
class: _vm.computedInputClass,
attrs: {
type: _vm.inline ? "hidden" : "text",
name: _vm.name,
id: _vm.id,
"open-date": _vm.openDate,
placeholder: _vm.placeholder,
"clear-button": _vm.clearButton,
disabled: _vm.disabled,
required: _vm.required,
readonly: !_vm.typeable,
autocomplete: "off"
},
domProps: { value: _vm.formattedValue },
on: {
click: _vm.showCalendar,
keyup: _vm.parseTypedDate,
blur: _vm.inputBlurred
}
}),
_vm._v(" "),
_vm.clearButton && _vm.selectedDate
? _c(
"span",
{
staticClass: "vdp-datepicker__clear-button",
class: { "input-group-append": _vm.bootstrapStyling },
on: {
click: function($event) {
return _vm.clearDate()
}
}
},
[
_c(
"span",
{ class: { "input-group-text": _vm.bootstrapStyling } },
[
_c("i", { class: _vm.clearButtonIcon }, [
!_vm.clearButtonIcon ? _c("span", [_vm._v("×")]) : _vm._e()
])
]
)
]
)
: _vm._e(),
_vm._v(" "),
_vm._t("afterDateInput")
],
2
)
};
var __vue_staticRenderFns__ = [];
__vue_render__._withStripped = true;
/* style */
const __vue_inject_styles__ = undefined;
/* scoped */
const __vue_scope_id__ = undefined;
/* module identifier */
const __vue_module_identifier__ = undefined;
/* functional template */
const __vue_is_functional_template__ = false;
/* style inject */
/* style inject SSR */
var DateInput = normalizeComponent_1(
{ render: __vue_render__, staticRenderFns: __vue_staticRenderFns__ },
__vue_inject_styles__,
__vue_script__,
__vue_scope_id__,
__vue_is_functional_template__,
__vue_module_identifier__,
undefined,
undefined
);
//
var script$1 = {
props: {
showDayView: Boolean,
selectedDate: Date,
pageDate: Date,
pageTimestamp: Number,
fullMonthName: Boolean,
allowedToShowView: Function,
dayCellContent: {
type: Function,
"default": function _default(day) {
return day.date;
}
},
disabledDates: Object,
highlighted: Object,
calendarClass: [String, Object, Array],
calendarStyle: Object,
translation: Object,
isRtl: Boolean,
mondayFirst: Boolean,
useUtc: Boolean
},
data: function data() {
var constructedDateUtils = makeDateUtils(this.useUtc);
return {
utils: constructedDateUtils
};
},
computed: {
/**
* Returns an array of day names
* @return {String[]}
*/
daysOfWeek: function daysOfWeek() {
if (this.mondayFirst) {
var tempDays = this.translation.days.slice();
tempDays.push(tempDays.shift());
return tempDays;
}
return this.translation.days;
},
/**
* Returns the day number of the week less one for the first of the current month
* Used to show amount of empty cells before the first in the day calendar layout
* @return {Number}
*/
blankDays: function blankDays() {
var d = this.pageDate;
var dObj = this.useUtc ? new Date(Date.UTC(d.getUTCFullYear(), d.getUTCMonth(), 1)) : new Date(d.getFullYear(), d.getMonth(), 1, d.getHours(), d.getMinutes());
if (this.mondayFirst) {
return this.utils.getDay(dObj) > 0 ? this.utils.getDay(dObj) - 1 : 6;
}
return this.utils.getDay(dObj);
},
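// Worked example (illustrative): if the 1st of the page month falls on a Wednesday,
// getDay(dObj) is 3, so 3 leading blanks are rendered (Sun-Tue), or 2 when mondayFirst
// is set (Mon and Tue); a month starting on Sunday yields 0 blanks, or 6 with mondayFirst.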
/**
* @return {Object[]}
*/
days: function days() {
var d = this.pageDate;
var days = []; // set up a new date object to the beginning of the current 'page'
var dObj = this.useUtc ? new Date(Date.UTC(d.getUTCFullYear(), d.getUTCMonth(), 1)) : new Date(d.getFullYear(), d.getMonth(), 1, d.getHours(), d.getMinutes());
var daysInMonth = this.utils.daysInMonth(this.utils.getFullYear(dObj), this.utils.getMonth(dObj));
for (var i = 0; i < daysInMonth; i++) {
days.push({
date: this.utils.getDate(dObj),
timestamp: dObj.getTime(),
isSelected: this.isSelectedDate(dObj),
isDisabled: this.isDisabledDate(dObj),
isHighlighted: this.isHighlightedDate(dObj),
isHighlightStart: this.isHighlightStart(dObj),
isHighlightEnd: this.isHighlightEnd(dObj),
isToday: this.utils.compareDates(dObj, new Date()),
isWeekend: this.utils.getDay(dObj) === 0 || this.utils.getDay(dObj) === 6,
isSaturday: this.utils.getDay(dObj) === 6,
isSunday: this.utils.getDay(dObj) === 0
});
this.utils.setDate(dObj, this.utils.getDate(dObj) + 1);
}
return days;
},
/**
* Gets the name of the month the current page is on
* @return {String}
*/
currMonthName: function currMonthName() {
var monthName = this.fullMonthName ? this.translation.months : this.translation.monthsAbbr;
return this.utils.getMonthNameAbbr(this.utils.getMonth(this.pageDate), monthName);
},
/**
* Gets the name of the year that current page is on
* @return {Number}
*/
currYearName: function currYearName() {
var yearSuffix = this.translation.yearSuffix;
return "".concat(this.utils.getFullYear(this.pageDate)).concat(yearSuffix);
},
/**
* Is this translation using year/month/day format?
* @return {Boolean}
*/
isYmd: function isYmd() {
return this.translation.ymd && this.translation.ymd === true;
},
/**
* Is the left hand navigation button disabled?
* @return {Boolean}
*/
isLeftNavDisabled: function isLeftNavDisabled() {
return this.isRtl ? this.isNextMonthDisabled(this.pageTimestamp) : this.isPreviousMonthDisabled(this.pageTimestamp);
},
/**
* Is the right hand navigation button disabled?
* @return {Boolean}
*/
isRightNavDisabled: function isRightNavDisabled() {
return this.isRtl ? this.isPreviousMonthDisabled(this.pageTimestamp) : this.isNextMonthDisabled(this.pageTimestamp);
}
},
methods: {
selectDate: function selectDate(date) {
if (date.isDisabled) {
this.$emit('selectedDisabled', date);
return false;
}
this.$emit('selectDate', date);
},
/**
* @return {Number}
*/
getPageMonth: function getPageMonth() {
return this.utils.getMonth(this.pageDate);
},
/**
* Emit an event to show the month picker
*/
showMonthCalendar: function showMonthCalendar() {
this.$emit('showMonthCalendar');
},
/**
* Change the page month
* @param {Number} incrementBy
*/
changeMonth: function changeMonth(incrementBy) {
var date = this.pageDate;
this.utils.setMonth(date, this.utils.getMonth(date) + incrementBy);
this.$emit('changedMonth', date);
},
/**
* Decrement the page month
*/
previousMonth: function previousMonth() {
if (!this.isPreviousMonthDisabled()) {
this.changeMonth(-1);
}
},
/**
* Is the previous month disabled?
* @return {Boolean}
*/
isPreviousMonthDisabled: function isPreviousMonthDisabled() {
if (!this.disabledDates || !this.disabledDates.to) {
return false;
}
var d = this.pageDate;
return this.utils.getMonth(this.disabledDates.to) >= this.utils.getMonth(d) && this.utils.getFullYear(this.disabledDates.to) >= this.utils.getFullYear(d);
},
/**
* Increment the current page month
*/
nextMonth: function nextMonth() {
if (!this.isNextMonthDisabled()) {
this.changeMonth(+1);
}
},
/**
* Is the next month disabled?
* @return {Boolean}
*/
isNextMonthDisabled: function isNextMonthDisabled() {
if (!this.disabledDates || !this.disabledDates.from) {
return false;
}
var d = this.pageDate;
return this.utils.getMonth(this.disabledDates.from) <= this.utils.getMonth(d) && this.utils.getFullYear(this.disabledDates.from) <= this.utils.getFullYear(d);
},
/**
* Whether a day is selected
* @param {Date}
* @return {Boolean}
*/
isSelectedDate: function isSelectedDate(dObj) {
return this.selectedDate && this.utils.compareDates(this.selectedDate, dObj);
},
/**
* Whether a day is disabled
* @param {Date}
* @return {Boolean}
*/
isDisabledDate: function isDisabledDate(date) {
var _this = this;
var disabledDates = false;
if (typeof this.disabledDates === 'undefined') {
return false;
}
if (typeof this.disabledDates.dates !== 'undefined') {
this.disabledDates.dates.forEach(function (d) {
if (_this.utils.compareDates(date, d)) {
disabledDates = true;
return true;
}
});
}
if (typeof this.disabledDates.to !== 'undefined' && this.disabledDates.to && date < this.disabledDates.to) {
disabledDates = true;
}
if (typeof this.disabledDates.from !== 'undefined' && this.disabledDates.from && date > this.disabledDates.from) {
disabledDates = true;
}
if (typeof this.disabledDates.ranges !== 'undefined') {
this.disabledDates.ranges.forEach(function (range) {
if (typeof range.from !== 'undefined' && range.from && typeof range.to !== 'undefined' && range.to) {
if (date < range.to && date > range.from) {
disabledDates = true;
return true;
}
}
});
}
if (typeof this.disabledDates.days !== 'undefined' && this.disabledDates.days.indexOf(this.utils.getDay(date)) !== -1) {
disabledDates = true;
}
if (typeof this.disabledDates.daysOfMonth !== 'undefined' && this.disabledDates.daysOfMonth.indexOf(this.utils.getDate(date)) !== -1) {
disabledDates = true;
}
if (typeof this.disabledDates.customPredictor === 'function' && this.disabledDates.customPredictor(date)) {
disabledDates = true;
}
return disabledDates;
},
/**
* Whether a day is highlighted; disabled days are not highlighted unless highlighted.includeDisabled is true
* @param {Date}
* @return {Boolean}
*/
isHighlightedDate: function isHighlightedDate(date) {
var _this2 = this;
if (!(this.highlighted && this.highlighted.includeDisabled) && this.isDisabledDate(date)) {
return false;
}
var highlighted = false;
if (typeof this.highlighted === 'undefined') {
return false;
}
if (typeof this.highlighted.dates !== 'undefined') {
this.highlighted.dates.forEach(function (d) {
if (_this2.utils.compareDates(date, d)) {
highlighted = true;
return true;
}
});
}
if (this.isDefined(this.highlighted.from) && this.isDefined(this.highlighted.to)) {
highlighted = date >= this.highlighted.from && date <= this.highlighted.to;
}
if (typeof this.highlighted.days !== 'undefined' && this.highlighted.days.indexOf(this.utils.getDay(date)) !== -1) {
highlighted = true;
}
if (typeof this.highlighted.daysOfMonth !== 'undefined' && this.highlighted.daysOfMonth.indexOf(this.utils.getDate(date)) !== -1) {
highlighted = true;
}
if (typeof this.highlighted.customPredictor === 'function' && this.highlighted.customPredictor(date)) {
highlighted = true;
}
return highlighted;
},
dayClasses: function dayClasses(day) {
return {
'selected': day.isSelected,
'disabled': day.isDisabled,
'highlighted': day.isHighlighted,
'today': day.isToday,
'weekend': day.isWeekend,
'sat': day.isSaturday,
'sun': day.isSunday,
'highlight-start': day.isHighlightStart,
'highlight-end': day.isHighlightEnd
};
},
/**
* Whether a day is highlighted and it is the first date
* in the highlighted range of dates
* @param {Date}
* @return {Boolean}
*/
isHighlightStart: function isHighlightStart(date) {
return this.isHighlightedDate(date) && this.highlighted.from instanceof Date && this.utils.getFullYear(this.highlighted.from) === this.utils.getFullYear(date) && this.utils.getMonth(this.highlighted.from) === this.utils.getMonth(date) && this.utils.getDate(this.highlighted.from) === this.utils.getDate(date);
},
/**
* Whether a day is highlighted and it is the last date
* in the highlighted range of dates
* @param {Date}
* @return {Boolean}
*/
isHighlightEnd: function isHighlightEnd(date) {
return this.isHighlightedDate(date) && this.highlighted.to instanceof Date && this.utils.getFullYear(this.highlighted.to) === this.utils.getFullYear(date) && this.utils.getMonth(this.highlighted.to) === this.utils.getMonth(date) && this.utils.getDate(this.highlighted.to) === this.utils.getDate(date);
},
/**
* Helper
* @param {mixed} prop
* @return {Boolean}
*/
isDefined: function isDefined(prop) {
return typeof prop !== 'undefined' && prop;
}
} // eslint-disable-next-line
};
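// Shape of the `disabledDates` prop consumed by isDisabledDate above (illustrative values):
// {
//   to: new Date(2016, 0, 5),            // disable everything strictly before this date
//   from: new Date(2016, 0, 26),         // disable everything strictly after this date
//   days: [6, 0],                        // disable Saturdays and Sundays
//   daysOfMonth: [29, 30, 31],           // disable these days in every month
//   dates: [new Date(2016, 9, 16)],      // disable exact dates
//   ranges: [{ from: new Date(2016, 11, 25), to: new Date(2016, 11, 30) }], // disable an open interval
//   customPredictor: function (date) { return date.getDate() % 5 === 0; }   // arbitrary extra rule
// }
// The `highlighted` prop follows the same shape, plus an `includeDisabled` boolean.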
/* script */
const __vue_script__$1 = script$1;
/* template */
var __vue_render__$1 = function() {
var _vm = this;
var _h = _vm.$createElement;
var _c = _vm._self._c || _h;
return _c(
"div",
{
directives: [
{
name: "show",
rawName: "v-show",
value: _vm.showDayView,
expression: "showDayView"
}
],
class: [_vm.calendarClass, "vdp-datepicker__calendar"],
style: _vm.calendarStyle,
on: {
mousedown: function($event) {
$event.preventDefault();
}
}
},
[
_vm._t("beforeCalendarHeader"),
_vm._v(" "),
_c("header", [
_c(
"span",
{
staticClass: "prev",
class: { disabled: _vm.isLeftNavDisabled },
on: {
click: function($event) {
_vm.isRtl ? _vm.nextMonth() : _vm.previousMonth();
}
}
},
[_vm._v("<")]
),
_vm._v(" "),
_c(
"span",
{
staticClass: "day__month_btn",
class: _vm.allowedToShowView("month") ? "up" : "",
on: { click: _vm.showMonthCalendar }
},
[
_vm._v(
_vm._s(_vm.isYmd ? _vm.currYearName : _vm.currMonthName) +
" " +
_vm._s(_vm.isYmd ? _vm.currMonthName : _vm.currYearName)
)
]
),
_vm._v(" "),
_c(
"span",
{
staticClass: "next",
class: { disabled: _vm.isRightNavDisabled },
on: {
click: function($event) {
_vm.isRtl ? _vm.previousMonth() : _vm.nextMonth();
}
}
},
[_vm._v(">")]
)
]),
_vm._v(" "),
_c(
"div",
{ class: _vm.isRtl ? "flex-rtl" : "" },
[
_vm._l(_vm.daysOfWeek, function(d) {
return _c(
"span",
{ key: d, staticClass: "cell day-header" },
[_vm._v(_vm._s(d))]
)
}),
_vm._v(" "),
_vm.blankDays > 0
? _vm._l(_vm.blankDays, function(d) {
return _c("span", {
key: d,
staticClass: "cell day blank"
})
})
: _vm._e(),
_vm._l(_vm.days, function(day) {
return _c("span", {
key: day.timestamp,
staticClass: "cell day",
class: _vm.dayClasses(day),
domProps: { innerHTML: _vm._s(_vm.dayCellContent(day)) },
on: {
click: function($event) {
return _vm.selectDate(day)
}
}
})
})
],
2
)
],
2
)
};
var __vue_staticRenderFns__$1 = [];
__vue_render__$1._withStripped = true;
/* style */
const __vue_inject_styles__$1 = undefined;
/* scoped */
const __vue_scope_id__$1 = undefined;
/* module identifier */
const __vue_module_identifier__$1 = undefined;
/* functional template */
const __vue_is_functional_template__$1 = false;
/* style inject */
/* style inject SSR */
var PickerDay = normalizeComponent_1(
{ render: __vue_render__$1, staticRenderFns: __vue_staticRenderFns__$1 },
__vue_inject_styles__$1,
__vue_script__$1,
__vue_scope_id__$1,
__vue_is_functional_template__$1,
__vue_module_identifier__$1,
undefined,
undefined
);
//
var script$2 = {
props: {
showMonthView: Boolean,
selectedDate: Date,
pageDate: Date,
pageTimestamp: Number,
disabledDates: Object,
calendarClass: [String, Object, Array],
calendarStyle: Object,
translation: Object,
isRtl: Boolean,
allowedToShowView: Function,
useUtc: Boolean
},
data: function data() {
var constructedDateUtils = makeDateUtils(this.useUtc);
return {
utils: constructedDateUtils
};
},
computed: {
months: function months() {
var d = this.pageDate;
var months = []; // set up a new date object to the beginning of the current 'page'
var dObj = this.useUtc ? new Date(Date.UTC(d.getUTCFullYear(), 0, d.getUTCDate())) : new Date(d.getFullYear(), 0, d.getDate(), d.getHours(), d.getMinutes());
for (var i = 0; i < 12; i++) {
months.push({
month: this.utils.getMonthName(i, this.translation.months),
timestamp: dObj.getTime(),
isSelected: this.isSelectedMonth(dObj),
isDisabled: this.isDisabledMonth(dObj)
});
this.utils.setMonth(dObj, this.utils.getMonth(dObj) + 1);
}
return months;
},
/**
* Get year name on current page.
* @return {String}
*/
pageYearName: function pageYearName() {
var yearSuffix = this.translation.yearSuffix;
return "".concat(this.utils.getFullYear(this.pageDate)).concat(yearSuffix);
},
/**
* Is the left hand navigation disabled
* @return {Boolean}
*/
isLeftNavDisabled: function isLeftNavDisabled() {
return this.isRtl ? this.isNextYearDisabled(this.pageTimestamp) : this.isPreviousYearDisabled(this.pageTimestamp);
},
/**
* Is the right hand navigation disabled
* @return {Boolean}
*/
isRightNavDisabled: function isRightNavDisabled() {
return this.isRtl ? this.isPreviousYearDisabled(this.pageTimestamp) : this.isNextYearDisabled(this.pageTimestamp);
}
},
methods: {
/**
* Emits a selectMonth event
* @param {Object} month
*/
selectMonth: function selectMonth(month) {
if (month.isDisabled) {
return false;
}
this.$emit('selectMonth', month);
},
/**
* Changes the year up or down
* @param {Number} incrementBy
*/
changeYear: function changeYear(incrementBy) {
var date = this.pageDate;
this.utils.setFullYear(date, this.utils.getFullYear(date) + incrementBy);
this.$emit('changedYear', date);
},
/**
* Decrements the year
*/
previousYear: function previousYear() {
if (!this.isPreviousYearDisabled()) {
this.changeYear(-1);
}
},
/**
* Checks if the previous year is disabled or not
* @return {Boolean}
*/
isPreviousYearDisabled: function isPreviousYearDisabled() {
if (!this.disabledDates || !this.disabledDates.to) {
return false;
}
return this.utils.getFullYear(this.disabledDates.to) >= this.utils.getFullYear(this.pageDate);
},
/**
* Increments the year
*/
nextYear: function nextYear() {
if (!this.isNextYearDisabled()) {
this.changeYear(1);
}
},
/**
* Checks if the next year is disabled or not
* @return {Boolean}
*/
isNextYearDisabled: function isNextYearDisabled() {
if (!this.disabledDates || !this.disabledDates.from) {
return false;
}
return this.utils.getFullYear(this.disabledDates.from) <= this.utils.getFullYear(this.pageDate);
},
/**
* Emits an event that shows the year calendar
*/
showYearCalendar: function showYearCalendar() {
this.$emit('showYearCalendar');
},
/**
* Whether the selected date is in this month
* @param {Date}
* @return {Boolean}
*/
isSelectedMonth: function isSelectedMonth(date) {
return this.selectedDate && this.utils.getFullYear(this.selectedDate) === this.utils.getFullYear(date) && this.utils.getMonth(this.selectedDate) === this.utils.getMonth(date);
},
/**
* Whether a month is disabled
* @param {Date}
* @return {Boolean}
*/
isDisabledMonth: function isDisabledMonth(date) {
var disabledDates = false;
if (typeof this.disabledDates === 'undefined') {
return false;
}
if (typeof this.disabledDates.to !== 'undefined' && this.disabledDates.to) {
if (this.utils.getMonth(date) < this.utils.getMonth(this.disabledDates.to) && this.utils.getFullYear(date) <= this.utils.getFullYear(this.disabledDates.to) || this.utils.getFullYear(date) < this.utils.getFullYear(this.disabledDates.to)) {
disabledDates = true;
}
}
if (typeof this.disabledDates.from !== 'undefined' && this.disabledDates.from) {
if (this.utils.getMonth(date) > this.utils.getMonth(this.disabledDates.from) && this.utils.getFullYear(date) >= this.utils.getFullYear(this.disabledDates.from) || this.utils.getFullYear(date) > this.utils.getFullYear(this.disabledDates.from)) {
disabledDates = true;
}
}
if (typeof this.disabledDates.customPredictor === 'function' && this.disabledDates.customPredictor(date)) {
disabledDates = true;
}
return disabledDates;
}
} // eslint-disable-next-line
};
/* script */
const __vue_script__$2 = script$2;
/* template */
var __vue_render__$2 = function() {
var _vm = this;
var _h = _vm.$createElement;
var _c = _vm._self._c || _h;
return _c(
"div",
{
directives: [
{
name: "show",
rawName: "v-show",
value: _vm.showMonthView,
expression: "showMonthView"
}
],
class: [_vm.calendarClass, "vdp-datepicker__calendar"],
style: _vm.calendarStyle,
on: {
mousedown: function($event) {
$event.preventDefault();
}
}
},
[
_vm._t("beforeCalendarHeader"),
_vm._v(" "),
_c("header", [
_c(
"span",
{
staticClass: "prev",
class: { disabled: _vm.isLeftNavDisabled },
on: {
click: function($event) {
_vm.isRtl ? _vm.nextYear() : _vm.previousYear();
}
}
},
[_vm._v("<")]
),
_vm._v(" "),
_c(
"span",
{
staticClass: "month__year_btn",
class: _vm.allowedToShowView("year") ? "up" : "",
on: { click: _vm.showYearCalendar }
},
[_vm._v(_vm._s(_vm.pageYearName))]
),
_vm._v(" "),
_c(
"span",
{
staticClass: "next",
class: { disabled: _vm.isRightNavDisabled },
on: {
click: function($event) {
_vm.isRtl ? _vm.previousYear() : _vm.nextYear();
}
}
},
[_vm._v(">")]
)
]),
_vm._v(" "),
_vm._l(_vm.months, function(month) {
return _c(
"span",
{
key: month.timestamp,
staticClass: "cell month",
class: { selected: month.isSelected, disabled: month.isDisabled },
on: {
click: function($event) {
$event.stopPropagation();
return _vm.selectMonth(month)
}
}
},
[_vm._v(_vm._s(month.month))]
)
})
],
2
)
};
var __vue_staticRenderFns__$2 = [];
__vue_render__$2._withStripped = true;
/* style */
const __vue_inject_styles__$2 = undefined;
/* scoped */
const __vue_scope_id__$2 = undefined;
/* module identifier */
const __vue_module_identifier__$2 = undefined;
/* functional template */
const __vue_is_functional_template__$2 = false;
/* style inject */
/* style inject SSR */
var PickerMonth = normalizeComponent_1(
{ render: __vue_render__$2, staticRenderFns: __vue_staticRenderFns__$2 },
__vue_inject_styles__$2,
__vue_script__$2,
__vue_scope_id__$2,
__vue_is_functional_template__$2,
__vue_module_identifier__$2,
undefined,
undefined
);
//
var script$3 = {
props: {
showYearView: Boolean,
selectedDate: Date,
pageDate: Date,
pageTimestamp: Number,
disabledDates: Object,
highlighted: Object,
calendarClass: [String, Object, Array],
calendarStyle: Object,
translation: Object,
isRtl: Boolean,
allowedToShowView: Function,
useUtc: Boolean
},
computed: {
years: function years() {
var d = this.pageDate;
var years = []; // set up a new date object to the beginning of the current 'page'
var dObj = this.useUtc ? new Date(Date.UTC(Math.floor(d.getUTCFullYear() / 10) * 10, d.getUTCMonth(), d.getUTCDate())) : new Date(Math.floor(d.getFullYear() / 10) * 10, d.getMonth(), d.getDate(), d.getHours(), d.getMinutes());
for (var i = 0; i < 10; i++) {
years.push({
year: this.utils.getFullYear(dObj),
timestamp: dObj.getTime(),
isSelected: this.isSelectedYear(dObj),
isDisabled: this.isDisabledYear(dObj)
});
this.utils.setFullYear(dObj, this.utils.getFullYear(dObj) + 1);
}
return years;
},
/**
* @return {String}
*/
getPageDecade: function getPageDecade() {
var decadeStart = Math.floor(this.utils.getFullYear(this.pageDate) / 10) * 10;
var decadeEnd = decadeStart + 9;
var yearSuffix = this.translation.yearSuffix;
return "".concat(decadeStart, " - ").concat(decadeEnd).concat(yearSuffix);
},
/**
* Is the left hand navigation button disabled?
* @return {Boolean}
*/
isLeftNavDisabled: function isLeftNavDisabled() {
return this.isRtl ? this.isNextDecadeDisabled(this.pageTimestamp) : this.isPreviousDecadeDisabled(this.pageTimestamp);
},
/**
* Is the right hand navigation button disabled?
* @return {Boolean}
*/
isRightNavDisabled: function isRightNavDisabled() {
return this.isRtl ? this.isPreviousDecadeDisabled(this.pageTimestamp) : this.isNextDecadeDisabled(this.pageTimestamp);
}
},
data: function data() {
var constructedDateUtils = makeDateUtils(this.useUtc);
return {
utils: constructedDateUtils
};
},
methods: {
selectYear: function selectYear(year) {
if (year.isDisabled) {
return false;
}
this.$emit('selectYear', year);
},
changeYear: function changeYear(incrementBy) {
var date = this.pageDate;
this.utils.setFullYear(date, this.utils.getFullYear(date) + incrementBy);
this.$emit('changedDecade', date);
},
previousDecade: function previousDecade() {
if (this.isPreviousDecadeDisabled()) {
return false;
}
this.changeYear(-10);
},
isPreviousDecadeDisabled: function isPreviousDecadeDisabled() {
if (!this.disabledDates || !this.disabledDates.to) {
return false;
}
var disabledYear = this.utils.getFullYear(this.disabledDates.to);
var lastYearInPreviousPage = Math.floor(this.utils.getFullYear(this.pageDate) / 10) * 10 - 1;
return disabledYear > lastYearInPreviousPage;
},
nextDecade: function nextDecade() {
if (this.isNextDecadeDisabled()) {
return false;
}
this.changeYear(10);
},
isNextDecadeDisabled: function isNextDecadeDisabled() {
if (!this.disabledDates || !this.disabledDates.from) {
return false;
}
var disabledYear = this.utils.getFullYear(this.disabledDates.from);
var firstYearInNextPage = Math.ceil(this.utils.getFullYear(this.pageDate) / 10) * 10;
return disabledYear < firstYearInNextPage;
},
/**
* Whether the selected date is in this year
* @param {Date}
* @return {Boolean}
*/
isSelectedYear: function isSelectedYear(date) {
return this.selectedDate && this.utils.getFullYear(this.selectedDate) === this.utils.getFullYear(date);
},
/**
* Whether a year is disabled
* @param {Date}
* @return {Boolean}
*/
isDisabledYear: function isDisabledYear(date) {
var disabledDates = false;
if (typeof this.disabledDates === 'undefined' || !this.disabledDates) {
return false;
}
if (typeof this.disabledDates.to !== 'undefined' && this.disabledDates.to) {
if (this.utils.getFullYear(date) < this.utils.getFullYear(this.disabledDates.to)) {
disabledDates = true;
}
}
if (typeof this.disabledDates.from !== 'undefined' && this.disabledDates.from) {
if (this.utils.getFullYear(date) > this.utils.getFullYear(this.disabledDates.from)) {
disabledDates = true;
}
}
if (typeof this.disabledDates.customPredictor === 'function' && this.disabledDates.customPredictor(date)) {
disabledDates = true;
}
return disabledDates;
}
} // eslint-disable-next-line
};
/* script */
const __vue_script__$3 = script$3;
/* template */
var __vue_render__$3 = function() {
var _vm = this;
var _h = _vm.$createElement;
var _c = _vm._self._c || _h;
return _c(
"div",
{
directives: [
{
name: "show",
rawName: "v-show",
value: _vm.showYearView,
expression: "showYearView"
}
],
class: [_vm.calendarClass, "vdp-datepicker__calendar"],
style: _vm.calendarStyle,
on: {
mousedown: function($event) {
$event.preventDefault();
}
}
},
[
_vm._t("beforeCalendarHeader"),
_vm._v(" "),
_c("header", [
_c(
"span",
{
staticClass: "prev",
class: { disabled: _vm.isLeftNavDisabled },
on: {
click: function($event) {
_vm.isRtl ? _vm.nextDecade() : _vm.previousDecade();
}
}
},
[_vm._v("<")]
),
_vm._v(" "),
_c("span", [_vm._v(_vm._s(_vm.getPageDecade))]),
_vm._v(" "),
_c(
"span",
{
staticClass: "next",
class: { disabled: _vm.isRightNavDisabled },
on: {
click: function($event) {
_vm.isRtl ? _vm.previousDecade() : _vm.nextDecade();
}
}
},
[_vm._v(">")]
)
]),
_vm._v(" "),
_vm._l(_vm.years, function(year) {
return _c(
"span",
{
key: year.timestamp,
staticClass: "cell year",
class: { selected: year.isSelected, disabled: year.isDisabled },
on: {
click: function($event) {
$event.stopPropagation();
return _vm.selectYear(year)
}
}
},
[_vm._v(_vm._s(year.year))]
)
})
],
2
)
};
var __vue_staticRenderFns__$3 = [];
__vue_render__$3._withStripped = true;
/* style */
const __vue_inject_styles__$3 = undefined;
/* scoped */
const __vue_scope_id__$3 = undefined;
/* module identifier */
const __vue_module_identifier__$3 = undefined;
/* functional template */
const __vue_is_functional_template__$3 = false;
/* style inject */
/* style inject SSR */
var PickerYear = normalizeComponent_1(
{ render: __vue_render__$3, staticRenderFns: __vue_staticRenderFns__$3 },
__vue_inject_styles__$3,
__vue_script__$3,
__vue_scope_id__$3,
__vue_is_functional_template__$3,
__vue_module_identifier__$3,
undefined,
undefined
);
//
var script$4 = {
components: {
DateInput: DateInput,
PickerDay: PickerDay,
PickerMonth: PickerMonth,
PickerYear: PickerYear
},
props: {
value: {
validator: function validator(val) {
return utils$1.validateDateInput(val);
}
},
name: String,
refName: String,
id: String,
format: {
type: [String, Function],
"default": 'dd MMM yyyy'
},
language: {
type: Object,
"default": function _default() {
return en;
}
},
openDate: {
validator: function validator(val) {
return utils$1.validateDateInput(val);
}
},
dayCellContent: Function,
fullMonthName: Boolean,
disabledDates: Object,
highlighted: Object,
placeholder: String,
inline: Boolean,
calendarClass: [String, Object, Array],
inputClass: [String, Object, Array],
wrapperClass: [String, Object, Array],
mondayFirst: Boolean,
clearButton: Boolean,
clearButtonIcon: String,
calendarButton: Boolean,
calendarButtonIcon: String,
calendarButtonIconContent: String,
bootstrapStyling: Boolean,
initialView: String,
disabled: Boolean,
required: Boolean,
typeable: Boolean,
useUtc: Boolean,
minimumView: {
type: String,
"default": 'day'
},
maximumView: {
type: String,
"default": 'year'
}
},
data: function data() {
var startDate = this.openDate ? new Date(this.openDate) : new Date();
var constructedDateUtils = makeDateUtils(this.useUtc);
var pageTimestamp = constructedDateUtils.setDate(startDate, 1);
return {
/*
* Vue cannot observe changes to a Date Object so date must be stored as a timestamp
* This represents the first day of the current viewing month
* {Number}
*/
pageTimestamp: pageTimestamp,
/*
* Selected Date
* {Date}
*/
selectedDate: null,
/*
* Flags to show calendar views
* {Boolean}
*/
showDayView: false,
showMonthView: false,
showYearView: false,
/*
* Positioning
*/
calendarHeight: 0,
resetTypedDate: new Date(),
utils: constructedDateUtils
};
},
watch: {
value: function value(_value) {
this.setValue(_value);
},
openDate: function openDate() {
this.setPageDate();
},
initialView: function initialView() {
this.setInitialView();
}
},
computed: {
computedInitialView: function computedInitialView() {
if (!this.initialView) {
return this.minimumView;
}
return this.initialView;
},
pageDate: function pageDate() {
return new Date(this.pageTimestamp);
},
translation: function translation() {
return this.language;
},
calendarStyle: function calendarStyle() {
return {
position: this.isInline ? 'static' : undefined
};
},
isOpen: function isOpen() {
return this.showDayView || this.showMonthView || this.showYearView;
},
isInline: function isInline() {
return !!this.inline;
},
isRtl: function isRtl() {
return this.translation.rtl === true;
}
},
methods: {
/**
* Called in the event that the user navigates to date pages and
* closes the picker without selecting a date.
*/
resetDefaultPageDate: function resetDefaultPageDate() {
if (this.selectedDate === null) {
this.setPageDate();
return;
}
this.setPageDate(this.selectedDate);
},
/**
* Effectively a toggle to show/hide the calendar
* @return {mixed}
*/
showCalendar: function showCalendar() {
if (this.disabled || this.isInline) {
return false;
}
if (this.isOpen) {
return this.close(true);
}
this.setInitialView();
},
/**
* Sets the initial picker page view: day, month or year
*/
setInitialView: function setInitialView() {
var initialView = this.computedInitialView;
if (!this.allowedToShowView(initialView)) {
throw new Error("initialView '".concat(this.initialView, "' cannot be rendered based on minimum '").concat(this.minimumView, "' and maximum '").concat(this.maximumView, "'"));
}
switch (initialView) {
case 'year':
this.showYearCalendar();
break;
case 'month':
this.showMonthCalendar();
break;
default:
this.showDayCalendar();
break;
}
},
/**
* Are we allowed to show a specific picker view?
* @param {String} view
* @return {Boolean}
*/
allowedToShowView: function allowedToShowView(view) {
var views = ['day', 'month', 'year'];
var minimumViewIndex = views.indexOf(this.minimumView);
var maximumViewIndex = views.indexOf(this.maximumView);
var viewIndex = views.indexOf(view);
return viewIndex >= minimumViewIndex && viewIndex <= maximumViewIndex;
},
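// Example (illustrative): with minimumView 'month' and maximumView 'year', allowedToShowView('day')
// is false while 'month' and 'year' are allowed, so picking a month emits the selection directly
// instead of drilling down into the day grid.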
/**
* Show the day picker
* @return {Boolean}
*/
showDayCalendar: function showDayCalendar() {
if (!this.allowedToShowView('day')) {
return false;
}
this.close();
this.showDayView = true;
return true;
},
/**
* Show the month picker
* @return {Boolean}
*/
showMonthCalendar: function showMonthCalendar() {
if (!this.allowedToShowView('month')) {
return false;
}
this.close();
this.showMonthView = true;
return true;
},
/**
* Show the year picker
* @return {Boolean}
*/
showYearCalendar: function showYearCalendar() {
if (!this.allowedToShowView('year')) {
return false;
}
this.close();
this.showYearView = true;
return true;
},
/**
* Set the selected date
* @param {Number} timestamp
*/
setDate: function setDate(timestamp) {
var date = new Date(timestamp);
this.selectedDate = date;
this.setPageDate(date);
this.$emit('selected', date);
this.$emit('input', date);
},
/**
* Clear the selected date
*/
clearDate: function clearDate() {
this.selectedDate = null;
this.setPageDate();
this.$emit('selected', null);
this.$emit('input', null);
this.$emit('cleared');
},
/**
* @param {Object} date
*/
selectDate: function selectDate(date) {
this.setDate(date.timestamp);
if (!this.isInline) {
this.close(true);
}
this.resetTypedDate = new Date();
},
/**
* @param {Object} date
*/
selectDisabledDate: function selectDisabledDate(date) {
this.$emit('selectedDisabled', date);
},
/**
* @param {Object} month
*/
selectMonth: function selectMonth(month) {
var date = new Date(month.timestamp);
if (this.allowedToShowView('day')) {
this.setPageDate(date);
this.$emit('changedMonth', month);
this.showDayCalendar();
} else {
this.selectDate(month);
}
},
/**
* @param {Object} year
*/
selectYear: function selectYear(year) {
var date = new Date(year.timestamp);
if (this.allowedToShowView('month')) {
this.setPageDate(date);
this.$emit('changedYear', year);
this.showMonthCalendar();
} else {
this.selectDate(year);
}
},
/**
* Set the datepicker value
* @param {Date|String|Number|null} date
*/
setValue: function setValue(date) {
if (typeof date === 'string' || typeof date === 'number') {
var parsed = new Date(date);
date = isNaN(parsed.valueOf()) ? null : parsed;
}
if (!date) {
this.setPageDate();
this.selectedDate = null;
return;
}
this.selectedDate = date;
this.setPageDate(date);
},
/**
* Sets the date that the calendar should open on
*/
setPageDate: function setPageDate(date) {
if (!date) {
if (this.openDate) {
date = new Date(this.openDate);
} else {
date = new Date();
}
}
this.pageTimestamp = this.utils.setDate(new Date(date), 1);
},
/**
* Handles a month change from the day picker
*/
handleChangedMonthFromDayPicker: function handleChangedMonthFromDayPicker(date) {
this.setPageDate(date);
this.$emit('changedMonth', date);
},
/**
* Set the date from a typedDate event
*/
setTypedDate: function setTypedDate(date) {
this.setDate(date.getTime());
},
/**
* Close all calendar layers
* @param {Boolean} emitEvent - emit close event
*/
close: function close(emitEvent) {
this.showDayView = this.showMonthView = this.showYearView = false;
if (!this.isInline) {
if (emitEvent) {
this.$emit('closed');
}
document.removeEventListener('click', this.clickOutside, false);
}
},
/**
* Initiate the component
*/
init: function init() {
if (this.value) {
this.setValue(this.value);
}
if (this.isInline) {
this.setInitialView();
}
}
},
mounted: function mounted() {
this.init();
}
} // eslint-disable-next-line
;
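// Minimal mounting sketch (illustrative; assumes Vue 2 and that this module's default export is
// registered as Datepicker - the names below are hypothetical):
//   new Vue({
//     el: '#app',
//     components: { Datepicker: Datepicker },
//     data: { picked: null },
//     template: '<datepicker v-model="picked" :use-utc="true" minimum-view="day" maximum-view="year"/>'
//   });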
var isOldIE = typeof navigator !== 'undefined' && /msie [6-9]\b/.test(navigator.userAgent.toLowerCase());
function createInjector(context) {
return function (id, style) {
return addStyle(id, style);
};
}
var HEAD = document.head || document.getElementsByTagName('head')[0];
var styles = {};
function addStyle(id, css) {
var group = isOldIE ? css.media || 'default' : id;
var style = styles[group] || (styles[group] = {
ids: new Set(),
styles: []
});
if (!style.ids.has(id)) {
style.ids.add(id);
var code = css.source;
if (css.map) {
// https://developer.chrome.com/devtools/docs/javascript-debugging
// this makes source maps inside style tags work properly in Chrome
code += '\n/*# sourceURL=' + css.map.sources[0] + ' */'; // http://stackoverflow.com/a/26603875
code += '\n/*# sourceMappingURL=data:application/json;base64,' + btoa(unescape(encodeURIComponent(JSON.stringify(css.map)))) + ' */';
}
if (!style.element) {
style.element = document.createElement('style');
style.element.type = 'text/css';
if (css.media) style.element.setAttribute('media', css.media);
HEAD.appendChild(style.element);
}
if ('styleSheet' in style.element) {
style.styles.push(code);
style.element.styleSheet.cssText = style.styles.filter(Boolean).join('\n');
} else {
var index = style.ids.size - 1;
var textNode = document.createTextNode(code);
var nodes = style.element.childNodes;
if (nodes[index]) style.element.removeChild(nodes[index]);
if (nodes.length) style.element.insertBefore(textNode, nodes[index]);else style.element.appendChild(textNode);
}
}
}
var browser = createInjector;
/* script */
const __vue_script__$4 = script$4;
/* template */
var __vue_render__$4 = function() {
var _vm = this;
var _h = _vm.$createElement;
var _c = _vm._self._c || _h;
return _c(
"div",
{
staticClass: "vdp-datepicker",
class: [_vm.wrapperClass, _vm.isRtl ? "rtl" : ""]
},
[
_c(
"date-input",
{
attrs: {
selectedDate: _vm.selectedDate,
resetTypedDate: _vm.resetTypedDate,
format: _vm.format,
translation: _vm.translation,
inline: _vm.inline,
id: _vm.id,
name: _vm.name,
refName: _vm.refName,
openDate: _vm.openDate,
placeholder: _vm.placeholder,
inputClass: _vm.inputClass,
typeable: _vm.typeable,
clearButton: _vm.clearButton,
clearButtonIcon: _vm.clearButtonIcon,
calendarButton: _vm.calendarButton,
calendarButtonIcon: _vm.calendarButtonIcon,
calendarButtonIconContent: _vm.calendarButtonIconContent,
disabled: _vm.disabled,
required: _vm.required,
bootstrapStyling: _vm.bootstrapStyling,
"use-utc": _vm.useUtc
},
on: {
showCalendar: _vm.showCalendar,
closeCalendar: _vm.close,
typedDate: _vm.setTypedDate,
clearDate: _vm.clearDate
}
},
[_vm._t("afterDateInput", null, { slot: "afterDateInput" })],
2
),
_vm._v(" "),
_vm.allowedToShowView("day")
? _c(
"picker-day",
{
attrs: {
pageDate: _vm.pageDate,
selectedDate: _vm.selectedDate,
showDayView: _vm.showDayView,
fullMonthName: _vm.fullMonthName,
allowedToShowView: _vm.allowedToShowView,
disabledDates: _vm.disabledDates,
highlighted: _vm.highlighted,
calendarClass: _vm.calendarClass,
calendarStyle: _vm.calendarStyle,
translation: _vm.translation,
pageTimestamp: _vm.pageTimestamp,
isRtl: _vm.isRtl,
mondayFirst: _vm.mondayFirst,
dayCellContent: _vm.dayCellContent,
"use-utc": _vm.useUtc
},
on: {
changedMonth: _vm.handleChangedMonthFromDayPicker,
selectDate: _vm.selectDate,
showMonthCalendar: _vm.showMonthCalendar,
selectedDisabled: _vm.selectDisabledDate
}
},
[
_vm._t("beforeCalendarHeader", null, {
slot: "beforeCalendarHeader"
})
],
2
)
: _vm._e(),
_vm._v(" "),
_vm.allowedToShowView("month")
? _c(
"picker-month",
{
attrs: {
pageDate: _vm.pageDate,
selectedDate: _vm.selectedDate,
showMonthView: _vm.showMonthView,
allowedToShowView: _vm.allowedToShowView,
disabledDates: _vm.disabledDates,
calendarClass: _vm.calendarClass,
calendarStyle: _vm.calendarStyle,
translation: _vm.translation,
isRtl: _vm.isRtl,
"use-utc": _vm.useUtc
},
on: {
selectMonth: _vm.selectMonth,
showYearCalendar: _vm.showYearCalendar,
changedYear: _vm.setPageDate
}
},
[
_vm._t("beforeCalendarHeader", null, {
slot: "beforeCalendarHeader"
})
],
2
)
: _vm._e(),
_vm._v(" "),
_vm.allowedToShowView("year")
? _c(
"picker-year",
{
attrs: {
pageDate: _vm.pageDate,
selectedDate: _vm.selectedDate,
showYearView: _vm.showYearView,
allowedToShowView: _vm.allowedToShowView,
disabledDates: _vm.disabledDates,
calendarClass: _vm.calendarClass,
calendarStyle: _vm.calendarStyle,
translation: _vm.translation,
isRtl: _vm.isRtl,
"use-utc": _vm.useUtc
},
on: { selectYear: _vm.selectYear, changedDecade: _vm.setPageDate }
},
[
_vm._t("beforeCalendarHeader", null, {
slot: "beforeCalendarHeader"
})
],
2
)
: _vm._e()
],
1
)
};
var __vue_staticRenderFns__$4 = [];
__vue_render__$4._withStripped = true;
/* style */
const __vue_inject_styles__$4 = function (inject) {
if (!inject) return
inject("data-v-5e9b5ce6_0", { source: ".rtl {\n direction: rtl;\n}\n.vdp-datepicker {\n position: relative;\n text-align: left;\n}\n.vdp-datepicker * {\n box-sizing: border-box;\n}\n.vdp-datepicker__calendar {\n position: absolute;\n z-index: 100;\n background: #fff;\n width: 300px;\n border: 1px solid #ccc;\n}\n.vdp-datepicker__calendar header {\n display: block;\n line-height: 40px;\n}\n.vdp-datepicker__calendar header span {\n display: inline-block;\n text-align: center;\n width: 71.42857142857143%;\n float: left;\n}\n.vdp-datepicker__calendar header .prev,\n.vdp-datepicker__calendar header .next {\n width: 14.285714285714286%;\n float: left;\n text-indent: -10000px;\n position: relative;\n}\n.vdp-datepicker__calendar header .prev:after,\n.vdp-datepicker__calendar header .next:after {\n content: '';\n position: absolute;\n left: 50%;\n top: 50%;\n transform: translateX(-50%) translateY(-50%);\n border: 6px solid transparent;\n}\n.vdp-datepicker__calendar header .prev:after {\n border-right: 10px solid #000;\n margin-left: -5px;\n}\n.vdp-datepicker__calendar header .prev.disabled:after {\n border-right: 10px solid #ddd;\n}\n.vdp-datepicker__calendar header .next:after {\n border-left: 10px solid #000;\n margin-left: 5px;\n}\n.vdp-datepicker__calendar header .next.disabled:after {\n border-left: 10px solid #ddd;\n}\n.vdp-datepicker__calendar header .prev:not(.disabled),\n.vdp-datepicker__calendar header .next:not(.disabled),\n.vdp-datepicker__calendar header .up:not(.disabled) {\n cursor: pointer;\n}\n.vdp-datepicker__calendar header .prev:not(.disabled):hover,\n.vdp-datepicker__calendar header .next:not(.disabled):hover,\n.vdp-datepicker__calendar header .up:not(.disabled):hover {\n background: #eee;\n}\n.vdp-datepicker__calendar .disabled {\n color: #ddd;\n cursor: default;\n}\n.vdp-datepicker__calendar .flex-rtl {\n display: flex;\n width: inherit;\n flex-wrap: wrap;\n}\n.vdp-datepicker__calendar .cell {\n display: inline-block;\n padding: 0 5px;\n width: 14.285714285714286%;\n height: 40px;\n line-height: 40px;\n text-align: center;\n vertical-align: middle;\n border: 1px solid transparent;\n}\n.vdp-datepicker__calendar .cell:not(.blank):not(.disabled).day,\n.vdp-datepicker__calendar .cell:not(.blank):not(.disabled).month,\n.vdp-datepicker__calendar .cell:not(.blank):not(.disabled).year {\n cursor: pointer;\n}\n.vdp-datepicker__calendar .cell:not(.blank):not(.disabled).day:hover,\n.vdp-datepicker__calendar .cell:not(.blank):not(.disabled).month:hover,\n.vdp-datepicker__calendar .cell:not(.blank):not(.disabled).year:hover {\n border: 1px solid #4bd;\n}\n.vdp-datepicker__calendar .cell.selected {\n background: #4bd;\n}\n.vdp-datepicker__calendar .cell.selected:hover {\n background: #4bd;\n}\n.vdp-datepicker__calendar .cell.selected.highlighted {\n background: #4bd;\n}\n.vdp-datepicker__calendar .cell.highlighted {\n background: #cae5ed;\n}\n.vdp-datepicker__calendar .cell.highlighted.disabled {\n color: #a3a3a3;\n}\n.vdp-datepicker__calendar .cell.grey {\n color: #888;\n}\n.vdp-datepicker__calendar .cell.grey:hover {\n background: inherit;\n}\n.vdp-datepicker__calendar .cell.day-header {\n font-size: 75%;\n white-space: nowrap;\n cursor: inherit;\n}\n.vdp-datepicker__calendar .cell.day-header:hover {\n background: inherit;\n}\n.vdp-datepicker__calendar .month,\n.vdp-datepicker__calendar .year {\n width: 33.333%;\n}\n.vdp-datepicker__clear-button,\n.vdp-datepicker__calendar-button {\n cursor: pointer;\n font-style: 
normal;\n}\n.vdp-datepicker__clear-button.disabled,\n.vdp-datepicker__calendar-button.disabled {\n color: #999;\n cursor: default;\n}\n", map: {"version":3,"sources":["Datepicker.vue"],"names":[],"mappings":"AAAA;EACE,cAAc;AAChB;AACA;EACE,kBAAkB;EAClB,gBAAgB;AAClB;AACA;EACE,sBAAsB;AACxB;AACA;EACE,kBAAkB;EAClB,YAAY;EACZ,gBAAgB;EAChB,YAAY;EACZ,sBAAsB;AACxB;AACA;EACE,cAAc;EACd,iBAAiB;AACnB;AACA;EACE,qBAAqB;EACrB,kBAAkB;EAClB,yBAAyB;EACzB,WAAW;AACb;AACA;;EAEE,0BAA0B;EAC1B,WAAW;EACX,qBAAqB;EACrB,kBAAkB;AACpB;AACA;;EAEE,WAAW;EACX,kBAAkB;EAClB,SAAS;EACT,QAAQ;EACR,4CAA4C;EAC5C,6BAA6B;AAC/B;AACA;EACE,6BAA6B;EAC7B,iBAAiB;AACnB;AACA;EACE,6BAA6B;AAC/B;AACA;EACE,4BAA4B;EAC5B,gBAAgB;AAClB;AACA;EACE,4BAA4B;AAC9B;AACA;;;EAGE,eAAe;AACjB;AACA;;;EAGE,gBAAgB;AAClB;AACA;EACE,WAAW;EACX,eAAe;AACjB;AACA;EACE,aAAa;EACb,cAAc;EACd,eAAe;AACjB;AACA;EACE,qBAAqB;EACrB,cAAc;EACd,0BAA0B;EAC1B,YAAY;EACZ,iBAAiB;EACjB,kBAAkB;EAClB,sBAAsB;EACtB,6BAA6B;AAC/B;AACA;;;EAGE,eAAe;AACjB;AACA;;;EAGE,sBAAsB;AACxB;AACA;EACE,gBAAgB;AAClB;AACA;EACE,gBAAgB;AAClB;AACA;EACE,gBAAgB;AAClB;AACA;EACE,mBAAmB;AACrB;AACA;EACE,cAAc;AAChB;AACA;EACE,WAAW;AACb;AACA;EACE,mBAAmB;AACrB;AACA;EACE,cAAc;EACd,mBAAmB;EACnB,eAAe;AACjB;AACA;EACE,mBAAmB;AACrB;AACA;;EAEE,cAAc;AAChB;AACA;;EAEE,eAAe;EACf,kBAAkB;AACpB;AACA;;EAEE,WAAW;EACX,eAAe;AACjB","file":"Datepicker.vue","sourcesContent":[".rtl {\n direction: rtl;\n}\n.vdp-datepicker {\n position: relative;\n text-align: left;\n}\n.vdp-datepicker * {\n box-sizing: border-box;\n}\n.vdp-datepicker__calendar {\n position: absolute;\n z-index: 100;\n background: #fff;\n width: 300px;\n border: 1px solid #ccc;\n}\n.vdp-datepicker__calendar header {\n display: block;\n line-height: 40px;\n}\n.vdp-datepicker__calendar header span {\n display: inline-block;\n text-align: center;\n width: 71.42857142857143%;\n float: left;\n}\n.vdp-datepicker__calendar header .prev,\n.vdp-datepicker__calendar header .next {\n width: 14.285714285714286%;\n float: left;\n text-indent: -10000px;\n position: relative;\n}\n.vdp-datepicker__calendar header .prev:after,\n.vdp-datepicker__calendar header .next:after {\n content: '';\n position: absolute;\n left: 50%;\n top: 50%;\n transform: translateX(-50%) translateY(-50%);\n border: 6px solid transparent;\n}\n.vdp-datepicker__calendar header .prev:after {\n border-right: 10px solid #000;\n margin-left: -5px;\n}\n.vdp-datepicker__calendar header .prev.disabled:after {\n border-right: 10px solid #ddd;\n}\n.vdp-datepicker__calendar header .next:after {\n border-left: 10px solid #000;\n margin-left: 5px;\n}\n.vdp-datepicker__calendar header .next.disabled:after {\n border-left: 10px solid #ddd;\n}\n.vdp-datepicker__calendar header .prev:not(.disabled),\n.vdp-datepicker__calendar header .next:not(.disabled),\n.vdp-datepicker__calendar header .up:not(.disabled) {\n cursor: pointer;\n}\n.vdp-datepicker__calendar header .prev:not(.disabled):hover,\n.vdp-datepicker__calendar header .next:not(.disabled):hover,\n.vdp-datepicker__calendar header .up:not(.disabled):hover {\n background: #eee;\n}\n.vdp-datepicker__calendar .disabled {\n color: #ddd;\n cursor: default;\n}\n.vdp-datepicker__calendar .flex-rtl {\n display: flex;\n width: inherit;\n flex-wrap: wrap;\n}\n.vdp-datepicker__calendar .cell {\n display: inline-block;\n padding: 0 5px;\n width: 14.285714285714286%;\n height: 40px;\n line-height: 40px;\n text-align: center;\n vertical-align: middle;\n border: 1px solid transparent;\n}\n.vdp-datepicker__calendar .cell:not(.blank):not(.disabled).day,\n.vdp-datepicker__calendar 
.cell:not(.blank):not(.disabled).month,\n.vdp-datepicker__calendar .cell:not(.blank):not(.disabled).year {\n cursor: pointer;\n}\n.vdp-datepicker__calendar .cell:not(.blank):not(.disabled).day:hover,\n.vdp-datepicker__calendar .cell:not(.blank):not(.disabled).month:hover,\n.vdp-datepicker__calendar .cell:not(.blank):not(.disabled).year:hover {\n border: 1px solid #4bd;\n}\n.vdp-datepicker__calendar .cell.selected {\n background: #4bd;\n}\n.vdp-datepicker__calendar .cell.selected:hover {\n background: #4bd;\n}\n.vdp-datepicker__calendar .cell.selected.highlighted {\n background: #4bd;\n}\n.vdp-datepicker__calendar .cell.highlighted {\n background: #cae5ed;\n}\n.vdp-datepicker__calendar .cell.highlighted.disabled {\n color: #a3a3a3;\n}\n.vdp-datepicker__calendar .cell.grey {\n color: #888;\n}\n.vdp-datepicker__calendar .cell.grey:hover {\n background: inherit;\n}\n.vdp-datepicker__calendar .cell.day-header {\n font-size: 75%;\n white-space: nowrap;\n cursor: inherit;\n}\n.vdp-datepicker__calendar .cell.day-header:hover {\n background: inherit;\n}\n.vdp-datepicker__calendar .month,\n.vdp-datepicker__calendar .year {\n width: 33.333%;\n}\n.vdp-datepicker__clear-button,\n.vdp-datepicker__calendar-button {\n cursor: pointer;\n font-style: normal;\n}\n.vdp-datepicker__clear-button.disabled,\n.vdp-datepicker__calendar-button.disabled {\n color: #999;\n cursor: default;\n}\n"]}, media: undefined });
};
/* scoped */
const __vue_scope_id__$4 = undefined;
/* module identifier */
const __vue_module_identifier__$4 = undefined;
/* functional template */
const __vue_is_functional_template__$4 = false;
/* style inject SSR */
var Datepicker = normalizeComponent_1(
{ render: __vue_render__$4, staticRenderFns: __vue_staticRenderFns__$4 },
__vue_inject_styles__$4,
__vue_script__$4,
__vue_scope_id__$4,
__vue_is_functional_template__$4,
__vue_module_identifier__$4,
browser,
undefined
);
export default Datepicker;
| _createClass |
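// A minimal usage sketch for the Datepicker component exported above
// (illustrative, not part of the bundle). It assumes a Vue 2 app, that the
// package is importable as 'vuejs-datepicker', and that the component pairs
// its `value` prop (read in init() above) with an input event so v-model
// works; the '#app' mount point is hypothetical.
import Vue from 'vue';
import Datepicker from 'vuejs-datepicker';

new Vue({
  el: '#app',
  components: { Datepicker },
  data: function () {
    return { date: new Date() };
  },
  template: '<datepicker v-model="date" placeholder="Select a date" />'
});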
choice.py | from __future__ import annotations
from abc import ABC
from enum import Enum
from typing import Any, List, Union
from awsstepfuncs.abstract_state import AbstractState
from awsstepfuncs.errors import AWSStepFuncsValueError
from awsstepfuncs.reference_path import ReferencePath
class DataTestExpressionType(Enum):
"""All the different types of data-test expressions.
Check section "Data-test expression" for a full list:
https://states-language.net/#choice-state
"""
STRING_EQUALS = "string_equals"
STRING_EQUALS_PATH = "string_equals_path"
STRING_LESS_THAN = "string_less_than"
STRING_LESS_THAN_PATH = "string_less_than_path"
STRING_GREATER_THAN = "string_greater_than"
STRING_GREATER_THAN_PATH = "string_greater_than_path"
STRING_LESS_THAN_EQUALS = "string_less_than_equals"
STRING_LESS_THAN_EQUALS_PATH = "string_less_than_equals_path"
STRING_GREATER_THAN_EQUALS = "string_greater_than_equals"
STRING_GREATER_THAN_EQUALS_PATH = "string_greater_than_equals_path"
STRING_MATCHES = "string_matches"
NUMERIC_EQUALS = "numeric_equals"
NUMERIC_EQUALS_PATH = "numeric_equals_path"
NUMERIC_LESS_THAN = "numeric_less_than"
NUMERIC_LESS_THAN_PATH = "numeric_less_than_path"
NUMERIC_GREATER_THAN = "numeric_greater_than"
NUMERIC_GREATER_THAN_PATH = "numeric_greater_than_path"
NUMERIC_LESS_THAN_EQUALS = "numeric_less_than_equals"
NUMERIC_LESS_THAN_EQUALS_PATH = "numeric_less_than_equals_path"
NUMERIC_GREATER_THAN_EQUALS = "numeric_greater_than_equals"
NUMERIC_GREATER_THAN_EQUALS_PATH = "numeric_greater_than_equals_path"
BOOLEAN_EQUALS = "boolean_equals"
BOOLEAN_EQUALS_PATH = "boolean_equals_path"
TIMESTAMP_EQUALS = "timestamp_equals"
TIMESTAMP_EQUALS_PATH = "timestamp_equals_path"
TIMESTAMP_LESS_THAN = "timestamp_less_than"
TIMESTAMP_LESS_THAN_PATH = "timestamp_less_than_path"
TIMESTAMP_GREATER_THAN = "timestamp_greater_than"
TIMESTAMP_GREATER_THAN_PATH = "timestamp_greater_than_path"
TIMESTAMP_LESS_THAN_EQUALS = "timestamp_less_than_equals"
TIMESTAMP_LESS_THAN_EQUALS_PATH = "timestamp_less_than_equals_path"
TIMESTAMP_GREATER_THAN_EQUALS = "timestamp_greater_than_equals"
TIMESTAMP_GREATER_THAN_EQUALS_PATH = "timestamp_greater_than_equals_path"
IS_NULL = "is_null"
IS_PRESENT = "is_present"
IS_NUMERIC = "is_numeric"
IS_STRING = "is_string"
IS_BOOLEAN = "is_boolean"
IS_TIMESTAMP = "is_timestamp"
class DataTestExpression:
"""A data-test expression."""
def __init__(self, type: str, expression: Any): # noqa: A002
"""Initialize a data-test expression.
Args:
type: The type of data-test expression, such as string_equals.
expression: The expression to use when evaluating based on the type.
"""
# NOTE: The enum is just used for validation
self.type = DataTestExpressionType(type).value
self.expression = ReferencePath(expression) if "path" in type else expression
def __repr__(self) -> str:
"""A string representation of a data-test expression."""
return f"{self.__class__.__name__}({self.type}={self.expression!r})"
class ChoiceRule:
"""Choice Rules are used in Choices.
    When initializing a Choice Rule, a data-test expression must be provided. A
    Choice Rule evaluates to `True` or `False` by applying the data-test
    expression to some data.
"""
def __init__(self, variable: str, **data_test_expression: Any):
"""Initialize a Choice Rule.
Args:
variable: The Reference Path to a variable in the state input.
data_test_expression: The data-test expression to use.
Raises:
AWSStepFuncsValueError: Raised when there is not exactly one data-test
expression defined.
"""
self.variable = ReferencePath(variable)
if len(data_test_expression) != 1:
raise AWSStepFuncsValueError(
"Exactly one data-test expression must be defined"
)
self.data_test_expression = DataTestExpression(
*list(data_test_expression.items())[0]
)
def __repr__(self) -> str:
"""Return a string representation of the Choice Rule.
Returns:
A string representing the Choice Rule.
"""
return f"{self.__class__.__name__}({self.variable!r}, {self.data_test_expression.type}={self.data_test_expression.expression!r})"
def evaluate(self, data: Any) -> bool:
"""Evaulate the Choice Rule with a data-test expression on some data.
Args:
data: Input data to evaluate.
Returns:
True or false based on the data and the Choice Rule.
"""
variable_value = self.variable.apply(data)
if variable_value is None:
return False
if "path" in self.data_test_expression.type:
return eval(f"self._{self.data_test_expression.type}(data, variable_value)")
else:
return eval(f"self._{self.data_test_expression.type}(variable_value)")
def _is_present(self, variable_value: Any) -> bool:
return variable_value is not None
def _string_equals(self, variable_value: str) -> bool:
return variable_value == self.data_test_expression.expression
def | (self, data: Any, variable_value: str) -> bool:
string_equals = self.data_test_expression.expression.apply(data) # type: ignore
if not (isinstance(string_equals, str)):
raise AWSStepFuncsValueError(
"string_equals_path must evaluate to a string value"
)
return variable_value == string_equals
def _string_greater_than(self, variable_value: str) -> bool:
return variable_value > self.data_test_expression.expression # type: ignore
def _string_greater_than_path(self, data: Any, variable_value: str) -> bool:
string_greater_than = self.data_test_expression.expression.apply(data) # type: ignore
if not (isinstance(string_greater_than, str)): # pragma: no cover
raise AWSStepFuncsValueError(
"string_greater_than_path must evaluate to a string value"
)
return variable_value > string_greater_than
def _string_less_than(self, variable_value: str) -> bool:
return variable_value < self.data_test_expression.expression # type: ignore
def _string_less_than_path(self, data: Any, variable_value: str) -> bool:
string_less_than = self.data_test_expression.expression.apply(data) # type: ignore
if not (isinstance(string_less_than, str)): # pragma: no cover
raise AWSStepFuncsValueError(
"string_less_than_path must evaluate to a string value"
)
return variable_value < string_less_than
def _string_greater_than_equals(self, variable_value: str) -> bool:
return variable_value >= self.data_test_expression.expression # type: ignore
def _string_greater_than_equals_path(self, data: Any, variable_value: str) -> bool:
string_greater_than_equals = self.data_test_expression.expression.apply(data) # type: ignore
if not (isinstance(string_greater_than_equals, str)): # pragma: no cover
raise AWSStepFuncsValueError(
"string_greater_than_equals_path must evaluate to a string value"
)
return variable_value >= string_greater_than_equals
def _string_less_than_equals(self, variable_value: str) -> bool:
return variable_value <= self.data_test_expression.expression # type: ignore
def _string_less_than_equals_path(self, data: Any, variable_value: str) -> bool:
string_less_than_equals = self.data_test_expression.expression.apply(data) # type: ignore
if not (isinstance(string_less_than_equals, str)): # pragma: no cover
raise AWSStepFuncsValueError(
"string_less_than_equals_path must evaluate to a string value"
)
return variable_value <= string_less_than_equals
def _numeric_greater_than_equals(self, variable_value: Union[float, int]) -> bool:
return variable_value >= self.data_test_expression.expression # type: ignore
def _numeric_greater_than_path(
self, data: Any, variable_value: Union[float, int]
) -> bool:
numeric_greater_than = self.data_test_expression.expression.apply(data) # type: ignore
if not (
isinstance(numeric_greater_than, int)
or isinstance(numeric_greater_than, float)
):
raise AWSStepFuncsValueError(
"numeric_greater_than_path must evaluate to a numeric value"
)
return variable_value > numeric_greater_than
def _numeric_less_than(self, variable_value: Union[float, int]) -> bool:
return variable_value < self.data_test_expression.expression # type: ignore
class AbstractChoice(ABC):
"""Choices for Choice State."""
def __init__(self, next_state: AbstractState):
"""Perform common initialization steps for all choices.
Args:
next_state: The state that the choice should transition to if true.
"""
self.next_state = next_state
def evaluate(self, data: Any) -> bool:
"""Evaulate the choice on some given data.
Args:
data: Input data to evaluate.
Raises:
NotImplementedError: Raised if not implemented in child classes.
"""
raise NotImplementedError
class NotChoice(AbstractChoice):
"""Not choice for the Choice State.
The Not Choice can be evaluated based on input data to true or false based
on whether the Choice Rule is false.
"""
def __init__(
self,
variable: str,
*,
next_state: AbstractState,
**data_test_expression: Any,
):
"""Initialize a NotChoice.
Args:
variable: The Reference Path to a variable in the state input.
next_state: The state to transition to if evaluated to true.
data_test_expression: The data-test expression to use.
"""
super().__init__(next_state)
self.choice_rule = ChoiceRule(
variable,
**data_test_expression,
)
def evaluate(self, data: Any) -> bool:
"""Evaulate the Not Choice on some given data.
Args:
data: Input data to evaluate.
Returns:
Whether the choice evaluates to true based on the input data.
"""
return not self.choice_rule.evaluate(data)
class AndChoice(AbstractChoice):
"""And Choice for the Choice State.
The And Choice can be evaluated based on input data to true or false based
on whether all Choice Rules are true.
"""
def __init__(
self,
choice_rules: List[ChoiceRule],
*,
next_state: AbstractState,
):
"""Initialize an AndChoice.
Args:
choice_rules: A list of Choice Rules which must ALL evaluate to true.
next_state: The state to transition to if true.
"""
super().__init__(next_state)
self.choice_rules = choice_rules
def evaluate(self, data: Any) -> bool:
"""Evaulate the And Choice on some given data.
Args:
data: Input data to evaluate.
Returns:
Whether the choice evaluates to true based on the input data.
"""
return all(choice_rule.evaluate(data) for choice_rule in self.choice_rules)
class VariableChoice(AbstractChoice):
"""Variable Choice for the Choice State.
The Variable Choice can be evaluated based on input data to true or false
based on whether the Choice Rule is true.
Be careful if you use a Reference Path that it evaluates to the correct
type.
"""
def __init__(
self,
variable: str,
*,
next_state: AbstractState,
**data_test_expression: Any,
):
"""Initialize a VariableChoice.
Args:
variable: The Reference Path to a variable in the state input.
next_state: The state to transition to if evaluated to true.
data_test_expression: The data-test expression to use.
"""
super().__init__(next_state)
self.choice_rule = ChoiceRule(
variable,
**data_test_expression,
)
def evaluate(self, data: Any) -> bool:
"""Evaulate the Variable Choice on some given data.
Args:
data: Input data to evaluate.
Returns:
Whether the choice evaluates to true based on the input data.
"""
return self.choice_rule.evaluate(data)
| _string_equals_path |
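# A minimal usage sketch for ChoiceRule above (illustrative, not part of
# choice.py). It assumes ReferencePath accepts the "$.field" syntax used by
# AWS Step Functions, so "$.career" selects data["career"].
rule = ChoiceRule("$.career", string_equals="Pirate")
assert rule.evaluate({"career": "Pirate"}) is True
assert rule.evaluate({"career": "Chef"}) is False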
rrpn.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from typing import Dict
import torch
from detectron2.layers import ShapeSpec
from ..box_regression import Box2BoxTransformRotated
from .build import PROPOSAL_GENERATOR_REGISTRY
from .rpn import RPN
from .rrpn_outputs import RRPNOutputs, find_top_rrpn_proposals
logger = logging.getLogger(__name__)
@PROPOSAL_GENERATOR_REGISTRY.register()
class RRPN(RPN):
"""
Rotated RPN subnetwork.
Please refer to https://arxiv.org/pdf/1703.01086.pdf for the original RRPN paper:
Ma, J., Shao, W., Ye, H., Wang, L., Wang, H., Zheng, Y., & Xue, X. (2018).
Arbitrary-oriented scene text detection via rotation proposals.
IEEE Transactions on Multimedia, 20(11), 3111-3122.
"""
def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
super().__init__(cfg, input_shape)
self.box2box_transform = Box2BoxTransformRotated(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS)
def forward(self, images, features, gt_instances=None):
"""
Args:
images (ImageList): input images of length `N`
features (dict[str: Tensor]): input data as a mapping from feature
map name to tensor. Axis 0 represents the number of images `N` in
the input data; axes 1-3 are channels, height, and width, which may
vary between feature maps (e.g., if a feature pyramid is used).
gt_instances (list[Instances], optional): a length `N` list of `Instances`s.
Each `Instances` stores ground-truth instances for the corresponding image.
Returns:
proposals: list[Instances] or None
loss: dict[Tensor]
"""
gt_boxes = [x.gt_boxes for x in gt_instances] if gt_instances is not None else None
del gt_instances
features = [features[f] for f in self.in_features]
pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features)
anchors = self.anchor_generator(features)
outputs = RRPNOutputs(
self.box2box_transform,
self.anchor_matcher,
self.batch_size_per_image,
self.positive_fraction,
images,
pred_objectness_logits,
pred_anchor_deltas,
anchors,
self.boundary_threshold,
gt_boxes,
self.smooth_l1_beta,
self.lambda_
)
if self.training:
losses = outputs.losses()
else:
losses = {}
with torch.no_grad():
# Find the top proposals by applying NMS and removing boxes that
# are too small. The proposals are treated as fixed for approximate
# joint training with roi heads. This approach ignores the derivative | outputs.predict_objectness_logits(),
images,
self.nms_thresh,
self.pre_nms_topk[self.training],
self.post_nms_topk[self.training],
self.min_box_side_len,
self.training,
)
return proposals, losses | # w.r.t. the proposal boxes’ coordinates that are also network
# responses, so is approximate.
proposals = find_top_rrpn_proposals(
outputs.predict_proposals(), |
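# A standalone sketch (not detectron2 code) of the "approximate joint
# training" trick used in RRPN.forward above: decoding runs under
# torch.no_grad(), so gradients flow through the RPN losses but not through
# the proposal coordinates handed to the RoI heads. The delta
# parameterization here is the standard axis-aligned one, simplified from
# the rotated transform actually used by RRPN.
import torch

def decode_fixed_proposals(anchors, deltas):
    # anchors, deltas: (N, 4) tensors; anchors are (x1, y1, x2, y2) boxes
    # and deltas are (dx, dy, dw, dh) offsets.
    with torch.no_grad():  # detaches the decoded boxes from the graph
        widths = anchors[:, 2] - anchors[:, 0]
        heights = anchors[:, 3] - anchors[:, 1]
        ctr_x = anchors[:, 0] + 0.5 * widths
        ctr_y = anchors[:, 1] + 0.5 * heights
        pred_ctr_x = deltas[:, 0] * widths + ctr_x
        pred_ctr_y = deltas[:, 1] * heights + ctr_y
        pred_w = torch.exp(deltas[:, 2]) * widths
        pred_h = torch.exp(deltas[:, 3]) * heights
        boxes = torch.stack(
            [
                pred_ctr_x - 0.5 * pred_w,
                pred_ctr_y - 0.5 * pred_h,
                pred_ctr_x + 0.5 * pred_w,
                pred_ctr_y + 0.5 * pred_h,
            ],
            dim=1,
        )
    return boxes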
db.go | // Copyright (c) 2014-2017 The btcsuite developers
// Copyright (c) 2015 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package waddrmgr
import (
"crypto/sha256"
"encoding/binary"
"errors"
"fmt"
"time"
"github.com/endurio/ndrd/chaincfg/chainhash"
"github.com/endurio/ndrw/walletdb"
)
var (
// LatestMgrVersion is the most recent manager version.
LatestMgrVersion = getLatestVersion()
// latestMgrVersion is the most recent manager version as a variable so
// the tests can change it to force errors.
latestMgrVersion = LatestMgrVersion
)
// ObtainUserInputFunc is a function that reads a user input and returns it as
// a byte stream. It is used to accept data required during upgrades, for e.g.
// wallet seed and private passphrase.
type ObtainUserInputFunc func() ([]byte, error)
// maybeConvertDbError converts the passed error to a ManagerError with an
// error code of ErrDatabase if it is not already a ManagerError. This is
// useful for potential errors returned from a managed transaction and other parts
// of the walletdb database.
func maybeConvertDbError(err error) error {
// When the error is already a ManagerError, just return it.
if _, ok := err.(ManagerError); ok {
return err
}
return managerError(ErrDatabase, err.Error(), err)
}
// syncStatus represents an address synchronization status stored in the
// database.
type syncStatus uint8
// These constants define the various supported sync status types.
//
// NOTE: These are currently unused but are being defined for the possibility
// of supporting sync status on a per-address basis.
const (
ssNone syncStatus = 0 // not iota as they need to be stable for db
ssPartial syncStatus = 1
ssFull syncStatus = 2
)
// addressType represents a type of address stored in the database.
type addressType uint8
// These constants define the various supported address types.
const (
adtChain addressType = 0
adtImport addressType = 1 // not iota as they need to be stable for db
adtScript addressType = 2
)
// accountType represents a type of account stored in the database.
type accountType uint8
// These constants define the various supported account types.
const (
// accountDefault is the current "default" account type within the
// database. This is an account that re-uses the key derivation schema
// of BIP0044-like accounts.
accountDefault accountType = 0 // not iota as they need to be stable
)
// dbAccountRow houses information stored about an account in the database.
type dbAccountRow struct {
acctType accountType
rawData []byte // Varies based on account type field.
}
// dbDefaultAccountRow houses additional information stored about a default
// BIP0044-like account in the database.
type dbDefaultAccountRow struct {
dbAccountRow
pubKeyEncrypted []byte
privKeyEncrypted []byte
nextExternalIndex uint32
nextInternalIndex uint32
name string
}
// dbAddressRow houses common information stored about an address in the
// database.
type dbAddressRow struct {
addrType addressType
account uint32
addTime uint64
syncStatus syncStatus
rawData []byte // Varies based on address type field.
}
// dbChainAddressRow houses additional information stored about a chained
// address in the database.
type dbChainAddressRow struct {
dbAddressRow
branch uint32
index uint32
}
// dbImportedAddressRow houses additional information stored about an imported
// public key address in the database.
type dbImportedAddressRow struct {
dbAddressRow
encryptedPubKey []byte
encryptedPrivKey []byte
}
// dbScriptAddressRow houses additional information stored about a script
// address in the database.
type dbScriptAddressRow struct {
dbAddressRow
encryptedHash []byte
encryptedScript []byte
}
// Key names for various database fields.
var (
	// nullVal is a null byte used as a flag value in a bucket entry
nullVal = []byte{0}
// Bucket names.
// scopeSchemaBucket is the name of the bucket that maps a particular
// manager scope to the type of addresses that should be derived for
// particular branches during key derivation.
scopeSchemaBucketName = []byte("scope-schema")
	// scopeBucketName is the name of the top-level bucket within the
// hierarchy. It maps: purpose || coinType to a new sub-bucket that
// will house a scoped address manager. All buckets below are a child
// of this bucket:
//
// scopeBucket -> scope -> acctBucket
// scopeBucket -> scope -> addrBucket
// scopeBucket -> scope -> usedAddrBucket
// scopeBucket -> scope -> addrAcctIdxBucket
// scopeBucket -> scope -> acctNameIdxBucket
// scopeBucket -> scope -> acctIDIdxBucketName
// scopeBucket -> scope -> metaBucket
// scopeBucket -> scope -> metaBucket -> lastAccountNameKey
// scopeBucket -> scope -> coinTypePrivKey
// scopeBucket -> scope -> coinTypePubKey
scopeBucketName = []byte("scope")
// coinTypePrivKeyName is the name of the key within a particular scope
// bucket that stores the encrypted cointype private keys. Each scope
// within the database will have its own set of coin type keys.
coinTypePrivKeyName = []byte("ctpriv")
	// coinTypePubKeyName is the name of the key within a particular scope
// bucket that stores the encrypted cointype public keys. Each scope
// will have its own set of coin type public keys.
coinTypePubKeyName = []byte("ctpub")
// acctBucketName is the bucket directly below the scope bucket in the
// hierarchy. This bucket stores all the information and indexes
// relevant to an account.
acctBucketName = []byte("acct")
// addrBucketName is the name of the bucket that stores a mapping of
// pubkey hash to address type. This will be used to quickly determine
// if a given address is under our control.
addrBucketName = []byte("addr")
	// addrAcctIdxBucketName is used to index account addresses. Entries in
// this index may map:
// * addr hash => account id
// * account bucket -> addr hash => null
//
// To fetch the account of an address, lookup the value using the
// address hash.
//
// To fetch all addresses of an account, fetch the account bucket,
// iterate over the keys and fetch the address row from the addr
// bucket.
//
// The index needs to be updated whenever an address is created e.g.
// NewAddress
addrAcctIdxBucketName = []byte("addracctidx")
// acctNameIdxBucketName is used to create an index mapping an account
// name string to the corresponding account id. The index needs to be
// updated whenever the account name and id changes e.g. RenameAccount
//
// string => account_id
acctNameIdxBucketName = []byte("acctnameidx")
// acctIDIdxBucketName is used to create an index mapping an account id
// to the corresponding account name string. The index needs to be
// updated whenever the account name and id changes e.g. RenameAccount
//
// account_id => string
acctIDIdxBucketName = []byte("acctididx")
// usedAddrBucketName is the name of the bucket that stores an
// addresses hash if the address has been used or not.
usedAddrBucketName = []byte("usedaddrs")
// meta is used to store meta-data about the address manager
// e.g. last account number
metaBucketName = []byte("meta")
// lastAccountName is used to store the metadata - last account
// in the manager
lastAccountName = []byte("lastaccount")
// mainBucketName is the name of the bucket that stores the encrypted
// crypto keys that encrypt all other generated keys, the watch only
// flag, the master private key (encrypted), the master HD private key
// (encrypted), and also versioning information.
mainBucketName = []byte("main")
// masterHDPrivName is the name of the key that stores the master HD
// private key. This key is encrypted with the master private crypto
// encryption key. This resides under the main bucket.
masterHDPrivName = []byte("mhdpriv")
// masterHDPubName is the name of the key that stores the master HD
// public key. This key is encrypted with the master public crypto
	// encryption key. This resides under the main bucket.
masterHDPubName = []byte("mhdpub")
// syncBucketName is the name of the bucket that stores the current
// sync state of the root manager.
syncBucketName = []byte("sync")
// Db related key names (main bucket).
mgrVersionName = []byte("mgrver")
mgrCreateDateName = []byte("mgrcreated")
// Crypto related key names (main bucket).
masterPrivKeyName = []byte("mpriv")
masterPubKeyName = []byte("mpub")
cryptoPrivKeyName = []byte("cpriv")
cryptoPubKeyName = []byte("cpub")
cryptoScriptKeyName = []byte("cscript")
watchingOnlyName = []byte("watchonly")
// Sync related key names (sync bucket).
syncedToName = []byte("syncedto")
startBlockName = []byte("startblock")
birthdayName = []byte("birthday")
birthdayBlockName = []byte("birthdayblock")
birthdayBlockVerifiedName = []byte("birthdayblockverified")
)
// uint32ToBytes converts a 32-bit unsigned integer into a 4-byte slice in
// little-endian order: 1 -> [1 0 0 0].
func uint32ToBytes(number uint32) []byte {
buf := make([]byte, 4)
binary.LittleEndian.PutUint32(buf, number)
return buf
}
// uint64ToBytes converts a 64-bit unsigned integer into an 8-byte slice in
// little-endian order: 1 -> [1 0 0 0 0 0 0 0].
func uint64ToBytes(number uint64) []byte {
buf := make([]byte, 8)
binary.LittleEndian.PutUint64(buf, number)
return buf
}
// stringToBytes converts a string into a variable length byte slice with a
// little-endian length prefix: "abc" -> [3 0 0 0 97 98 99]
func stringToBytes(s string) []byte {
// The serialized format is:
// <size><string>
//
// 4 bytes string size + string
size := len(s)
buf := make([]byte, 4+size)
copy(buf[0:4], uint32ToBytes(uint32(size)))
copy(buf[4:4+size], s)
return buf
}
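// exampleEncodings is an illustrative helper (not part of the original
// file) showing the encoders above on concrete values.
func exampleEncodings() {
	fmt.Printf("%v\n", uint32ToBytes(257))   // [1 1 0 0]
	fmt.Printf("%v\n", uint64ToBytes(1))     // [1 0 0 0 0 0 0 0]
	fmt.Printf("%v\n", stringToBytes("abc")) // [3 0 0 0 97 98 99]
}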
// scopeKeySize is the size of a scope as stored within the database.
const scopeKeySize = 8
// scopeToBytes transforms a manager's scope into the form that will be used to
// retrieve the bucket that all information for a particular scope is stored
// under.
func scopeToBytes(scope *KeyScope) [scopeKeySize]byte {
var scopeBytes [scopeKeySize]byte
binary.LittleEndian.PutUint32(scopeBytes[:], scope.Purpose)
binary.LittleEndian.PutUint32(scopeBytes[4:], scope.Coin)
return scopeBytes
}
// scopeFromBytes decodes a serialized manager scope into its concrete manager
// scope struct.
func scopeFromBytes(scopeBytes []byte) KeyScope {
return KeyScope{
Purpose: binary.LittleEndian.Uint32(scopeBytes[:]),
Coin: binary.LittleEndian.Uint32(scopeBytes[4:]),
}
}
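// exampleScopeRoundTrip is an illustrative helper (not part of the
// original file). KeyScope is defined elsewhere in this package; the
// purpose/coin values are arbitrary BIP0044-style examples.
func exampleScopeRoundTrip() {
	scope := KeyScope{Purpose: 44, Coin: 0}
	key := scopeToBytes(&scope) // [44 0 0 0 0 0 0 0]
	decoded := scopeFromBytes(key[:])
	fmt.Println(decoded == scope) // true
}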
// scopeSchemaToBytes encodes the passed scope schema as a set of bytes
// suitable for storage within the database.
func scopeSchemaToBytes(schema *ScopeAddrSchema) []byte {
var schemaBytes [2]byte
schemaBytes[0] = byte(schema.InternalAddrType)
schemaBytes[1] = byte(schema.ExternalAddrType)
return schemaBytes[:]
}
// scopeSchemaFromBytes decodes a new scope schema instance from the set of
// serialized bytes.
func scopeSchemaFromBytes(schemaBytes []byte) *ScopeAddrSchema {
return &ScopeAddrSchema{
InternalAddrType: AddressType(schemaBytes[0]),
ExternalAddrType: AddressType(schemaBytes[1]),
}
}
// fetchScopeAddrSchema will attempt to retrieve the address schema for a
// particular manager scope stored within the database. These are used in order
// to properly type each address generated by the scope address manager.
func fetchScopeAddrSchema(ns walletdb.ReadBucket,
scope *KeyScope) (*ScopeAddrSchema, error) {
schemaBucket := ns.NestedReadBucket(scopeSchemaBucketName)
if schemaBucket == nil {
str := fmt.Sprintf("unable to find scope schema bucket")
return nil, managerError(ErrScopeNotFound, str, nil)
}
scopeKey := scopeToBytes(scope)
schemaBytes := schemaBucket.Get(scopeKey[:])
if schemaBytes == nil {
str := fmt.Sprintf("unable to find scope %v", scope)
return nil, managerError(ErrScopeNotFound, str, nil)
}
return scopeSchemaFromBytes(schemaBytes), nil
}
// putScopeAddrTypes attempts to store the passed address schema for the given
// manager scope.
func putScopeAddrTypes(ns walletdb.ReadWriteBucket, scope *KeyScope,
schema *ScopeAddrSchema) error {
scopeSchemaBucket := ns.NestedReadWriteBucket(scopeSchemaBucketName)
if scopeSchemaBucket == nil {
str := fmt.Sprintf("unable to find scope schema bucket")
return managerError(ErrScopeNotFound, str, nil)
}
scopeKey := scopeToBytes(scope)
schemaBytes := scopeSchemaToBytes(schema)
return scopeSchemaBucket.Put(scopeKey[:], schemaBytes)
}
func fetchReadScopeBucket(ns walletdb.ReadBucket, scope *KeyScope) (walletdb.ReadBucket, error) {
rootScopeBucket := ns.NestedReadBucket(scopeBucketName)
scopeKey := scopeToBytes(scope)
scopedBucket := rootScopeBucket.NestedReadBucket(scopeKey[:])
if scopedBucket == nil {
str := fmt.Sprintf("unable to find scope %v", scope)
return nil, managerError(ErrScopeNotFound, str, nil)
}
return scopedBucket, nil
}
func fetchWriteScopeBucket(ns walletdb.ReadWriteBucket,
scope *KeyScope) (walletdb.ReadWriteBucket, error) {
rootScopeBucket := ns.NestedReadWriteBucket(scopeBucketName)
scopeKey := scopeToBytes(scope)
scopedBucket := rootScopeBucket.NestedReadWriteBucket(scopeKey[:])
if scopedBucket == nil {
str := fmt.Sprintf("unable to find scope %v", scope)
return nil, managerError(ErrScopeNotFound, str, nil)
}
return scopedBucket, nil
}
// fetchManagerVersion fetches the current manager version from the database.
func fetchManagerVersion(ns walletdb.ReadBucket) (uint32, error) {
mainBucket := ns.NestedReadBucket(mainBucketName)
verBytes := mainBucket.Get(mgrVersionName)
if verBytes == nil {
str := "required version number not stored in database"
return 0, managerError(ErrDatabase, str, nil)
}
version := binary.LittleEndian.Uint32(verBytes)
return version, nil
}
// putManagerVersion stores the provided version to the database.
func putManagerVersion(ns walletdb.ReadWriteBucket, version uint32) error {
bucket := ns.NestedReadWriteBucket(mainBucketName)
verBytes := uint32ToBytes(version)
err := bucket.Put(mgrVersionName, verBytes)
if err != nil {
str := "failed to store version"
return managerError(ErrDatabase, str, err)
}
return nil
}
// fetchMasterKeyParams loads the master key parameters needed to derive them
// (when given the correct user-supplied passphrase) from the database. Either
// returned value can be nil, but in practice only the private key params will
// be nil for a watching-only database.
func fetchMasterKeyParams(ns walletdb.ReadBucket) ([]byte, []byte, error) {
bucket := ns.NestedReadBucket(mainBucketName)
// Load the master public key parameters. Required.
val := bucket.Get(masterPubKeyName)
if val == nil {
str := "required master public key parameters not stored in " +
"database"
return nil, nil, managerError(ErrDatabase, str, nil)
}
pubParams := make([]byte, len(val))
copy(pubParams, val)
// Load the master private key parameters if they were stored.
var privParams []byte
val = bucket.Get(masterPrivKeyName)
if val != nil {
privParams = make([]byte, len(val))
copy(privParams, val)
}
return pubParams, privParams, nil
}
// putMasterKeyParams stores the master key parameters needed to derive them to
// the database. Either parameter can be nil in which case no value is
// written for the parameter.
func putMasterKeyParams(ns walletdb.ReadWriteBucket, pubParams, privParams []byte) error {
bucket := ns.NestedReadWriteBucket(mainBucketName)
if privParams != nil {
err := bucket.Put(masterPrivKeyName, privParams)
if err != nil {
str := "failed to store master private key parameters"
return managerError(ErrDatabase, str, err)
}
}
if pubParams != nil {
err := bucket.Put(masterPubKeyName, pubParams)
if err != nil {
str := "failed to store master public key parameters"
return managerError(ErrDatabase, str, err)
}
}
return nil
}
// fetchCoinTypeKeys loads the encrypted cointype keys which are in turn used
// to derive the extended keys for all accounts. Each cointype key is
// associated with a particular manager scope.
func fetchCoinTypeKeys(ns walletdb.ReadBucket, scope *KeyScope) ([]byte, []byte, error) {
scopedBucket, err := fetchReadScopeBucket(ns, scope)
if err != nil {
return nil, nil, err
}
coinTypePubKeyEnc := scopedBucket.Get(coinTypePubKeyName)
if coinTypePubKeyEnc == nil {
str := "required encrypted cointype public key not stored in database"
return nil, nil, managerError(ErrDatabase, str, nil)
}
coinTypePrivKeyEnc := scopedBucket.Get(coinTypePrivKeyName)
if coinTypePrivKeyEnc == nil {
str := "required encrypted cointype private key not stored in database"
return nil, nil, managerError(ErrDatabase, str, nil)
}
return coinTypePubKeyEnc, coinTypePrivKeyEnc, nil
}
// putCoinTypeKeys stores the encrypted cointype keys which are in turn used to
// derive the extended keys for all accounts. Either parameter can be nil in
// which case no value is written for the parameter. Each cointype key is
// associated with a particular manager scope.
func putCoinTypeKeys(ns walletdb.ReadWriteBucket, scope *KeyScope,
coinTypePubKeyEnc []byte, coinTypePrivKeyEnc []byte) error {
scopedBucket, err := fetchWriteScopeBucket(ns, scope)
if err != nil {
return err
}
if coinTypePubKeyEnc != nil {
err := scopedBucket.Put(coinTypePubKeyName, coinTypePubKeyEnc)
if err != nil {
str := "failed to store encrypted cointype public key"
return managerError(ErrDatabase, str, err)
}
}
if coinTypePrivKeyEnc != nil {
err := scopedBucket.Put(coinTypePrivKeyName, coinTypePrivKeyEnc)
if err != nil {
str := "failed to store encrypted cointype private key"
return managerError(ErrDatabase, str, err)
}
}
return nil
}
// putMasterHDKeys stores the encrypted master HD keys in the top level main
// bucket. These are required in order to create any new manager scopes, as
// those are created via hardened derivation of the children of this key.
func putMasterHDKeys(ns walletdb.ReadWriteBucket, masterHDPrivEnc, masterHDPubEnc []byte) error {
// As this is the key for the root manager, we don't need to fetch any
// particular scope, and can insert directly within the main bucket.
bucket := ns.NestedReadWriteBucket(mainBucketName)
// Now that we have the main bucket, we can directly store each of the
// relevant keys. If we're in watch only mode, then some or all of
// these keys might not be available.
if masterHDPrivEnc != nil {
err := bucket.Put(masterHDPrivName, masterHDPrivEnc)
if err != nil {
str := "failed to store encrypted master HD private key"
return managerError(ErrDatabase, str, err)
}
}
if masterHDPubEnc != nil {
err := bucket.Put(masterHDPubName, masterHDPubEnc)
if err != nil {
str := "failed to store encrypted master HD public key"
return managerError(ErrDatabase, str, err)
}
}
return nil
}
// fetchMasterHDKeys attempts to fetch both the master HD private and public
// keys from the database. If this is a watch only wallet, then it's possible
// that the master private key isn't stored.
func fetchMasterHDKeys(ns walletdb.ReadBucket) ([]byte, []byte, error) {
bucket := ns.NestedReadBucket(mainBucketName)
var masterHDPrivEnc, masterHDPubEnc []byte
// First, we'll try to fetch the master private key. If this database
// is watch only, or the master has been neutered, then this won't be
// found on disk.
key := bucket.Get(masterHDPrivName)
if key != nil {
masterHDPrivEnc = make([]byte, len(key))
copy(masterHDPrivEnc[:], key)
}
key = bucket.Get(masterHDPubName)
if key != nil {
masterHDPubEnc = make([]byte, len(key))
copy(masterHDPubEnc[:], key)
}
return masterHDPrivEnc, masterHDPubEnc, nil
}
// fetchCryptoKeys loads the encrypted crypto keys which are in turn used to
// protect the extended keys, imported keys, and scripts. Any of the returned
// values can be nil, but in practice only the crypto private and script keys
// will be nil for a watching-only database.
func fetchCryptoKeys(ns walletdb.ReadBucket) ([]byte, []byte, []byte, error) {
bucket := ns.NestedReadBucket(mainBucketName)
// Load the crypto public key parameters. Required.
val := bucket.Get(cryptoPubKeyName)
if val == nil {
str := "required encrypted crypto public not stored in database"
return nil, nil, nil, managerError(ErrDatabase, str, nil)
}
pubKey := make([]byte, len(val))
copy(pubKey, val)
// Load the crypto private key parameters if they were stored.
var privKey []byte
val = bucket.Get(cryptoPrivKeyName)
if val != nil {
privKey = make([]byte, len(val))
copy(privKey, val)
}
// Load the crypto script key parameters if they were stored.
var scriptKey []byte
val = bucket.Get(cryptoScriptKeyName)
if val != nil {
scriptKey = make([]byte, len(val))
copy(scriptKey, val)
}
return pubKey, privKey, scriptKey, nil
}
// putCryptoKeys stores the encrypted crypto keys which are in turn used to
// protect the extended and imported keys. Either parameter can be nil in
// which case no value is written for the parameter.
func putCryptoKeys(ns walletdb.ReadWriteBucket, pubKeyEncrypted, privKeyEncrypted,
scriptKeyEncrypted []byte) error {
bucket := ns.NestedReadWriteBucket(mainBucketName)
if pubKeyEncrypted != nil {
err := bucket.Put(cryptoPubKeyName, pubKeyEncrypted)
if err != nil {
str := "failed to store encrypted crypto public key"
return managerError(ErrDatabase, str, err)
}
}
if privKeyEncrypted != nil {
err := bucket.Put(cryptoPrivKeyName, privKeyEncrypted)
if err != nil {
str := "failed to store encrypted crypto private key"
return managerError(ErrDatabase, str, err)
}
}
if scriptKeyEncrypted != nil {
err := bucket.Put(cryptoScriptKeyName, scriptKeyEncrypted)
if err != nil {
str := "failed to store encrypted crypto script key"
return managerError(ErrDatabase, str, err)
}
}
return nil
}
// fetchWatchingOnly loads the watching-only flag from the database.
func fetchWatchingOnly(ns walletdb.ReadBucket) (bool, error) {
bucket := ns.NestedReadBucket(mainBucketName)
buf := bucket.Get(watchingOnlyName)
if len(buf) != 1 {
str := "malformed watching-only flag stored in database"
return false, managerError(ErrDatabase, str, nil)
}
return buf[0] != 0, nil
}
// putWatchingOnly stores the watching-only flag to the database.
func putWatchingOnly(ns walletdb.ReadWriteBucket, watchingOnly bool) error {
bucket := ns.NestedReadWriteBucket(mainBucketName)
var encoded byte
if watchingOnly {
encoded = 1
}
if err := bucket.Put(watchingOnlyName, []byte{encoded}); err != nil {
str := "failed to store watching only flag"
return managerError(ErrDatabase, str, err)
}
return nil
}
// deserializeAccountRow deserializes the passed serialized account information.
// This is used as a common base for the various account types to deserialize
// the common parts.
func deserializeAccountRow(accountID []byte, serializedAccount []byte) (*dbAccountRow, error) {
// The serialized account format is:
// <acctType><rdlen><rawdata>
//
// 1 byte acctType + 4 bytes raw data length + raw data
// Given the above, the length of the entry must be at a minimum
// the constant value sizes.
if len(serializedAccount) < 5 {
str := fmt.Sprintf("malformed serialized account for key %x",
accountID)
return nil, managerError(ErrDatabase, str, nil)
}
row := dbAccountRow{}
row.acctType = accountType(serializedAccount[0])
rdlen := binary.LittleEndian.Uint32(serializedAccount[1:5])
row.rawData = make([]byte, rdlen)
copy(row.rawData, serializedAccount[5:5+rdlen])
return &row, nil
}
// serializeAccountRow returns the serialization of the passed account row.
func serializeAccountRow(row *dbAccountRow) []byte {
// The serialized account format is:
// <acctType><rdlen><rawdata>
//
// 1 byte acctType + 4 bytes raw data length + raw data
rdlen := len(row.rawData)
buf := make([]byte, 5+rdlen)
buf[0] = byte(row.acctType)
binary.LittleEndian.PutUint32(buf[1:5], uint32(rdlen))
copy(buf[5:5+rdlen], row.rawData)
return buf
}
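// exampleAccountRow is an illustrative helper (not part of the original
// file): an accountDefault row with two bytes of raw data serializes to a
// 1-byte type, a 4-byte little-endian length, then the raw data itself.
func exampleAccountRow() {
	row := dbAccountRow{acctType: accountDefault, rawData: []byte{0xAA, 0xBB}}
	fmt.Printf("% x\n", serializeAccountRow(&row)) // 00 02 00 00 00 aa bb
}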
// deserializeDefaultAccountRow deserializes the raw data from the passed
// account row as a BIP0044-like account.
func deserializeDefaultAccountRow(accountID []byte, row *dbAccountRow) (*dbDefaultAccountRow, error) {
// The serialized BIP0044 account raw data format is:
// <encpubkeylen><encpubkey><encprivkeylen><encprivkey><nextextidx>
// <nextintidx><namelen><name>
//
// 4 bytes encrypted pubkey len + encrypted pubkey + 4 bytes encrypted
// privkey len + encrypted privkey + 4 bytes next external index +
// 4 bytes next internal index + 4 bytes name len + name
// Given the above, the length of the entry must be at a minimum
// the constant value sizes.
if len(row.rawData) < 20 {
str := fmt.Sprintf("malformed serialized bip0044 account for "+
"key %x", accountID)
return nil, managerError(ErrDatabase, str, nil)
}
retRow := dbDefaultAccountRow{
dbAccountRow: *row,
}
pubLen := binary.LittleEndian.Uint32(row.rawData[0:4])
retRow.pubKeyEncrypted = make([]byte, pubLen)
copy(retRow.pubKeyEncrypted, row.rawData[4:4+pubLen])
offset := 4 + pubLen
privLen := binary.LittleEndian.Uint32(row.rawData[offset : offset+4])
offset += 4
retRow.privKeyEncrypted = make([]byte, privLen)
copy(retRow.privKeyEncrypted, row.rawData[offset:offset+privLen])
offset += privLen
retRow.nextExternalIndex = binary.LittleEndian.Uint32(row.rawData[offset : offset+4])
offset += 4
retRow.nextInternalIndex = binary.LittleEndian.Uint32(row.rawData[offset : offset+4])
offset += 4
nameLen := binary.LittleEndian.Uint32(row.rawData[offset : offset+4])
offset += 4
retRow.name = string(row.rawData[offset : offset+nameLen])
return &retRow, nil
}
// serializeDefaultAccountRow returns the serialization of the raw data field
// for a BIP0044-like account.
func serializeDefaultAccountRow(encryptedPubKey, encryptedPrivKey []byte,
nextExternalIndex, nextInternalIndex uint32, name string) []byte {
// The serialized BIP0044 account raw data format is:
// <encpubkeylen><encpubkey><encprivkeylen><encprivkey><nextextidx>
// <nextintidx><namelen><name>
//
// 4 bytes encrypted pubkey len + encrypted pubkey + 4 bytes encrypted
// privkey len + encrypted privkey + 4 bytes next external index +
// 4 bytes next internal index + 4 bytes name len + name
pubLen := uint32(len(encryptedPubKey))
privLen := uint32(len(encryptedPrivKey))
nameLen := uint32(len(name))
rawData := make([]byte, 20+pubLen+privLen+nameLen)
binary.LittleEndian.PutUint32(rawData[0:4], pubLen)
copy(rawData[4:4+pubLen], encryptedPubKey)
offset := 4 + pubLen
binary.LittleEndian.PutUint32(rawData[offset:offset+4], privLen)
offset += 4
copy(rawData[offset:offset+privLen], encryptedPrivKey)
offset += privLen
binary.LittleEndian.PutUint32(rawData[offset:offset+4], nextExternalIndex)
offset += 4
binary.LittleEndian.PutUint32(rawData[offset:offset+4], nextInternalIndex)
offset += 4
binary.LittleEndian.PutUint32(rawData[offset:offset+4], nameLen)
offset += 4
copy(rawData[offset:offset+nameLen], name)
return rawData
}
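// exampleDefaultAccountRow is an illustrative helper (not part of the
// original file). With 3-byte pub/priv ciphertexts (fake key material) and
// the name "acct", the raw data is 20 + 3 + 3 + 4 = 30 bytes, laid out as
// <publen><pub><privlen><priv><nextext><nextint><namelen><name>.
func exampleDefaultAccountRow() {
	raw := serializeDefaultAccountRow(
		[]byte{1, 2, 3}, []byte{4, 5, 6}, 7, 8, "acct",
	)
	fmt.Println(len(raw)) // 30
	fmt.Printf("% x\n", raw)
	// 03 00 00 00 01 02 03 03 00 00 00 04 05 06 07 00 00 00
	// 08 00 00 00 04 00 00 00 61 63 63 74
}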
// forEachKeyScope calls the given function for each known manager scope
// within the set of scopes known by the root manager.
func forEachKeyScope(ns walletdb.ReadBucket, fn func(KeyScope) error) error {
bucket := ns.NestedReadBucket(scopeBucketName)
return bucket.ForEach(func(k, v []byte) error {
// skip non-bucket
if len(k) != 8 {
return nil
}
scope := KeyScope{
Purpose: binary.LittleEndian.Uint32(k[:]),
Coin: binary.LittleEndian.Uint32(k[4:]),
}
return fn(scope)
})
}
// forEachAccount calls the given function with each account stored in the
// manager, breaking early on error.
func forEachAccount(ns walletdb.ReadBucket, scope *KeyScope,
fn func(account uint32) error) error {
scopedBucket, err := fetchReadScopeBucket(ns, scope)
if err != nil {
return err
}
acctBucket := scopedBucket.NestedReadBucket(acctBucketName)
return acctBucket.ForEach(func(k, v []byte) error {
// Skip buckets.
if v == nil {
return nil
}
return fn(binary.LittleEndian.Uint32(k))
})
}
// fetchLastAccount retrieves the last account from the database.
func fetchLastAccount(ns walletdb.ReadBucket, scope *KeyScope) (uint32, error) {
scopedBucket, err := fetchReadScopeBucket(ns, scope)
if err != nil {
return 0, err
}
metaBucket := scopedBucket.NestedReadBucket(metaBucketName)
val := metaBucket.Get(lastAccountName)
if len(val) != 4 {
str := fmt.Sprintf("malformed metadata '%s' stored in database",
lastAccountName)
return 0, managerError(ErrDatabase, str, nil)
}
account := binary.LittleEndian.Uint32(val[0:4])
return account, nil
}
// fetchAccountName retrieves the account name given an account number from the
// database.
func fetchAccountName(ns walletdb.ReadBucket, scope *KeyScope,
account uint32) (string, error) {
scopedBucket, err := fetchReadScopeBucket(ns, scope)
if err != nil {
return "", err
}
acctIDxBucket := scopedBucket.NestedReadBucket(acctIDIdxBucketName)
val := acctIDxBucket.Get(uint32ToBytes(account))
if val == nil {
str := fmt.Sprintf("account %d not found", account)
return "", managerError(ErrAccountNotFound, str, nil)
}
offset := uint32(0)
nameLen := binary.LittleEndian.Uint32(val[offset : offset+4])
offset += 4
acctName := string(val[offset : offset+nameLen])
return acctName, nil
}
// fetchAccountByName retrieves the account number given an account name from
// the database.
func fetchAccountByName(ns walletdb.ReadBucket, scope *KeyScope,
name string) (uint32, error) {
scopedBucket, err := fetchReadScopeBucket(ns, scope)
if err != nil {
return 0, err
}
idxBucket := scopedBucket.NestedReadBucket(acctNameIdxBucketName)
val := idxBucket.Get(stringToBytes(name))
if val == nil {
str := fmt.Sprintf("account name '%s' not found", name)
return 0, managerError(ErrAccountNotFound, str, nil)
}
return binary.LittleEndian.Uint32(val), nil
}
// fetchAccountInfo loads information about the passed account from the
// database.
func fetchAccountInfo(ns walletdb.ReadBucket, scope *KeyScope,
account uint32) (interface{}, error) {
scopedBucket, err := fetchReadScopeBucket(ns, scope)
if err != nil {
return nil, err
}
acctBucket := scopedBucket.NestedReadBucket(acctBucketName)
accountID := uint32ToBytes(account)
serializedRow := acctBucket.Get(accountID)
if serializedRow == nil {
str := fmt.Sprintf("account %d not found", account)
return nil, managerError(ErrAccountNotFound, str, nil)
}
row, err := deserializeAccountRow(accountID, serializedRow)
if err != nil {
return nil, err
}
switch row.acctType {
case accountDefault:
return deserializeDefaultAccountRow(accountID, row)
}
str := fmt.Sprintf("unsupported account type '%d'", row.acctType)
return nil, managerError(ErrDatabase, str, nil)
}
// deleteAccountNameIndex deletes the given key from the account name index of the database.
func deleteAccountNameIndex(ns walletdb.ReadWriteBucket, scope *KeyScope,
name string) error {
scopedBucket, err := fetchWriteScopeBucket(ns, scope)
if err != nil {
return err
}
bucket := scopedBucket.NestedReadWriteBucket(acctNameIdxBucketName)
// Delete the account name key
err = bucket.Delete(stringToBytes(name))
if err != nil {
str := fmt.Sprintf("failed to delete account name index key %s", name)
return managerError(ErrDatabase, str, err)
}
return nil
}
// deleteAccountIDIndex deletes the given key from the account id index of the database.
func deleteAccountIDIndex(ns walletdb.ReadWriteBucket, scope *KeyScope,
account uint32) error {
scopedBucket, err := fetchWriteScopeBucket(ns, scope)
if err != nil {
return err
}
bucket := scopedBucket.NestedReadWriteBucket(acctIDIdxBucketName)
// Delete the account id key
err = bucket.Delete(uint32ToBytes(account))
if err != nil {
str := fmt.Sprintf("failed to delete account id index key %d", account)
return managerError(ErrDatabase, str, err)
}
return nil
}
// putAccountNameIndex stores the given key to the account name index of the
// database.
func putAccountNameIndex(ns walletdb.ReadWriteBucket, scope *KeyScope,
account uint32, name string) error {
scopedBucket, err := fetchWriteScopeBucket(ns, scope)
if err != nil {
return err
}
bucket := scopedBucket.NestedReadWriteBucket(acctNameIdxBucketName)
// Write the account number keyed by the account name.
err = bucket.Put(stringToBytes(name), uint32ToBytes(account))
if err != nil {
str := fmt.Sprintf("failed to store account name index key %s", name)
return managerError(ErrDatabase, str, err)
}
return nil
}
// putAccountIDIndex stores the given key to the account id index of the database.
func | (ns walletdb.ReadWriteBucket, scope *KeyScope,
account uint32, name string) error {
scopedBucket, err := fetchWriteScopeBucket(ns, scope)
if err != nil {
return err
}
bucket := scopedBucket.NestedReadWriteBucket(acctIDIdxBucketName)
// Write the account number keyed by the account id.
err = bucket.Put(uint32ToBytes(account), stringToBytes(name))
if err != nil {
str := fmt.Sprintf("failed to store account id index key %s", name)
return managerError(ErrDatabase, str, err)
}
return nil
}
// putAddrAccountIndex stores the given key to the address account index of the
// database.
func putAddrAccountIndex(ns walletdb.ReadWriteBucket, scope *KeyScope,
account uint32, addrHash []byte) error {
scopedBucket, err := fetchWriteScopeBucket(ns, scope)
if err != nil {
return err
}
bucket := scopedBucket.NestedReadWriteBucket(addrAcctIdxBucketName)
// Write account keyed by address hash
err = bucket.Put(addrHash, uint32ToBytes(account))
if err != nil {
str := fmt.Sprintf("failed to store address account index key %s", addrHash)
return managerError(ErrDatabase, str, err)
}
bucket, err = bucket.CreateBucketIfNotExists(uint32ToBytes(account))
if err != nil {
return err
}
// In account bucket, write a null value keyed by the address hash
err = bucket.Put(addrHash, nullVal)
if err != nil {
str := fmt.Sprintf("failed to store address account index key %s", addrHash)
return managerError(ErrDatabase, str, err)
}
return nil
}
// putAccountRow stores the provided account information to the database. This
// is used as a common base for storing the various account types.
func putAccountRow(ns walletdb.ReadWriteBucket, scope *KeyScope,
account uint32, row *dbAccountRow) error {
scopedBucket, err := fetchWriteScopeBucket(ns, scope)
if err != nil {
return err
}
bucket := scopedBucket.NestedReadWriteBucket(acctBucketName)
// Write the serialized value keyed by the account number.
err = bucket.Put(uint32ToBytes(account), serializeAccountRow(row))
if err != nil {
str := fmt.Sprintf("failed to store account %d", account)
return managerError(ErrDatabase, str, err)
}
return nil
}
// putAccountInfo stores the provided account information to the database.
func putAccountInfo(ns walletdb.ReadWriteBucket, scope *KeyScope,
account uint32, encryptedPubKey, encryptedPrivKey []byte,
nextExternalIndex, nextInternalIndex uint32, name string) error {
rawData := serializeDefaultAccountRow(
encryptedPubKey, encryptedPrivKey, nextExternalIndex,
nextInternalIndex, name,
)
// TODO(roasbeef): pass scope bucket directly??
acctRow := dbAccountRow{
acctType: accountDefault,
rawData: rawData,
}
if err := putAccountRow(ns, scope, account, &acctRow); err != nil {
return err
}
// Update account id index.
if err := putAccountIDIndex(ns, scope, account, name); err != nil {
return err
}
// Update account name index.
if err := putAccountNameIndex(ns, scope, account, name); err != nil {
return err
}
return nil
}
// putLastAccount stores the provided metadata - last account - to the
// database.
func putLastAccount(ns walletdb.ReadWriteBucket, scope *KeyScope,
account uint32) error {
scopedBucket, err := fetchWriteScopeBucket(ns, scope)
if err != nil {
return err
}
bucket := scopedBucket.NestedReadWriteBucket(metaBucketName)
err = bucket.Put(lastAccountName, uint32ToBytes(account))
if err != nil {
str := fmt.Sprintf("failed to update metadata '%s'", lastAccountName)
return managerError(ErrDatabase, str, err)
}
return nil
}
// deserializeAddressRow deserializes the passed serialized address
// information. This is used as a common base for the various address types to
// deserialize the common parts.
func deserializeAddressRow(serializedAddress []byte) (*dbAddressRow, error) {
// The serialized address format is:
// <addrType><account><addedTime><syncStatus><rawdatalen><rawdata>
//
// 1 byte addrType + 4 bytes account + 8 bytes addTime + 1 byte
// syncStatus + 4 bytes raw data length + raw data
// Given the above, the length of the entry must be at a minimum
// the constant value sizes.
if len(serializedAddress) < 18 {
str := "malformed serialized address"
return nil, managerError(ErrDatabase, str, nil)
}
row := dbAddressRow{}
row.addrType = addressType(serializedAddress[0])
row.account = binary.LittleEndian.Uint32(serializedAddress[1:5])
row.addTime = binary.LittleEndian.Uint64(serializedAddress[5:13])
row.syncStatus = syncStatus(serializedAddress[13])
rdlen := binary.LittleEndian.Uint32(serializedAddress[14:18])
row.rawData = make([]byte, rdlen)
copy(row.rawData, serializedAddress[18:18+rdlen])
return &row, nil
}
// serializeAddressRow returns the serialization of the passed address row.
func serializeAddressRow(row *dbAddressRow) []byte {
// The serialized address format is:
// <addrType><account><addedTime><syncStatus><rawdatalen><rawdata>
//
// 1 byte addrType + 4 bytes account + 8 bytes addTime + 1 byte
// syncStatus + 4 bytes raw data length + raw data
rdlen := len(row.rawData)
buf := make([]byte, 18+rdlen)
buf[0] = byte(row.addrType)
binary.LittleEndian.PutUint32(buf[1:5], row.account)
binary.LittleEndian.PutUint64(buf[5:13], row.addTime)
buf[13] = byte(row.syncStatus)
binary.LittleEndian.PutUint32(buf[14:18], uint32(rdlen))
copy(buf[18:18+rdlen], row.rawData)
return buf
}
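// exampleAddressRowRoundTrip is an editorial sketch, not part of the
// original file: it illustrates the fixed 18-byte header plus variable
// raw data round-tripping through the two helpers above. The field
// values are arbitrary placeholders.
func exampleAddressRowRoundTrip() (*dbAddressRow, error) {
row := dbAddressRow{
addrType: adtChain,
account: 0,
addTime: uint64(time.Now().Unix()),
syncStatus: syncStatus(0),
rawData: serializeChainedAddress(0, 5),
}
// serializeAddressRow emits 1 byte type + 4 bytes account + 8 bytes
// addTime + 1 byte syncStatus + 4 bytes raw data length + raw data,
// i.e. 18 + len(rawData) bytes in total.
buf := serializeAddressRow(&row)
// deserializeAddressRow recovers the identical fields from that buffer.
return deserializeAddressRow(buf)
}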
// deserializeChainedAddress deserializes the raw data from the passed address
// row as a chained address.
func deserializeChainedAddress(row *dbAddressRow) (*dbChainAddressRow, error) {
// The serialized chain address raw data format is:
// <branch><index>
//
// 4 bytes branch + 4 bytes address index
if len(row.rawData) != 8 {
str := "malformed serialized chained address"
return nil, managerError(ErrDatabase, str, nil)
}
retRow := dbChainAddressRow{
dbAddressRow: *row,
}
retRow.branch = binary.LittleEndian.Uint32(row.rawData[0:4])
retRow.index = binary.LittleEndian.Uint32(row.rawData[4:8])
return &retRow, nil
}
// serializeChainedAddress returns the serialization of the raw data field for
// a chained address.
func serializeChainedAddress(branch, index uint32) []byte {
// The serialized chain address raw data format is:
// <branch><index>
//
// 4 bytes branch + 4 bytes address index
rawData := make([]byte, 8)
binary.LittleEndian.PutUint32(rawData[0:4], branch)
binary.LittleEndian.PutUint32(rawData[4:8], index)
return rawData
}
// deserializeImportedAddress deserializes the raw data from the passed address
// row as an imported address.
func deserializeImportedAddress(row *dbAddressRow) (*dbImportedAddressRow, error) {
// The serialized imported address raw data format is:
// <encpubkeylen><encpubkey><encprivkeylen><encprivkey>
//
// 4 bytes encrypted pubkey len + encrypted pubkey + 4 bytes encrypted
// privkey len + encrypted privkey
// Given the above, the length of the entry must be at a minimum
// the constant value sizes.
if len(row.rawData) < 8 {
str := "malformed serialized imported address"
return nil, managerError(ErrDatabase, str, nil)
}
retRow := dbImportedAddressRow{
dbAddressRow: *row,
}
pubLen := binary.LittleEndian.Uint32(row.rawData[0:4])
retRow.encryptedPubKey = make([]byte, pubLen)
copy(retRow.encryptedPubKey, row.rawData[4:4+pubLen])
offset := 4 + pubLen
privLen := binary.LittleEndian.Uint32(row.rawData[offset : offset+4])
offset += 4
retRow.encryptedPrivKey = make([]byte, privLen)
copy(retRow.encryptedPrivKey, row.rawData[offset:offset+privLen])
return &retRow, nil
}
// serializeImportedAddress returns the serialization of the raw data field for
// an imported address.
func serializeImportedAddress(encryptedPubKey, encryptedPrivKey []byte) []byte {
// The serialized imported address raw data format is:
// <encpubkeylen><encpubkey><encprivkeylen><encprivkey>
//
// 4 bytes encrypted pubkey len + encrypted pubkey + 4 bytes encrypted
// privkey len + encrypted privkey
pubLen := uint32(len(encryptedPubKey))
privLen := uint32(len(encryptedPrivKey))
rawData := make([]byte, 8+pubLen+privLen)
binary.LittleEndian.PutUint32(rawData[0:4], pubLen)
copy(rawData[4:4+pubLen], encryptedPubKey)
offset := 4 + pubLen
binary.LittleEndian.PutUint32(rawData[offset:offset+4], privLen)
offset += 4
copy(rawData[offset:offset+privLen], encryptedPrivKey)
return rawData
}
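// Editorial layout sketch (not part of the original file): for a
// hypothetical 33-byte encrypted pubkey and 48-byte encrypted privkey,
// serializeImportedAddress above produces
//
//   [0:4]   = 33 (little-endian pubkey length)
//   [4:37]  = encrypted pubkey bytes
//   [37:41] = 48 (little-endian privkey length)
//   [41:89] = encrypted privkey bytes
//
// for a total of 8 + 33 + 48 = 89 bytes. serializeScriptAddress below
// uses the same length-prefixed two-field layout.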
// deserializeScriptAddress deserializes the raw data from the passed address
// row as a script address.
func deserializeScriptAddress(row *dbAddressRow) (*dbScriptAddressRow, error) {
// The serialized script address raw data format is:
// <encscripthashlen><encscripthash><encscriptlen><encscript>
//
// 4 bytes encrypted script hash len + encrypted script hash + 4 bytes
// encrypted script len + encrypted script
// Given the above, the length of the entry must be at a minimum
// the constant value sizes.
if len(row.rawData) < 8 {
str := "malformed serialized script address"
return nil, managerError(ErrDatabase, str, nil)
}
retRow := dbScriptAddressRow{
dbAddressRow: *row,
}
hashLen := binary.LittleEndian.Uint32(row.rawData[0:4])
retRow.encryptedHash = make([]byte, hashLen)
copy(retRow.encryptedHash, row.rawData[4:4+hashLen])
offset := 4 + hashLen
scriptLen := binary.LittleEndian.Uint32(row.rawData[offset : offset+4])
offset += 4
retRow.encryptedScript = make([]byte, scriptLen)
copy(retRow.encryptedScript, row.rawData[offset:offset+scriptLen])
return &retRow, nil
}
// serializeScriptAddress returns the serialization of the raw data field for
// a script address.
func serializeScriptAddress(encryptedHash, encryptedScript []byte) []byte {
// The serialized script address raw data format is:
// <encscripthashlen><encscripthash><encscriptlen><encscript>
//
// 4 bytes encrypted script hash len + encrypted script hash + 4 bytes
// encrypted script len + encrypted script
hashLen := uint32(len(encryptedHash))
scriptLen := uint32(len(encryptedScript))
rawData := make([]byte, 8+hashLen+scriptLen)
binary.LittleEndian.PutUint32(rawData[0:4], hashLen)
copy(rawData[4:4+hashLen], encryptedHash)
offset := 4 + hashLen
binary.LittleEndian.PutUint32(rawData[offset:offset+4], scriptLen)
offset += 4
copy(rawData[offset:offset+scriptLen], encryptedScript)
return rawData
}
// fetchAddressByHash loads address information for the provided address hash
// from the database. The returned value is one of the address rows for the
// specific address type. The caller should use type assertions to ascertain
// the type. The caller should prefix the error message with the address hash
// which caused the failure.
func fetchAddressByHash(ns walletdb.ReadBucket, scope *KeyScope,
addrHash []byte) (interface{}, error) {
scopedBucket, err := fetchReadScopeBucket(ns, scope)
if err != nil {
return nil, err
}
bucket := scopedBucket.NestedReadBucket(addrBucketName)
serializedRow := bucket.Get(addrHash[:])
if serializedRow == nil {
str := "address not found"
return nil, managerError(ErrAddressNotFound, str, nil)
}
row, err := deserializeAddressRow(serializedRow)
if err != nil {
return nil, err
}
switch row.addrType {
case adtChain:
return deserializeChainedAddress(row)
case adtImport:
return deserializeImportedAddress(row)
case adtScript:
return deserializeScriptAddress(row)
}
str := fmt.Sprintf("unsupported address type '%d'", row.addrType)
return nil, managerError(ErrDatabase, str, nil)
}
// fetchAddressUsed returns true if the provided address id was flagged as used.
func fetchAddressUsed(ns walletdb.ReadBucket, scope *KeyScope,
addressID []byte) bool {
scopedBucket, err := fetchReadScopeBucket(ns, scope)
if err != nil {
return false
}
bucket := scopedBucket.NestedReadBucket(usedAddrBucketName)
addrHash := sha256.Sum256(addressID)
return bucket.Get(addrHash[:]) != nil
}
// markAddressUsed flags the provided address id as used in the database.
func markAddressUsed(ns walletdb.ReadWriteBucket, scope *KeyScope,
addressID []byte) error {
scopedBucket, err := fetchWriteScopeBucket(ns, scope)
if err != nil {
return err
}
bucket := scopedBucket.NestedReadWriteBucket(usedAddrBucketName)
addrHash := sha256.Sum256(addressID)
val := bucket.Get(addrHash[:])
if val != nil {
return nil
}
err = bucket.Put(addrHash[:], []byte{0})
if err != nil {
str := fmt.Sprintf("failed to mark address used %x", addressID)
return managerError(ErrDatabase, str, err)
}
return nil
}
// fetchAddress loads address information for the provided address id from the
// database. The returned value is one of the address rows for the specific
// address type. The caller should use type assertions to ascertain the type.
// The caller should prefix the error message with the address which caused the
// failure.
func fetchAddress(ns walletdb.ReadBucket, scope *KeyScope,
addressID []byte) (interface{}, error) {
addrHash := sha256.Sum256(addressID)
return fetchAddressByHash(ns, scope, addrHash[:])
}
// putAddress stores the provided address information to the database. This is
// used as a common base for storing the various address types.
func putAddress(ns walletdb.ReadWriteBucket, scope *KeyScope,
addressID []byte, row *dbAddressRow) error {
scopedBucket, err := fetchWriteScopeBucket(ns, scope)
if err != nil {
return err
}
bucket := scopedBucket.NestedReadWriteBucket(addrBucketName)
// Write the serialized value keyed by the hash of the address. The
// additional hash is used to conceal the actual address while still
// allowing keyed lookups.
addrHash := sha256.Sum256(addressID)
err = bucket.Put(addrHash[:], serializeAddressRow(row))
if err != nil {
str := fmt.Sprintf("failed to store address %x", addressID)
return managerError(ErrDatabase, str, err)
}
// Update address account index
return putAddrAccountIndex(ns, scope, row.account, addrHash[:])
}
// putChainedAddress stores the provided chained address information to the
// database.
func putChainedAddress(ns walletdb.ReadWriteBucket, scope *KeyScope,
addressID []byte, account uint32, status syncStatus, branch,
index uint32, addrType addressType) error {
scopedBucket, err := fetchWriteScopeBucket(ns, scope)
if err != nil {
return err
}
addrRow := dbAddressRow{
addrType: addrType,
account: account,
addTime: uint64(time.Now().Unix()),
syncStatus: status,
rawData: serializeChainedAddress(branch, index),
}
if err := putAddress(ns, scope, addressID, &addrRow); err != nil {
return err
}
// Update the next index for the appropriate internal or external
// branch.
accountID := uint32ToBytes(account)
bucket := scopedBucket.NestedReadWriteBucket(acctBucketName)
serializedAccount := bucket.Get(accountID)
// Deserialize the account row.
row, err := deserializeAccountRow(accountID, serializedAccount)
if err != nil {
return err
}
arow, err := deserializeDefaultAccountRow(accountID, row)
if err != nil {
return err
}
// Increment the appropriate next index depending on whether the branch
// is internal or external.
nextExternalIndex := arow.nextExternalIndex
nextInternalIndex := arow.nextInternalIndex
if branch == InternalBranch {
nextInternalIndex = index + 1
} else {
nextExternalIndex = index + 1
}
// Reserialize the account with the updated index and store it.
row.rawData = serializeDefaultAccountRow(
arow.pubKeyEncrypted, arow.privKeyEncrypted, nextExternalIndex,
nextInternalIndex, arow.name,
)
err = bucket.Put(accountID, serializeAccountRow(row))
if err != nil {
str := fmt.Sprintf("failed to update next index for "+
"address %x, account %d", addressID, account)
return managerError(ErrDatabase, str, err)
}
return nil
}
// putImportedAddress stores the provided imported address information to the
// database.
func putImportedAddress(ns walletdb.ReadWriteBucket, scope *KeyScope,
addressID []byte, account uint32, status syncStatus,
encryptedPubKey, encryptedPrivKey []byte) error {
rawData := serializeImportedAddress(encryptedPubKey, encryptedPrivKey)
addrRow := dbAddressRow{
addrType: adtImport,
account: account,
addTime: uint64(time.Now().Unix()),
syncStatus: status,
rawData: rawData,
}
return putAddress(ns, scope, addressID, &addrRow)
}
// putScriptAddress stores the provided script address information to the
// database.
func putScriptAddress(ns walletdb.ReadWriteBucket, scope *KeyScope,
addressID []byte, account uint32, status syncStatus,
encryptedHash, encryptedScript []byte) error {
rawData := serializeScriptAddress(encryptedHash, encryptedScript)
addrRow := dbAddressRow{
addrType: adtScript,
account: account,
addTime: uint64(time.Now().Unix()),
syncStatus: status,
rawData: rawData,
}
return putAddress(ns, scope, addressID, &addrRow)
}
// existsAddress returns whether or not the address id exists in the database.
func existsAddress(ns walletdb.ReadBucket, scope *KeyScope, addressID []byte) bool {
scopedBucket, err := fetchReadScopeBucket(ns, scope)
if err != nil {
return false
}
bucket := scopedBucket.NestedReadBucket(addrBucketName)
addrHash := sha256.Sum256(addressID)
return bucket.Get(addrHash[:]) != nil
}
// fetchAddrAccount returns the account to which the given address belongs.
// It looks up the account using the addracctidx index which maps the address
// hash to its corresponding account id.
func fetchAddrAccount(ns walletdb.ReadBucket, scope *KeyScope,
addressID []byte) (uint32, error) {
scopedBucket, err := fetchReadScopeBucket(ns, scope)
if err != nil {
return 0, err
}
bucket := scopedBucket.NestedReadBucket(addrAcctIdxBucketName)
addrHash := sha256.Sum256(addressID)
val := bucket.Get(addrHash[:])
if val == nil {
str := "address not found"
return 0, managerError(ErrAddressNotFound, str, nil)
}
return binary.LittleEndian.Uint32(val), nil
}
// forEachAccountAddress calls the given function with each address of the
// given account stored in the manager, breaking early on error.
func forEachAccountAddress(ns walletdb.ReadBucket, scope *KeyScope,
account uint32, fn func(rowInterface interface{}) error) error {
scopedBucket, err := fetchReadScopeBucket(ns, scope)
if err != nil {
return err
}
bucket := scopedBucket.NestedReadBucket(addrAcctIdxBucketName).
NestedReadBucket(uint32ToBytes(account))
// If the index bucket is missing the account, there haven't been any
// address entries yet.
if bucket == nil {
return nil
}
err = bucket.ForEach(func(k, v []byte) error {
// Skip buckets.
if v == nil {
return nil
}
addrRow, err := fetchAddressByHash(ns, scope, k)
if err != nil {
if merr, ok := err.(*ManagerError); ok {
desc := fmt.Sprintf("failed to fetch address hash '%s': %v",
k, merr.Description)
merr.Description = desc
return merr
}
return err
}
return fn(addrRow)
})
if err != nil {
return maybeConvertDbError(err)
}
return nil
}
// forEachActiveAddress calls the given function with each active address
// stored in the manager, breaking early on error.
func forEachActiveAddress(ns walletdb.ReadBucket, scope *KeyScope,
fn func(rowInterface interface{}) error) error {
scopedBucket, err := fetchReadScopeBucket(ns, scope)
if err != nil {
return err
}
bucket := scopedBucket.NestedReadBucket(addrBucketName)
err = bucket.ForEach(func(k, v []byte) error {
// Skip buckets.
if v == nil {
return nil
}
// Deserialize the address row first to determine the field
// values.
addrRow, err := fetchAddressByHash(ns, scope, k)
if merr, ok := err.(*ManagerError); ok {
desc := fmt.Sprintf("failed to fetch address hash '%s': %v",
k, merr.Description)
merr.Description = desc
return merr
}
if err != nil {
return err
}
return fn(addrRow)
})
if err != nil {
return maybeConvertDbError(err)
}
return nil
}
// deletePrivateKeys removes all private key material from the database.
//
// NOTE: Care should be taken when calling this function. It is primarily
// intended for use in converting to a watching-only copy. Removing the private
// keys from the main database without also marking it watching-only will result
// in an unusable database. It will also make any imported scripts and private
// keys unrecoverable unless there is a backup copy available.
func deletePrivateKeys(ns walletdb.ReadWriteBucket) error {
bucket := ns.NestedReadWriteBucket(mainBucketName)
// Delete the master private key params and the crypto private and
// script keys.
if err := bucket.Delete(masterPrivKeyName); err != nil {
str := "failed to delete master private key parameters"
return managerError(ErrDatabase, str, err)
}
if err := bucket.Delete(cryptoPrivKeyName); err != nil {
str := "failed to delete crypto private key"
return managerError(ErrDatabase, str, err)
}
if err := bucket.Delete(cryptoScriptKeyName); err != nil {
str := "failed to delete crypto script key"
return managerError(ErrDatabase, str, err)
}
if err := bucket.Delete(masterHDPrivName); err != nil {
str := "failed to delete master HD priv key"
return managerError(ErrDatabase, str, err)
}
// With the master key and meta encryption keys deleted, we'll need to
// delete the keys for all known scopes as well.
scopeBucket := ns.NestedReadWriteBucket(scopeBucketName)
err := scopeBucket.ForEach(func(scopeKey, _ []byte) error {
if len(scopeKey) != 8 {
return nil
}
managerScopeBucket := scopeBucket.NestedReadWriteBucket(scopeKey)
if err := managerScopeBucket.Delete(coinTypePrivKeyName); err != nil {
str := "failed to delete cointype private key"
return managerError(ErrDatabase, str, err)
}
// Delete the account extended private key for all accounts.
bucket = managerScopeBucket.NestedReadWriteBucket(acctBucketName)
err := bucket.ForEach(func(k, v []byte) error {
// Skip buckets.
if v == nil {
return nil
}
// Deserialize the account row first to determine the type.
row, err := deserializeAccountRow(k, v)
if err != nil {
return err
}
switch row.acctType {
case accountDefault:
arow, err := deserializeDefaultAccountRow(k, row)
if err != nil {
return err
}
// Reserialize the account without the private key and
// store it.
row.rawData = serializeDefaultAccountRow(
arow.pubKeyEncrypted, nil,
arow.nextExternalIndex, arow.nextInternalIndex,
arow.name,
)
err = bucket.Put(k, serializeAccountRow(row))
if err != nil {
str := "failed to delete account private key"
return managerError(ErrDatabase, str, err)
}
}
return nil
})
if err != nil {
return maybeConvertDbError(err)
}
// Delete the private key for all imported addresses.
bucket = managerScopeBucket.NestedReadWriteBucket(addrBucketName)
err = bucket.ForEach(func(k, v []byte) error {
// Skip buckets.
if v == nil {
return nil
}
// Deserialize the address row first to determine the field
// values.
row, err := deserializeAddressRow(v)
if err != nil {
return err
}
switch row.addrType {
case adtImport:
irow, err := deserializeImportedAddress(row)
if err != nil {
return err
}
// Reserialize the imported address without the private
// key and store it.
row.rawData = serializeImportedAddress(
irow.encryptedPubKey, nil)
err = bucket.Put(k, serializeAddressRow(row))
if err != nil {
str := "failed to delete imported private key"
return managerError(ErrDatabase, str, err)
}
case adtScript:
srow, err := deserializeScriptAddress(row)
if err != nil {
return err
}
// Reserialize the script address without the script
// and store it.
row.rawData = serializeScriptAddress(srow.encryptedHash,
nil)
err = bucket.Put(k, serializeAddressRow(row))
if err != nil {
str := "failed to delete imported script"
return managerError(ErrDatabase, str, err)
}
}
return nil
})
if err != nil {
return maybeConvertDbError(err)
}
return nil
})
if err != nil {
return maybeConvertDbError(err)
}
return nil
}
// fetchSyncedTo loads the block stamp the manager is synced to from the
// database.
func fetchSyncedTo(ns walletdb.ReadBucket) (*BlockStamp, error) {
bucket := ns.NestedReadBucket(syncBucketName)
// The serialized synced to format is:
// <blockheight><blockhash><timestamp>
//
// 4 bytes block height + 32 bytes hash + 4 bytes timestamp. Entries
// written before the timestamp was added are only 36 bytes and are
// still accepted.
buf := bucket.Get(syncedToName)
if len(buf) < 36 {
str := "malformed sync information stored in database"
return nil, managerError(ErrDatabase, str, nil)
}
var bs BlockStamp
bs.Height = int32(binary.LittleEndian.Uint32(buf[0:4]))
copy(bs.Hash[:], buf[4:36])
if len(buf) == 40 {
bs.Timestamp = time.Unix(
int64(binary.LittleEndian.Uint32(buf[36:])), 0,
)
}
return &bs, nil
}
// PutSyncedTo stores the provided synced to blockstamp to the database.
func PutSyncedTo(ns walletdb.ReadWriteBucket, bs *BlockStamp) error {
bucket := ns.NestedReadWriteBucket(syncBucketName)
errStr := fmt.Sprintf("failed to store sync information %v", bs.Hash)
// If the block height is greater than zero, check that the previous
// block height exists. This prevents reorg issues in the future.
// We use BigEndian so that keys/values are added to the bucket in
// order, making writes more efficient for some database backends.
if bs.Height > 0 {
if _, err := fetchBlockHash(ns, bs.Height-1); err != nil {
return managerError(ErrDatabase, errStr, err)
}
}
// Store the block hash by block height.
height := make([]byte, 4)
binary.BigEndian.PutUint32(height, uint32(bs.Height))
err := bucket.Put(height, bs.Hash[0:32])
if err != nil {
return managerError(ErrDatabase, errStr, err)
}
// The serialized synced to format is:
// <blockheight><blockhash><timestamp>
//
// 4 bytes block height + 32 bytes hash + 4 bytes timestamp
buf := make([]byte, 40)
binary.LittleEndian.PutUint32(buf[0:4], uint32(bs.Height))
copy(buf[4:36], bs.Hash[0:32])
binary.LittleEndian.PutUint32(buf[36:], uint32(bs.Timestamp.Unix()))
err = bucket.Put(syncedToName, buf)
if err != nil {
return managerError(ErrDatabase, errStr, err)
}
return nil
}
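// Editorial sketch (not part of the original file) of why the
// height-to-hash index above uses big-endian keys: lexicographic byte
// order then matches numeric height order, so keys are appended in
// sequence.
//
//   height 1:   big-endian 00 00 00 01   little-endian 01 00 00 00
//   height 256: big-endian 00 00 01 00   little-endian 00 01 00 00
//
// Big-endian sorts height 1 before height 256; little-endian would
// sort 256 first.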
// fetchBlockHash loads the block hash for the provided height from the
// database.
func fetchBlockHash(ns walletdb.ReadBucket, height int32) (*chainhash.Hash, error) {
bucket := ns.NestedReadBucket(syncBucketName)
errStr := fmt.Sprintf("failed to fetch block hash for height %d", height)
heightBytes := make([]byte, 4)
binary.BigEndian.PutUint32(heightBytes, uint32(height))
hashBytes := bucket.Get(heightBytes)
if hashBytes == nil {
err := errors.New("block not found")
return nil, managerError(ErrBlockNotFound, errStr, err)
}
if len(hashBytes) != 32 {
err := fmt.Errorf("couldn't get hash from database")
return nil, managerError(ErrDatabase, errStr, err)
}
var hash chainhash.Hash
if err := hash.SetBytes(hashBytes); err != nil {
return nil, managerError(ErrDatabase, errStr, err)
}
return &hash, nil
}
// FetchStartBlock loads the start block stamp for the manager from the
// database.
func FetchStartBlock(ns walletdb.ReadBucket) (*BlockStamp, error) {
bucket := ns.NestedReadBucket(syncBucketName)
// The serialized start block format is:
// <blockheight><blockhash>
//
// 4 bytes block height + 32 bytes hash
buf := bucket.Get(startBlockName)
if len(buf) != 36 {
str := "malformed start block stored in database"
return nil, managerError(ErrDatabase, str, nil)
}
var bs BlockStamp
bs.Height = int32(binary.LittleEndian.Uint32(buf[0:4]))
copy(bs.Hash[:], buf[4:36])
return &bs, nil
}
// putStartBlock stores the provided start block stamp to the database.
func putStartBlock(ns walletdb.ReadWriteBucket, bs *BlockStamp) error {
bucket := ns.NestedReadWriteBucket(syncBucketName)
// The serialized start block format is:
// <blockheight><blockhash>
//
// 4 bytes block height + 32 bytes hash
buf := make([]byte, 36)
binary.LittleEndian.PutUint32(buf[0:4], uint32(bs.Height))
copy(buf[4:36], bs.Hash[0:32])
err := bucket.Put(startBlockName, buf)
if err != nil {
str := fmt.Sprintf("failed to store start block %v", bs.Hash)
return managerError(ErrDatabase, str, err)
}
return nil
}
// fetchBirthday loads the manager's birthday timestamp from the database.
func fetchBirthday(ns walletdb.ReadBucket) (time.Time, error) {
var t time.Time
bucket := ns.NestedReadBucket(syncBucketName)
birthdayTimestamp := bucket.Get(birthdayName)
if len(birthdayTimestamp) != 8 {
str := "malformed birthday stored in database"
return t, managerError(ErrDatabase, str, nil)
}
t = time.Unix(int64(binary.BigEndian.Uint64(birthdayTimestamp)), 0)
return t, nil
}
// putBirthday stores the provided birthday timestamp to the database.
func putBirthday(ns walletdb.ReadWriteBucket, t time.Time) error {
var birthdayTimestamp [8]byte
binary.BigEndian.PutUint64(birthdayTimestamp[:], uint64(t.Unix()))
bucket := ns.NestedReadWriteBucket(syncBucketName)
if err := bucket.Put(birthdayName, birthdayTimestamp[:]); err != nil {
str := "failed to store birthday"
return managerError(ErrDatabase, str, err)
}
return nil
}
// FetchBirthdayBlock retrieves the birthday block from the database.
//
// The block is serialized as follows:
// [0:4] block height
// [4:36] block hash
// [36:44] block timestamp
func FetchBirthdayBlock(ns walletdb.ReadBucket) (BlockStamp, error) {
var block BlockStamp
bucket := ns.NestedReadBucket(syncBucketName)
birthdayBlock := bucket.Get(birthdayBlockName)
if birthdayBlock == nil {
str := "birthday block not set"
return block, managerError(ErrBirthdayBlockNotSet, str, nil)
}
if len(birthdayBlock) != 44 {
str := "malformed birthday block stored in database"
return block, managerError(ErrDatabase, str, nil)
}
block.Height = int32(binary.BigEndian.Uint32(birthdayBlock[:4]))
copy(block.Hash[:], birthdayBlock[4:36])
t := int64(binary.BigEndian.Uint64(birthdayBlock[36:]))
block.Timestamp = time.Unix(t, 0)
return block, nil
}
// putBirthdayBlock stores the provided birthday block to the database.
//
// The block is serialized as follows:
// [0:4] block height
// [4:36] block hash
// [36:44] block timestamp
func putBirthdayBlock(ns walletdb.ReadWriteBucket, block BlockStamp) error {
var birthdayBlock [44]byte
binary.BigEndian.PutUint32(birthdayBlock[:4], uint32(block.Height))
copy(birthdayBlock[4:36], block.Hash[:])
binary.BigEndian.PutUint64(birthdayBlock[36:], uint64(block.Timestamp.Unix()))
bucket := ns.NestedReadWriteBucket(syncBucketName)
if err := bucket.Put(birthdayBlockName, birthdayBlock[:]); err != nil {
str := "failed to store birthday block"
return managerError(ErrDatabase, str, err)
}
return nil
}
// fetchBirthdayBlockVerification retrieves the bit that determines whether the
// wallet has verified that its birthday block is correct.
func fetchBirthdayBlockVerification(ns walletdb.ReadBucket) bool {
bucket := ns.NestedReadBucket(syncBucketName)
verifiedValue := bucket.Get(birthdayBlockVerifiedName)
// If there is no verification status, we can assume it has not been
// verified yet.
if verifiedValue == nil {
return false
}
// Otherwise, we'll determine if it's verified by the value stored.
verified := binary.BigEndian.Uint16(verifiedValue[:])
return verified != 0
}
// putBirthdayBlockVerification stores a bit that determines whether the
// birthday block has been verified by the wallet to be correct.
func putBirthdayBlockVerification(ns walletdb.ReadWriteBucket, verified bool) error {
// Convert the boolean to an integer in its binary representation as
// there is no way to insert a boolean directly as a value of a
// key/value pair.
verifiedValue := uint16(0)
if verified {
verifiedValue = 1
}
var verifiedBytes [2]byte
binary.BigEndian.PutUint16(verifiedBytes[:], verifiedValue)
bucket := ns.NestedReadWriteBucket(syncBucketName)
err := bucket.Put(birthdayBlockVerifiedName, verifiedBytes[:])
if err != nil {
str := "failed to store birthday block verification"
return managerError(ErrDatabase, str, err)
}
return nil
}
// managerExists returns whether or not the manager has already been created
// in the given database namespace.
func managerExists(ns walletdb.ReadBucket) bool {
if ns == nil {
return false
}
mainBucket := ns.NestedReadBucket(mainBucketName)
return mainBucket != nil
}
// createScopedManagerNS creates the namespace buckets for a new registered
// manager scope within the top level bucket. All relevant sub-buckets that a
// ScopedManager needs to perform its duties are also created.
func createScopedManagerNS(ns walletdb.ReadWriteBucket, scope *KeyScope) error {
// First, we'll create the scope bucket itself for this particular
// scope.
scopeKey := scopeToBytes(scope)
scopeBucket, err := ns.CreateBucket(scopeKey[:])
if err != nil {
str := "failed to create sync bucket"
return managerError(ErrDatabase, str, err)
}
_, err = scopeBucket.CreateBucket(acctBucketName)
if err != nil {
str := "failed to create account bucket"
return managerError(ErrDatabase, str, err)
}
_, err = scopeBucket.CreateBucket(addrBucketName)
if err != nil {
str := "failed to create address bucket"
return managerError(ErrDatabase, str, err)
}
// usedAddrBucketName bucket was added after manager version 1 release
_, err = scopeBucket.CreateBucket(usedAddrBucketName)
if err != nil {
str := "failed to create used addresses bucket"
return managerError(ErrDatabase, str, err)
}
_, err = scopeBucket.CreateBucket(addrAcctIdxBucketName)
if err != nil {
str := "failed to create address index bucket"
return managerError(ErrDatabase, str, err)
}
_, err = scopeBucket.CreateBucket(acctNameIdxBucketName)
if err != nil {
str := "failed to create an account name index bucket"
return managerError(ErrDatabase, str, err)
}
_, err = scopeBucket.CreateBucket(acctIDIdxBucketName)
if err != nil {
str := "failed to create an account id index bucket"
return managerError(ErrDatabase, str, err)
}
_, err = scopeBucket.CreateBucket(metaBucketName)
if err != nil {
str := "failed to create a meta bucket"
return managerError(ErrDatabase, str, err)
}
return nil
}
// createManagerNS creates the initial namespace structure needed for all of
// the manager data. This includes things such as all of the buckets as well
// as the version and creation date. In addition to creating the key space for
// the root address manager, we'll also create internal scopes for all the
// default manager scope types.
func createManagerNS(ns walletdb.ReadWriteBucket,
defaultScopes map[KeyScope]ScopeAddrSchema) error {
// First, we'll create all the relevant buckets that stem off of the
// main bucket.
mainBucket, err := ns.CreateBucket(mainBucketName)
if err != nil {
str := "failed to create main bucket"
return managerError(ErrDatabase, str, err)
}
_, err = ns.CreateBucket(syncBucketName)
if err != nil {
str := "failed to create sync bucket"
return managerError(ErrDatabase, str, err)
}
// We'll also create the two top-level scope related buckets as
// preparation for the operations below.
scopeBucket, err := ns.CreateBucket(scopeBucketName)
if err != nil {
str := "failed to create scope bucket"
return managerError(ErrDatabase, str, err)
}
scopeSchemas, err := ns.CreateBucket(scopeSchemaBucketName)
if err != nil {
str := "failed to create scope schema bucket"
return managerError(ErrDatabase, str, err)
}
// Next, we'll create the namespace for each of the relevant default
// manager scopes.
for scope, scopeSchema := range defaultScopes {
// Before we create the entire namespace of this scope, we'll
// update the schema mapping to note what types of addresses it
// prefers.
scopeKey := scopeToBytes(&scope)
schemaBytes := scopeSchemaToBytes(&scopeSchema)
err := scopeSchemas.Put(scopeKey[:], schemaBytes)
if err != nil {
return err
}
err = createScopedManagerNS(scopeBucket, &scope)
if err != nil {
return err
}
err = putLastAccount(ns, &scope, DefaultAccountNum)
if err != nil {
return err
}
}
if err := putManagerVersion(ns, latestMgrVersion); err != nil {
return err
}
createDate := uint64(time.Now().Unix())
var dateBytes [8]byte
binary.LittleEndian.PutUint64(dateBytes[:], createDate)
err = mainBucket.Put(mgrCreateDateName, dateBytes[:])
if err != nil {
str := "failed to store database creation time"
return managerError(ErrDatabase, str, err)
}
return nil
}
| putAccountIDIndex |
models_suite_test.go | package models_test
import (
"flag"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var pgHost string
func init() {
flag.StringVar(&pgHost, "pg.host", "localhost", "")
}
func TestModels(t *testing.T) {
RegisterFailHandler(Fail) | RunSpecs(t, "Models Suite")
} | |
messages_test.go | package loop3
import (
"bytes"
"crypto/sha512"
"github.com/google/go-cmp/cmp"
loop3_pb "github.com/openziti/ziti/ziti-fabric-test/subcmd/loop3/pb"
"github.com/stretchr/testify/require"
"math/rand"
"reflect"
"testing"
)
type testPeer struct {
bytes.Buffer
}
func (t *testPeer) Close() error {
return nil
}
func Test_MessageSerDeser(t *testing.T) {
req := require.New(t)
data := make([]byte, 4192)
rand.Read(data)
hash := sha512.Sum512(data)
block := &RandHashedBlock{
Type: BlockTypePlain,
Sequence: 10,
Hash: hash[:],
Data: data,
}
testBuf := &testPeer{}
p := &protocol{
peer: testBuf,
test: &loop3_pb.Test{
Name: "test",
}, |
readBlock := &RandHashedBlock{}
req.NoError(readBlock.Rx(p))
req.True(reflect.DeepEqual(block, readBlock), cmp.Diff(block, readBlock))
data = make([]byte, 4192)
rand.Read(data)
hash = sha512.Sum512(data)
block = &RandHashedBlock{
Type: BlockTypeLatencyRequest,
Sequence: 10,
Hash: hash[:],
Data: data,
}
req.NoError(block.Tx(p))
readBlock = &RandHashedBlock{}
req.NoError(readBlock.Rx(p))
req.Equal("", cmp.Diff(block, readBlock))
} | }
req.NoError(block.Tx(p)) |
roll.rs | use crate::{
commands::{MyCommand, MyCommandOption},
error::Error,
util::MessageExt,
BotResult, CommandData, Context, MessageBuilder,
};
use rand::Rng;
use std::sync::Arc;
use twilight_model::application::interaction::{
application_command::CommandOptionValue, ApplicationCommand,
};
const DEFAULT_LIMIT: u64 = 100;
#[command]
#[short_desc("Get a random number")]
#[long_desc( | #[usage("[upper limit]")]
#[no_typing()]
async fn roll(ctx: Arc<Context>, data: CommandData) -> BotResult<()> {
match data {
CommandData::Message { msg, mut args, num } => {
let limit = match num {
Some(n) => n as u64,
None => match args.next().map(|arg| arg.parse()) {
Some(Ok(n)) => n,
None | Some(Err(_)) => DEFAULT_LIMIT,
},
};
_roll(ctx, CommandData::Message { msg, args, num }, limit).await
}
CommandData::Interaction { command } => slash_roll(ctx, *command).await,
}
}
async fn _roll(ctx: Arc<Context>, data: CommandData<'_>, limit: u64) -> BotResult<()> {
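// Editorial note: `(limit + 1).max(2)` guards against `limit == 0`, which
// would otherwise yield the empty range `1..1` and make `gen_range` panic;
// with the guard the smallest possible range is `1..2`, i.e. always 1.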
let num = rand::thread_rng().gen_range(1..(limit + 1).max(2));
let author_id = data.author()?.id;
let description = format!(
"<@{author_id}> rolls {num} point{} :game_die:",
if num == 1 { "" } else { "s" }
);
let builder = MessageBuilder::new().embed(description);
data.create_message(&ctx, builder).await?;
Ok(())
}
pub async fn slash_roll(ctx: Arc<Context>, command: ApplicationCommand) -> BotResult<()> {
let mut limit = None;
if let Some(option) = command.data.options.first() {
let option = (option.name == "limit").then(|| match option.value {
CommandOptionValue::Integer(value) => Some(value),
_ => None,
});
match option.flatten() {
Some(value) => limit = Some(value.max(0) as u64),
None => return Err(Error::InvalidCommandOptions),
}
}
_roll(ctx, command.into(), limit.unwrap_or(DEFAULT_LIMIT)).await
}
pub fn define_roll() -> MyCommand {
let limit = MyCommandOption::builder("limit", "Specify an upper limit, defaults to 100")
.min_int(0)
.integer(Vec::new(), false);
MyCommand::new("roll", "Roll a random number").options(vec![limit])
} | "Get a random number.\n\
If no upper limit is specified, it defaults to 100."
)] |
finger.py | import cv2
import mediapipe
import numpy
import pydirectinput
class FingerDetector:
wScr, hScr = pydirectinput.size() #Get the current screen resolution
pX, pY = 0, 0
cX, cY = 0, 0
def __init__(self):
"""
Initialize all objects
"""
#Load the mediapipe libraries/solutions
self.initHand = mediapipe.solutions.hands
self.mainHand = self.initHand.Hands(min_detection_confidence=0.7, min_tracking_confidence=0.7)
self.draw = mediapipe.solutions.drawing_utils
self.fingerTips = []
self.img = None
def handLandmarks(self, colorImg):
"""
Detect the hand landmarks
"""
landmarkList = []
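# Each entry appended below has the form [landmark_index, x_px, y_px],
# with coordinates converted from normalized [0, 1] values to pixels.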
landmarkPositions = self.mainHand.process(colorImg) # Process the given image
landmarkCheck = landmarkPositions.multi_hand_landmarks
if landmarkCheck: # Checks if landmarks exist
for index, hand in enumerate(landmarkCheck): # differentiate by hand
for index, landmark in enumerate(hand.landmark):
self.draw.draw_landmarks(self.img, hand, self.initHand.HAND_CONNECTIONS)
h, w, c = self.img.shape
centerX, centerY = int(landmark.x * w), int(landmark.y * h)
landmarkList.append([index, centerX, centerY])
return landmarkList
def fingers(self, landmarks):
"""
Check the action of the fingers
"""
fingerTips = []
tipIds = [4, 8, 12, 16, 20] #Values for each fingertip
#Check if the thumb is up
if landmarks[tipIds[0]][1] > landmarks[tipIds[0] - 1][1]:
fingerTips.append(1)
else:
fingerTips.append(0)
#Check if fingers are up and the thumb is down
for id in range(1, 5):
if landmarks[tipIds[id]][2] < landmarks[tipIds[id] - 3][2]: # Checks to see if the tip of the finger is higher than the joint
|
else:
fingerTips.append(0)
return fingerTips
def fingerDetection(self, frame):
"""
Detect finger positions in the frame and translate them into mouse movement and clicks
"""
frame = cv2.flip(frame, 1)
self.img = frame
imgRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # Changes the format of the frames from BGR to RGB
self.lmList = self.handLandmarks(imgRGB)
if len(self.lmList) > 12:
x1, y1 = self.lmList[8][1:]
finger = self.fingers(self.lmList)
if finger[1] == 1 and finger[2] == 0:
x3 = numpy.interp(x1, (75, 720 - 75), (75, self.wScr)) # Converts the width of the window relative to the screen width
y3 = numpy.interp(y1, (75, 560 - 75), (75, self.hScr)) # Converts the height of the window relative to the screen height
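# Editorial note: the next two lines apply exponential smoothing with a
# factor of 0.5 (move halfway from the previous position toward the new
# target) to reduce cursor jitter.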
cX = self.pX + (x3 - self.pX) /2 # Smooth out the mouse x movement
cY = self.pY + (y3 - self.pY) /2 # Smooth out the mouse y movement
pydirectinput.moveTo(int(cX), int(cY)) #Move the mouse using pydirectinput
self.pX, self.pY = cX, cY # Save the current x and y values
if finger[1] == 0 and finger[0] == 1: # Check if the pointer finger is down and the thumb finger is up
pydirectinput.rightClick()
return
| fingerTips.append(1) |
time_warp.rs | use instant::Instant;
use abstutil::prettyprint_usize;
use geom::{Duration, Polygon, Pt2D, Ring, Time};
use map_gui::render::DrawOptions;
use map_gui::tools::{grey_out_map, PopupMsg};
use map_gui::ID;
use widgetry::{
Choice, DrawBaselayer, EventCtx, GeomBatch, GfxCtx, Key, Line, Outcome, Panel, ScreenDims,
Slider, State, TabController, Text, Toggle, UpdateType, Widget,
};
use crate::app::{App, FindDelayedIntersections, ShowEverything, Transition};
use crate::common::Warping;
use crate::sandbox::{GameplayMode, SandboxMode};
// TODO Text entry would be great
pub struct JumpToTime {
panel: Panel,
target: Time,
maybe_mode: Option<GameplayMode>,
tabs: TabController,
}
impl JumpToTime {
pub fn new_state(
ctx: &mut EventCtx,
app: &App,
maybe_mode: Option<GameplayMode>,
) -> Box<dyn State<App>> {
let target = app.primary.sim.time();
let end_of_day = app.primary.sim.get_end_of_day();
let jump_to_time_btn = ctx
.style()
.btn_tab
.text("Jump to time")
.hotkey(Key::T)
.tooltip("Jump to time");
let jump_to_time_content = {
// TODO Auto-fill width?
let slider_width = 500.0;
Widget::col(vec![
Line("Jump to what time?").small_heading().into_widget(ctx),
if app.has_prebaked().is_some() {
GeomBatch::from(vec![(
ctx.style().icon_fg.alpha(0.7),
area_under_curve(
app.prebaked().active_agents(end_of_day),
slider_width,
50.0,
),
)])
.into_widget(ctx)
} else {
Widget::nothing()
},
Slider::area(ctx, slider_width, target.to_percent(end_of_day).min(1.0))
.named("time slider"),
build_jump_to_time_btn(ctx, target),
])
};
let jump_to_delay_btn = ctx
.style()
.btn_tab
.text("Jump to delay")
.hotkey(Key::D)
.tooltip("Jump to delay");
let jump_to_delay_content = Widget::col(vec![
Widget::row(vec![
Line("Jump to next").small_heading().into_widget(ctx),
Widget::dropdown(
ctx,
"delay",
app.opts.jump_to_delay,
vec![
Choice::new("1", Duration::minutes(1)),
Choice::new("2", Duration::minutes(2)),
Choice::new("5", Duration::minutes(5)),
Choice::new("10", Duration::minutes(10)),
],
),
Line("minute delay").small_heading().into_widget(ctx),
]),
build_jump_to_delay_button(ctx, app.opts.jump_to_delay),
]);
let mut tabs = TabController::new("jump_to_time_tabs");
tabs.push_tab(jump_to_time_btn, jump_to_time_content);
tabs.push_tab(jump_to_delay_btn, jump_to_delay_content);
Box::new(JumpToTime {
target,
maybe_mode,
panel: Panel::new_builder(Widget::col(vec![
ctx.style().btn_close_widget(ctx),
tabs.build_widget(ctx),
]))
.exact_size(ScreenDims::new(640.0, 360.0))
.build(ctx),
tabs,
})
}
}
impl State<App> for JumpToTime {
fn event(&mut self, ctx: &mut EventCtx, app: &mut App) -> Transition {
match self.panel.event(ctx) {
Outcome::Clicked(x) => match x.as_ref() {
"close" => {
return Transition::Pop;
}
"jump to time" => {
if self.target < app.primary.sim.time() {
if let Some(mode) = self.maybe_mode.take() {
let target_time = self.target;
return Transition::Replace(SandboxMode::async_new(
app,
mode,
Box::new(move |ctx, app| {
vec![Transition::Push(TimeWarpScreen::new_state(
ctx,
app,
target_time,
None,
))]
}),
));
} else {
return Transition::Replace(PopupMsg::new_state(
ctx,
"Error",
vec!["Sorry, you can't go rewind time from this mode."],
));
}
}
return Transition::Replace(TimeWarpScreen::new_state(
ctx,
app,
self.target,
None,
));
}
"jump to delay" => {
let delay = self.panel.dropdown_value("delay");
app.opts.jump_to_delay = delay;
return Transition::Replace(TimeWarpScreen::new_state(
ctx,
app,
app.primary.sim.get_end_of_day(),
Some(delay),
));
}
action => {
if self.tabs.handle_action(ctx, action, &mut self.panel) {
// if true, tabs has handled the action
} else {
unreachable!("unhandled action: {}", action)
}
}
},
Outcome::Changed(_) => {
if self.tabs.active_tab_idx() == 1 {
self.panel.replace(
ctx,
"jump to delay",
build_jump_to_delay_button(ctx, self.panel.dropdown_value("delay")),
);
}
}
_ => {}
}
if self.tabs.active_tab_idx() == 0 {
let target = app
.primary
.sim
.get_end_of_day()
.percent_of(self.panel.slider("time slider").get_percent())
.round_seconds(600.0);
if target != self.target {
self.target = target;
self.panel
.replace(ctx, "jump to time", build_jump_to_time_btn(ctx, target));
}
}
if self.panel.clicked_outside(ctx) {
return Transition::Pop;
}
Transition::Keep
}
fn draw(&self, g: &mut GfxCtx, app: &App) {
grey_out_map(g, app);
self.panel.draw(g);
}
}
// Display a nicer screen for jumping forwards in time, allowing cancellation.
pub struct TimeWarpScreen {
target: Time,
wall_time_started: Instant,
sim_time_started: geom::Time,
halt_upon_delay: Option<Duration>,
panel: Panel,
}
impl TimeWarpScreen {
pub fn new_state(
ctx: &mut EventCtx,
app: &mut App,
target: Time,
mut halt_upon_delay: Option<Duration>,
) -> Box<dyn State<App>> {
if let Some(halt_limit) = halt_upon_delay {
if app.primary.sim_cb.is_none() {
app.primary.sim_cb = Some(Box::new(FindDelayedIntersections {
halt_limit,
report_limit: halt_limit,
currently_delayed: Vec::new(),
}));
// TODO Can we get away with less frequently? Not sure about all the edge cases
app.primary.sim.set_periodic_callback(Duration::minutes(1));
} else {
halt_upon_delay = None;
}
}
Box::new(TimeWarpScreen {
target,
wall_time_started: Instant::now(),
sim_time_started: app.primary.sim.time(),
halt_upon_delay,
panel: Panel::new_builder(
Widget::col(vec![
Text::new().into_widget(ctx).named("text"),
Toggle::checkbox(
ctx,
"skip drawing (for faster simulations)",
Key::Space,
app.opts.dont_draw_time_warp,
)
.named("don't draw"),
ctx.style()
.btn_outline
.text("stop now")
.hotkey(Key::Escape)
.build_def(ctx)
.centered_horiz(),
])
// hardcoded width avoids jiggle due to text updates
.force_width(700.0),
)
.build(ctx),
})
}
}
impl State<App> for TimeWarpScreen {
fn event(&mut self, ctx: &mut EventCtx, app: &mut App) -> Transition {
if ctx.input.nonblocking_is_update_event().is_some() {
ctx.input.use_update_event();
app.primary.sim.time_limited_step(
&app.primary.map,
self.target - app.primary.sim.time(),
Duration::seconds(0.033),
&mut app.primary.sim_cb,
);
#[allow(clippy::never_loop)]
for (t, maybe_i, alert) in app.primary.sim.clear_alerts() {
// TODO Just the first :(
return Transition::Replace(PopupMsg::new_state(
ctx,
"Alert",
vec![format!("At {}, near {:?}, {}", t, maybe_i, alert)],
));
}
if let Some(ref mut cb) = app.primary.sim_cb {
let di = cb.downcast_mut::<FindDelayedIntersections>().unwrap();
if let Some((i, t)) = di.currently_delayed.get(0) {
if app.primary.sim.time() - *t > di.halt_limit {
let id = ID::Intersection(*i);
app.primary.layer =
Some(Box::new(crate::layer::traffic::TrafficJams::new(ctx, app)));
return Transition::Replace(Warping::new_state(
ctx,
app.primary.canonical_point(id.clone()).unwrap(),
Some(10.0),
Some(id),
&mut app.primary,
));
}
}
}
let now = app.primary.sim.time();
let (finished_after, _) = app.primary.sim.num_trips();
let finished_before = if app.has_prebaked().is_some() {
let mut cnt = 0;
for (t, _, _, _) in &app.prebaked().finished_trips {
if *t > now {
break;
}
cnt += 1;
}
Some(cnt)
} else {
None
};
let elapsed_sim_time = now - self.sim_time_started;
let elapsed_wall_time = Duration::realtime_elapsed(self.wall_time_started);
let txt = Text::from_multiline(vec![
// I'm covered in shame for not doing this from the start.
Line("Let's do the time warp again!").small_heading(),
Line(format!(
"{} / {}",
now.ampm_tostring(),
self.target.ampm_tostring()
)),
Line(format!(
"Speed: {}x",
prettyprint_usize((elapsed_sim_time / elapsed_wall_time) as usize)
)),
if let Some(n) = finished_before {
// TODO Underline
Line(format!(
"Finished trips: {} ({} compared to before \"{}\")",
prettyprint_usize(finished_after),
compare_count(finished_after, n),
app.primary.map.get_edits().edits_name,
))
} else {
Line(format!(
"Finished trips: {}",
prettyprint_usize(finished_after)
))
},
]);
self.panel.replace(ctx, "text", txt.into_widget(ctx));
}
// >= because of the case of resetting to midnight. GameplayMode::initialize takes a tiny
// step past midnight after spawning things, so that agents initially appear on the map.
if app.primary.sim.time() >= self.target {
return Transition::Pop;
}
match self.panel.event(ctx) {
Outcome::Changed(_) => {
app.opts.dont_draw_time_warp = self.panel.is_checked("don't draw");
}
Outcome::Clicked(x) => match x.as_ref() {
"stop now" => {
return Transition::Pop;
}
_ => unreachable!(),
},
_ => {}
}
if self.panel.clicked_outside(ctx) {
return Transition::Pop;
}
ctx.request_update(UpdateType::Game);
Transition::Keep
}
fn draw_baselayer(&self) -> DrawBaselayer {
DrawBaselayer::Custom
}
fn draw(&self, g: &mut GfxCtx, app: &App) {
if app.opts.dont_draw_time_warp {
g.clear(app.cs.inner_panel_bg);
} else {
app.draw(g, DrawOptions::new(), &ShowEverything::new());
grey_out_map(g, app);
}
self.panel.draw(g);
}
fn on_destroy(&mut self, _: &mut EventCtx, app: &mut App) {
if self.halt_upon_delay.is_some() {
assert!(app.primary.sim_cb.is_some());
app.primary.sim_cb = None;
app.primary.sim.unset_periodic_callback();
}
}
}
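// Editorial note: builds a filled polygon of the "active agents over time"
// curve shown behind the jump-to-time slider. The raw series is downsampled
// to at most 100 points with the largest-triangle-three-buckets algorithm
// from the `lttb` crate, then closed along the bottom edge into a ring.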
fn area_under_curve(raw: Vec<(Time, usize)>, width: f64, height: f64) -> Polygon {
assert!(!raw.is_empty());
let min_x = Time::START_OF_DAY; | let min_y = 0;
let max_x = raw.last().unwrap().0;
let max_y = raw.iter().max_by_key(|(_, cnt)| *cnt).unwrap().1;
let mut pts = Vec::new();
for (t, cnt) in raw {
pts.push(lttb::DataPoint::new(
width * (t - min_x) / (max_x - min_x),
height * (1.0 - (((cnt - min_y) as f64) / ((max_y - min_y) as f64))),
));
}
let mut downsampled = Vec::new();
for pt in lttb::lttb(pts, 100) {
downsampled.push(Pt2D::new(pt.x, pt.y));
}
downsampled.push(Pt2D::new(width, height));
downsampled.push(downsampled[0]);
Ring::must_new(downsampled).into_polygon()
}
// TODO Maybe color, put in helpers
fn compare_count(after: usize, before: usize) -> String {
match after.cmp(&before) {
std::cmp::Ordering::Equal => "+0".to_string(),
std::cmp::Ordering::Greater => {
format!("+{}", prettyprint_usize(after - before))
}
std::cmp::Ordering::Less => {
format!("-{}", prettyprint_usize(before - after))
}
}
}
fn build_jump_to_time_btn(ctx: &EventCtx, target: Time) -> Widget {
ctx.style()
.btn_solid_primary
.text(format!("Jump to {}", target.ampm_tostring()))
.hotkey(Key::Enter)
.build_widget(ctx, "jump to time")
.centered_horiz()
.margin_above(16)
}
fn build_jump_to_delay_button(ctx: &EventCtx, delay: Duration) -> Widget {
ctx.style()
.btn_solid_primary
.text(format!("Jump to next {} delay", delay))
.hotkey(Key::Enter)
.build_widget(ctx, "jump to delay")
.centered_horiz()
.margin_above(16)
} | |
QueryType.go | package QueryType
type QueryType uint16
var (
UNKNOWN QueryType = 0
A QueryType = 1
)
func QueryTypeToInt(QT QueryType) uint16 |
func IntToQueryType(Int uint16) QueryType {
if Int == 1 {
return A
}
return UNKNOWN
}
| {
if QT == A {
return 1
}
return uint16(UNKNOWN)
} |
ban_user.rs | //! `POST /_matrix/client/*/rooms/{roomId}/ban`
pub mod v3 {
//! `/v3/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidban
use ruma_api::ruma_api;
use ruma_identifiers::{RoomId, UserId};
ruma_api! {
metadata: {
description: "Ban a user from a room.",
method: POST,
name: "ban_user",
r0_path: "/_matrix/client/r0/rooms/:room_id/ban",
stable_path: "/_matrix/client/v3/rooms/:room_id/ban",
rate_limited: false,
authentication: AccessToken,
added: 1.0,
}
request: {
/// The room to ban the user from.
#[ruma_api(path)]
pub room_id: &'a RoomId,
/// The user to ban.
pub user_id: &'a UserId,
/// The reason for banning the user.
#[serde(skip_serializing_if = "Option::is_none")]
pub reason: Option<&'a str>,
}
#[derive(Default)]
response: {}
error: crate::Error
}
| }
}
impl Response {
/// Creates an empty `Response`.
pub fn new() -> Self {
Self {}
}
}
} | impl<'a> Request<'a> {
/// Creates a new `Request` with the given room id and user id.
pub fn new(room_id: &'a RoomId, user_id: &'a UserId) -> Self {
Self { room_id, user_id, reason: None } |
shared_context_safe.rs | use super::*;
use std::any::Any;
#[derive(Debug)]
struct | (Box<dyn Any + Send + Sync>);
// This will simply fail to compile if the test fails, so we just always pass.
#[test]
fn shared_context_has_send_and_sync() -> Result<()> {
let container = SendSyncContainer(Box::new(StandardContext::new()));
println!("{:?}", container);
Ok(())
}
| SendSyncContainer |
fix-webm-duration.js | (function (name, definition) {
if (typeof define === 'function' && define.amd) { // RequireJS / AMD
define(definition);
} else if (typeof module !== 'undefined' && module.exports) { // CommonJS / Node.js
module.exports = definition();
} else { // Direct include
window.ysFixWebmDuration = definition();
}
})('fix-webm-duration', function () {
/*
* This is the list of possible WEBM file sections by their IDs.
* Possible types: Container, Binary, Uint, Int, String, Float, Date
*/
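// For example, sections[0x489] is { name: 'Duration', type: 'Float' }:
// the element inside the Info container that stores the media duration,
// which is the element a duration fix needs to rewrite.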
var sections = {
0xa45dfa3: { name: 'EBML', type: 'Container' },
0x286: { name: 'EBMLVersion', type: 'Uint' },
0x2f7: { name: 'EBMLReadVersion', type: 'Uint' },
0x2f2: { name: 'EBMLMaxIDLength', type: 'Uint' },
0x2f3: { name: 'EBMLMaxSizeLength', type: 'Uint' },
0x282: { name: 'DocType', type: 'String' },
0x287: { name: 'DocTypeVersion', type: 'Uint' },
0x285: { name: 'DocTypeReadVersion', type: 'Uint' },
0x6c: { name: 'Void', type: 'Binary' },
0x3f: { name: 'CRC-32', type: 'Binary' },
0xb538667: { name: 'SignatureSlot', type: 'Container' },
0x3e8a: { name: 'SignatureAlgo', type: 'Uint' },
0x3e9a: { name: 'SignatureHash', type: 'Uint' },
0x3ea5: { name: 'SignaturePublicKey', type: 'Binary' },
0x3eb5: { name: 'Signature', type: 'Binary' },
0x3e5b: { name: 'SignatureElements', type: 'Container' },
0x3e7b: { name: 'SignatureElementList', type: 'Container' },
0x2532: { name: 'SignedElement', type: 'Binary' },
0x8538067: { name: 'Segment', type: 'Container' },
0x14d9b74: { name: 'SeekHead', type: 'Container' },
0xdbb: { name: 'Seek', type: 'Container' },
0x13ab: { name: 'SeekID', type: 'Binary' },
0x13ac: { name: 'SeekPosition', type: 'Uint' },
0x549a966: { name: 'Info', type: 'Container' },
0x33a4: { name: 'SegmentUID', type: 'Binary' },
0x3384: { name: 'SegmentFilename', type: 'String' },
0x1cb923: { name: 'PrevUID', type: 'Binary' },
0x1c83ab: { name: 'PrevFilename', type: 'String' },
0x1eb923: { name: 'NextUID', type: 'Binary' },
0x1e83bb: { name: 'NextFilename', type: 'String' },
0x444: { name: 'SegmentFamily', type: 'Binary' },
0x2924: { name: 'ChapterTranslate', type: 'Container' },
0x29fc: { name: 'ChapterTranslateEditionUID', type: 'Uint' },
0x29bf: { name: 'ChapterTranslateCodec', type: 'Uint' },
0x29a5: { name: 'ChapterTranslateID', type: 'Binary' },
0xad7b1: { name: 'TimecodeScale', type: 'Uint' },
0x489: { name: 'Duration', type: 'Float' },
0x461: { name: 'DateUTC', type: 'Date' },
0x3ba9: { name: 'Title', type: 'String' },
0xd80: { name: 'MuxingApp', type: 'String' },
0x1741: { name: 'WritingApp', type: 'String' },
// 0xf43b675: { name: 'Cluster', type: 'Container' },
0x67: { name: 'Timecode', type: 'Uint' },
0x1854: { name: 'SilentTracks', type: 'Container' },
0x18d7: { name: 'SilentTrackNumber', type: 'Uint' },
0x27: { name: 'Position', type: 'Uint' },
0x2b: { name: 'PrevSize', type: 'Uint' },
0x23: { name: 'SimpleBlock', type: 'Binary' },
0x20: { name: 'BlockGroup', type: 'Container' },
0x21: { name: 'Block', type: 'Binary' },
0x22: { name: 'BlockVirtual', type: 'Binary' },
0x35a1: { name: 'BlockAdditions', type: 'Container' },
0x26: { name: 'BlockMore', type: 'Container' },
0x6e: { name: 'BlockAddID', type: 'Uint' },
0x25: { name: 'BlockAdditional', type: 'Binary' },
0x1b: { name: 'BlockDuration', type: 'Uint' },
0x7a: { name: 'ReferencePriority', type: 'Uint' },
0x7b: { name: 'ReferenceBlock', type: 'Int' },
0x7d: { name: 'ReferenceVirtual', type: 'Int' },
0x24: { name: 'CodecState', type: 'Binary' },
0x35a2: { name: 'DiscardPadding', type: 'Int' },
0xe: { name: 'Slices', type: 'Container' },
0x68: { name: 'TimeSlice', type: 'Container' },
0x4c: { name: 'LaceNumber', type: 'Uint' },
0x4d: { name: 'FrameNumber', type: 'Uint' },
0x4b: { name: 'BlockAdditionID', type: 'Uint' },
0x4e: { name: 'Delay', type: 'Uint' },
0x4f: { name: 'SliceDuration', type: 'Uint' },
0x48: { name: 'ReferenceFrame', type: 'Container' },
0x49: { name: 'ReferenceOffset', type: 'Uint' },
0x4a: { name: 'ReferenceTimeCode', type: 'Uint' },
0x2f: { name: 'EncryptedBlock', type: 'Binary' },
0x654ae6b: { name: 'Tracks', type: 'Container' },
0x2e: { name: 'TrackEntry', type: 'Container' },
0x57: { name: 'TrackNumber', type: 'Uint' },
0x33c5: { name: 'TrackUID', type: 'Uint' },
0x3: { name: 'TrackType', type: 'Uint' },
0x39: { name: 'FlagEnabled', type: 'Uint' },
0x8: { name: 'FlagDefault', type: 'Uint' },
0x15aa: { name: 'FlagForced', type: 'Uint' },
0x1c: { name: 'FlagLacing', type: 'Uint' },
0x2de7: { name: 'MinCache', type: 'Uint' },
0x2df8: { name: 'MaxCache', type: 'Uint' },
0x3e383: { name: 'DefaultDuration', type: 'Uint' },
0x34e7a: { name: 'DefaultDecodedFieldDuration', type: 'Uint' },
0x3314f: { name: 'TrackTimecodeScale', type: 'Float' },
0x137f: { name: 'TrackOffset', type: 'Int' },
0x15ee: { name: 'MaxBlockAdditionID', type: 'Uint' },
0x136e: { name: 'Name', type: 'String' },
0x2b59c: { name: 'Language', type: 'String' },
0x6: { name: 'CodecID', type: 'String' },
0x23a2: { name: 'CodecPrivate', type: 'Binary' },
0x58688: { name: 'CodecName', type: 'String' },
0x3446: { name: 'AttachmentLink', type: 'Uint' },
0x1a9697: { name: 'CodecSettings', type: 'String' },
0x1b4040: { name: 'CodecInfoURL', type: 'String' },
0x6b240: { name: 'CodecDownloadURL', type: 'String' },
0x2a: { name: 'CodecDecodeAll', type: 'Uint' },
0x2fab: { name: 'TrackOverlay', type: 'Uint' },
0x16aa: { name: 'CodecDelay', type: 'Uint' },
0x16bb: { name: 'SeekPreRoll', type: 'Uint' },
0x2624: { name: 'TrackTranslate', type: 'Container' },
0x26fc: { name: 'TrackTranslateEditionUID', type: 'Uint' },
0x26bf: { name: 'TrackTranslateCodec', type: 'Uint' },
0x26a5: { name: 'TrackTranslateTrackID', type: 'Binary' },
0x60: { name: 'Video', type: 'Container' },
0x1a: { name: 'FlagInterlaced', type: 'Uint' },
0x13b8: { name: 'StereoMode', type: 'Uint' },
0x13c0: { name: 'AlphaMode', type: 'Uint' },
0x13b9: { name: 'OldStereoMode', type: 'Uint' },
0x30: { name: 'PixelWidth', type: 'Uint' },
0x3a: { name: 'PixelHeight', type: 'Uint' },
0x14aa: { name: 'PixelCropBottom', type: 'Uint' },
0x14bb: { name: 'PixelCropTop', type: 'Uint' },
0x14cc: { name: 'PixelCropLeft', type: 'Uint' },
0x14dd: { name: 'PixelCropRight', type: 'Uint' },
0x14b0: { name: 'DisplayWidth', type: 'Uint' },
0x14ba: { name: 'DisplayHeight', type: 'Uint' },
0x14b2: { name: 'DisplayUnit', type: 'Uint' },
0x14b3: { name: 'AspectRatioType', type: 'Uint' },
0xeb524: { name: 'ColourSpace', type: 'Binary' },
0xfb523: { name: 'GammaValue', type: 'Float' },
0x383e3: { name: 'FrameRate', type: 'Float' },
0x61: { name: 'Audio', type: 'Container' },
0x35: { name: 'SamplingFrequency', type: 'Float' },
0x38b5: { name: 'OutputSamplingFrequency', type: 'Float' },
0x1f: { name: 'Channels', type: 'Uint' },
0x3d7b: { name: 'ChannelPositions', type: 'Binary' },
0x2264: { name: 'BitDepth', type: 'Uint' },
0x62: { name: 'TrackOperation', type: 'Container' },
0x63: { name: 'TrackCombinePlanes', type: 'Container' },
0x64: { name: 'TrackPlane', type: 'Container' },
0x65: { name: 'TrackPlaneUID', type: 'Uint' },
0x66: { name: 'TrackPlaneType', type: 'Uint' },
0x69: { name: 'TrackJoinBlocks', type: 'Container' },
0x6d: { name: 'TrackJoinUID', type: 'Uint' },
0x40: { name: 'TrickTrackUID', type: 'Uint' },
0x41: { name: 'TrickTrackSegmentUID', type: 'Binary' },
0x46: { name: 'TrickTrackFlag', type: 'Uint' },
0x47: { name: 'TrickMasterTrackUID', type: 'Uint' },
0x44: { name: 'TrickMasterTrackSegmentUID', type: 'Binary' },
0x2d80: { name: 'ContentEncodings', type: 'Container' },
0x2240: { name: 'ContentEncoding', type: 'Container' },
0x1031: { name: 'ContentEncodingOrder', type: 'Uint' },
0x1032: { name: 'ContentEncodingScope', type: 'Uint' },
0x1033: { name: 'ContentEncodingType', type: 'Uint' },
0x1034: { name: 'ContentCompression', type: 'Container' },
0x254: { name: 'ContentCompAlgo', type: 'Uint' },
0x255: { name: 'ContentCompSettings', type: 'Binary' },
0x1035: { name: 'ContentEncryption', type: 'Container' },
0x7e1: { name: 'ContentEncAlgo', type: 'Uint' },
0x7e2: { name: 'ContentEncKeyID', type: 'Binary' },
0x7e3: { name: 'ContentSignature', type: 'Binary' },
0x7e4: { name: 'ContentSigKeyID', type: 'Binary' },
0x7e5: { name: 'ContentSigAlgo', type: 'Uint' },
0x7e6: { name: 'ContentSigHashAlgo', type: 'Uint' },
0xc53bb6b: { name: 'Cues', type: 'Container' },
0x3b: { name: 'CuePoint', type: 'Container' },
0x33: { name: 'CueTime', type: 'Uint' },
0x37: { name: 'CueTrackPositions', type: 'Container' },
0x77: { name: 'CueTrack', type: 'Uint' },
0x71: { name: 'CueClusterPosition', type: 'Uint' },
0x70: { name: 'CueRelativePosition', type: 'Uint' },
0x32: { name: 'CueDuration', type: 'Uint' },
0x1378: { name: 'CueBlockNumber', type: 'Uint' },
0x6a: { name: 'CueCodecState', type: 'Uint' },
0x5b: { name: 'CueReference', type: 'Container' },
0x16: { name: 'CueRefTime', type: 'Uint' },
0x17: { name: 'CueRefCluster', type: 'Uint' },
0x135f: { name: 'CueRefNumber', type: 'Uint' },
0x6b: { name: 'CueRefCodecState', type: 'Uint' },
0x941a469: { name: 'Attachments', type: 'Container' },
0x21a7: { name: 'AttachedFile', type: 'Container' },
0x67e: { name: 'FileDescription', type: 'String' },
0x66e: { name: 'FileName', type: 'String' },
0x660: { name: 'FileMimeType', type: 'String' },
0x65c: { name: 'FileData', type: 'Binary' },
0x6ae: { name: 'FileUID', type: 'Uint' },
0x675: { name: 'FileReferral', type: 'Binary' },
0x661: { name: 'FileUsedStartTime', type: 'Uint' },
0x662: { name: 'FileUsedEndTime', type: 'Uint' },
0x43a770: { name: 'Chapters', type: 'Container' },
0x5b9: { name: 'EditionEntry', type: 'Container' },
0x5bc: { name: 'EditionUID', type: 'Uint' },
0x5bd: { name: 'EditionFlagHidden', type: 'Uint' },
0x5db: { name: 'EditionFlagDefault', type: 'Uint' },
0x5dd: { name: 'EditionFlagOrdered', type: 'Uint' },
0x36: { name: 'ChapterAtom', type: 'Container' },
0x33c4: { name: 'ChapterUID', type: 'Uint' },
0x1654: { name: 'ChapterStringUID', type: 'String' },
0x11: { name: 'ChapterTimeStart', type: 'Uint' },
0x12: { name: 'ChapterTimeEnd', type: 'Uint' },
0x18: { name: 'ChapterFlagHidden', type: 'Uint' },
0x598: { name: 'ChapterFlagEnabled', type: 'Uint' },
0x2e67: { name: 'ChapterSegmentUID', type: 'Binary' },
0x2ebc: { name: 'ChapterSegmentEditionUID', type: 'Uint' },
0x23c3: { name: 'ChapterPhysicalEquiv', type: 'Uint' },
0xf: { name: 'ChapterTrack', type: 'Container' },
0x9: { name: 'ChapterTrackNumber', type: 'Uint' },
0x0: { name: 'ChapterDisplay', type: 'Container' },
0x5: { name: 'ChapString', type: 'String' },
0x37c: { name: 'ChapLanguage', type: 'String' },
0x37e: { name: 'ChapCountry', type: 'String' },
0x2944: { name: 'ChapProcess', type: 'Container' },
0x2955: { name: 'ChapProcessCodecID', type: 'Uint' },
0x50d: { name: 'ChapProcessPrivate', type: 'Binary' },
0x2911: { name: 'ChapProcessCommand', type: 'Container' },
0x2922: { name: 'ChapProcessTime', type: 'Uint' },
0x2933: { name: 'ChapProcessData', type: 'Binary' },
0x254c367: { name: 'Tags', type: 'Container' },
0x3373: { name: 'Tag', type: 'Container' },
0x23c0: { name: 'Targets', type: 'Container' },
0x28ca: { name: 'TargetTypeValue', type: 'Uint' },
0x23ca: { name: 'TargetType', type: 'String' },
0x23c5: { name: 'TagTrackUID', type: 'Uint' },
0x23c9: { name: 'TagEditionUID', type: 'Uint' },
0x23c4: { name: 'TagChapterUID', type: 'Uint' },
0x23c6: { name: 'TagAttachmentUID', type: 'Uint' },
0x27c8: { name: 'SimpleTag', type: 'Container' },
0x5a3: { name: 'TagName', type: 'String' },
0x47a: { name: 'TagLanguage', type: 'String' },
0x484: { name: 'TagDefault', type: 'Uint' },
0x487: { name: 'TagString', type: 'String' },
0x485: { name: 'TagBinary', type: 'Binary' }
};
function doInherit(newClass, baseClass) {
newClass.prototype = Object.create(baseClass.prototype);
newClass.prototype.constructor = newClass;
}
function WebmBase(name, type) {
this.name = name || 'Unknown';
this.type = type || 'Unknown';
}
WebmBase.prototype.updateBySource = function() { };
WebmBase.prototype.setSource = function(source) {
this.source = source;
this.updateBySource();
};
WebmBase.prototype.updateByData = function() { };
WebmBase.prototype.setData = function(data) {
this.data = data;
this.updateByData();
};
function WebmUint(name, type) {
WebmBase.call(this, name, type || 'Uint');
}
doInherit(WebmUint, WebmBase);
function padHex(hex) {
return hex.length % 2 === 1 ? '0' + hex : hex;
}
WebmUint.prototype.updateBySource = function() {
// use hex representation of a number instead of number value
this.data = '';
for (var i = 0; i < this.source.length; i++) {
var hex = this.source[i].toString(16);
this.data += padHex(hex);
}
};
WebmUint.prototype.updateByData = function() {
var length = this.data.length / 2;
this.source = new Uint8Array(length);
for (var i = 0; i < length; i++) {
var hex = this.data.substr(i * 2, 2);
this.source[i] = parseInt(hex, 16);
}
};
WebmUint.prototype.getValue = function() {
return parseInt(this.data, 16);
};
WebmUint.prototype.setValue = function(value) {
this.setData(padHex(value.toString(16)));
};
function WebmFloat(name, type) {
WebmBase.call(this, name, type || 'Float');
}
doInherit(WebmFloat, WebmBase);
WebmFloat.prototype.getFloatArrayType = function() {
return this.source && this.source.length === 4 ? Float32Array : Float64Array;
};
WebmFloat.prototype.updateBySource = function() {
var byteArray = this.source.reverse();
var floatArrayType = this.getFloatArrayType();
var floatArray = new floatArrayType(byteArray.buffer);
this.data = floatArray[0];
};
WebmFloat.prototype.updateByData = function() {
var floatArrayType = this.getFloatArrayType();
var floatArray = new floatArrayType([ this.data ]);
var byteArray = new Uint8Array(floatArray.buffer);
this.source = byteArray.reverse();
};
WebmFloat.prototype.getValue = function() {
return this.data;
};
WebmFloat.prototype.setValue = function(value) {
this.setData(value);
};
function WebmContainer(name, type) {
WebmBase.call(this, name, type || 'Container');
}
doInherit(WebmContainer, WebmBase);
WebmContainer.prototype.readByte = function() {
return this.source[this.offset++];
};
WebmContainer.prototype.readUint = function() {
var firstByte = this.readByte();
var bytes = 8 - firstByte.toString(2).length;
var value = firstByte - (1 << (7 - bytes));
for (var i = 0; i < bytes; i++) {
// avoid bitwise operators: JavaScript bitwise math truncates to 32 bits
value *= 256;
value += this.readByte();
}
return value;
};
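    // Worked example (illustrative, not part of the library): decoding the EBML
    // header ID bytes [0x1a, 0x45, 0xdf, 0xa3]. The first byte 0x1a is 0b00011010;
    // its three leading zero bits say three more bytes follow. Stripping the
    // length marker leaves 0x0a, and accumulating the remaining bytes yields
    // 0xa45dfa3 -- exactly the 'EBML' key in the sections table above.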
WebmContainer.prototype.updateBySource = function() {
this.data = [];
for (this.offset = 0; this.offset < this.source.length; this.offset = end) {
var id = this.readUint();
var len = this.readUint();
var end = Math.min(this.offset + len, this.source.length);
var data = this.source.slice(this.offset, end);
var info = sections[id] || { name: 'Unknown', type: 'Unknown' };
var ctr = WebmBase;
switch (info.type) {
case 'Container':
ctr = WebmContainer;
break;
case 'Uint':
ctr = WebmUint;
break;
case 'Float':
ctr = WebmFloat;
break;
}
var section = new ctr(info.name, info.type);
section.setSource(data);
this.data.push({
id: id,
idHex: id.toString(16),
data: section
});
}
};
WebmContainer.prototype.writeUint = function(x, draft) {
for (var bytes = 1, flag = 0x80; x >= flag && bytes < 8; bytes++, flag *= 0x80) { }
if (!draft) {
var value = flag + x;
for (var i = bytes - 1; i >= 0; i--) {
// avoid bitwise operators: JavaScript bitwise math truncates to 32 bits
var c = value % 256;
this.source[this.offset + i] = c;
value = (value - c) / 256;
}
}
this.offset += bytes;
};
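    // Worked example (illustrative): writeUint(500, false) settles on two bytes,
    // so the length flag is 0x4000; 0x4000 + 500 = 0x41f4 and the bytes 0x41 0xf4
    // are emitted. readUint() above strips the marker bit to recover 500.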
WebmContainer.prototype.writeSections = function(draft) {
this.offset = 0;
for (var i = 0; i < this.data.length; i++) {
var section = this.data[i],
content = section.data.source,
contentLength = content.length;
this.writeUint(section.id, draft);
this.writeUint(contentLength, draft);
if (!draft) {
this.source.set(content, this.offset);
}
this.offset += contentLength;
}
return this.offset;
};
WebmContainer.prototype.updateByData = function() {
// run without accessing this.source to determine total length - need to know it to create Uint8Array
var length = this.writeSections('draft');
this.source = new Uint8Array(length);
// now really write data
this.writeSections();
};
WebmContainer.prototype.getSectionById = function(id) {
for (var i = 0; i < this.data.length; i++) {
var section = this.data[i];
if (section.id === id) {
return section.data;
}
}
return null;
};
function WebmFile(source) {
WebmContainer.call(this, 'File', 'File');
this.setSource(source);
}
doInherit(WebmFile, WebmContainer);
WebmFile.prototype.fixDuration = function(duration) {
var segmentSection = this.getSectionById(0x8538067);
if (!segmentSection) {
console.log('[fix-webm-duration] Segment section is missing');
return false;
}
var infoSection = segmentSection.getSectionById(0x549a966);
if (!infoSection) {
console.log('[fix-webm-duration] Info section is missing');
return false;
}
var timeScaleSection = infoSection.getSectionById(0xad7b1);
if (!timeScaleSection) {
console.log('[fix-webm-duration] TimecodeScale section is missing');
return false;
}
var durationSection = infoSection.getSectionById(0x489);
if (durationSection) {
if (durationSection.getValue() <= 0) {
console.log('[fix-webm-duration] Duration section is present, but the value is empty');
durationSection.setValue(duration);
} else {
console.log('[fix-webm-duration] Duration section is present');
return false;
}
} else {
console.log('[fix-webm-duration] Duration section is missing');
// append Duration section
durationSection = new WebmFloat('Duration', 'Float');
durationSection.setValue(duration);
infoSection.data.push({
id: 0x489,
data: durationSection
});
}
// set default time scale to 1 millisecond (1000000 nanoseconds)
timeScaleSection.setValue(1000000);
infoSection.updateByData();
segmentSection.updateByData();
this.updateByData();
return true;
};
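    // Note: fixDuration() forces TimecodeScale to 1,000,000 ns above, so the
    // `duration` argument is interpreted in milliseconds.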
WebmFile.prototype.toBlob = function() {
return new Blob([ this.source.buffer ], { type: 'video/webm' });
};
return function(blob, duration, callback) {
try {
var reader = new FileReader();
            reader.onloadend = function() {
                try {
                    var file = new WebmFile(new Uint8Array(reader.result));
                    if (file.fixDuration(duration)) {
                        blob = file.toBlob();
                    }
                } catch (ex) {
                    // ignore
                }
                callback(blob);
            };
            reader.readAsArrayBuffer(blob);
        } catch (ex) {
            callback(blob);
        }
    };
});
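// Usage sketch (illustrative; the variable names are hypothetical). A
// MediaRecorder blob typically lacks a Duration header, so the caller tracks
// the recording time and patches it in afterwards:
//
//     var startTime = Date.now();
//     recorder.onstop = function () {
//         var durationMs = Date.now() - startTime;
//         ysFixWebmDuration(recordedBlob, durationMs, function (fixedBlob) {
//             videoEl.src = URL.createObjectURL(fixedBlob); // now seekable
//         });
//     };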
lib.rs

use clap::ArgMatches;
use serde_derive::{Deserialize, Serialize};
use std::fs::File;
use std::io::prelude::*;
use std::path::PathBuf;
use std::time::SystemTime;
use types::ChainSpec;
/// The core configuration of a Lighthouse beacon node.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct Eth2Config {
pub spec_constants: String,
pub spec: ChainSpec,
}
impl Default for Eth2Config {
fn default() -> Self {
Self {
spec_constants: "minimal".to_string(),
spec: ChainSpec::minimal(),
}
}
}
impl Eth2Config {
pub fn mainnet() -> Self {
Self {
spec_constants: "mainnet".to_string(),
spec: ChainSpec::mainnet(),
}
}
pub fn minimal() -> Self {
Self {
spec_constants: "minimal".to_string(),
spec: ChainSpec::minimal(),
}
}
pub fn interop() -> Self {
Self {
spec_constants: "interop".to_string(),
spec: ChainSpec::interop(),
}
}
}
impl Eth2Config {
/// Apply the following arguments to `self`, replacing values if they are specified in `args`.
///
/// Returns an error if arguments are obviously invalid. May succeed even if some values are
/// invalid.
pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> {
if args.is_present("recent-genesis") {
self.spec.min_genesis_time = recent_genesis_time()
}
Ok(())
}
}
/// Returns the current system time, rounded down to the most recent 30-minute boundary.
///
/// Used for easily creating testnets.
fn recent_genesis_time() -> u64 {
let now = SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap()
.as_secs();
let secs_after_last_period = now.checked_rem(30 * 60).unwrap_or(0);
    // genesis is the most recent 30-minute boundary.
now - secs_after_last_period
}
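// Worked example (illustrative): if `now` were 1_000_000 s, then
// 1_000_000 % 1800 = 1000, and recent_genesis_time() would return 999_000,
// i.e. the most recent 30-minute boundary.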
/// Write a configuration to file.
pub fn write_to_file<T>(path: PathBuf, config: &T) -> Result<(), String>
where
T: Default + serde::de::DeserializeOwned + serde::Serialize,
{
if let Ok(mut file) = File::create(path.clone()) {
let toml_encoded = toml::to_string(&config).map_err(|e| {
format!(
"Failed to write configuration to {:?}. Error: {:?}",
path, e
)
})?;
file.write_all(toml_encoded.as_bytes())
.unwrap_or_else(|_| panic!("Unable to write to {:?}", path));
}
Ok(())
}
/// Loads a `ClientConfig` from file. If unable to load from file, generates a default
/// configuration and saves that as a sample file.
pub fn read_from_file<T>(path: PathBuf) -> Result<Option<T>, String>
where
T: Default + serde::de::DeserializeOwned + serde::Serialize,
{
if let Ok(mut file) = File::open(path.clone()) {
let mut contents = String::new();
file.read_to_string(&mut contents)
.map_err(|e| format!("Unable to read {:?}. Error: {:?}", path, e))?;
let config = toml::from_str(&contents)
.map_err(|e| format!("Unable to parse {:?}: {:?}", path, e))?;
Ok(Some(config))
} else {
Ok(None)
}
}
service_reference.rs

// Generated from definition io.k8s.api.auditregistration.v1alpha1.ServiceReference
/// ServiceReference holds a reference to Service.legacy.k8s.io
#[derive(Clone, Debug, Default, PartialEq)]
pub struct ServiceReference {
/// `name` is the name of the service. Required
pub name: String,
/// `namespace` is the namespace of the service. Required
pub namespace: String,
/// `path` is an optional URL path which will be sent in any request to this service.
pub path: Option<String>,
}
impl<'de> crate::serde::Deserialize<'de> for ServiceReference {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
#[allow(non_camel_case_types)]
enum Field {
Key_name,
Key_namespace,
Key_path,
Other,
}
impl<'de> crate::serde::Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
struct Visitor;
impl<'de> crate::serde::de::Visitor<'de> for Visitor {
type Value = Field;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("field identifier")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: crate::serde::de::Error {
Ok(match v {
"name" => Field::Key_name,
"namespace" => Field::Key_namespace,
"path" => Field::Key_path,
_ => Field::Other,
})
}
}
deserializer.deserialize_identifier(Visitor)
}
}
struct Visitor;
impl<'de> crate::serde::de::Visitor<'de> for Visitor {
type Value = ServiceReference;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("ServiceReference")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: crate::serde::de::MapAccess<'de> {
let mut value_name: Option<String> = None;
let mut value_namespace: Option<String> = None;
let mut value_path: Option<String> = None;
while let Some(key) = crate::serde::de::MapAccess::next_key::<Field>(&mut map)? {
match key {
Field::Key_name => value_name = Some(crate::serde::de::MapAccess::next_value(&mut map)?),
Field::Key_namespace => value_namespace = Some(crate::serde::de::MapAccess::next_value(&mut map)?),
Field::Key_path => value_path = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Other => { let _: crate::serde::de::IgnoredAny = crate::serde::de::MapAccess::next_value(&mut map)?; },
}
}
Ok(ServiceReference {
name: value_name.ok_or_else(|| crate::serde::de::Error::missing_field("name"))?,
namespace: value_namespace.ok_or_else(|| crate::serde::de::Error::missing_field("namespace"))?,
path: value_path,
})
}
}
deserializer.deserialize_struct(
"ServiceReference",
&[
"name",
"namespace",
"path",
],
Visitor,
)
}
}
impl crate::serde::Serialize for ServiceReference {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: crate::serde::Serializer {
let mut state = serializer.serialize_struct(
"ServiceReference",
2 +
self.path.as_ref().map_or(0, |_| 1),
)?;
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "name", &self.name)?;
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "namespace", &self.namespace)?;
if let Some(value) = &self.path {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "path", value)?;
}
crate::serde::ser::SerializeStruct::end(state)
}
}
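// Round-trip sketch (illustrative; the field values are hypothetical and a
// serde_json dependency is assumed -- it is not used elsewhere in this file):
//
//     let sr: ServiceReference = serde_json::from_str(
//         r#"{"name":"audit-sink","namespace":"kube-system","path":"/events"}"#,
//     ).unwrap();
//     assert_eq!(sr.path.as_deref(), Some("/events"));
//     let json = serde_json::to_string(&sr).unwrap(); // omits "path" when None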
#[cfg(feature = "schemars")]
impl crate::schemars::JsonSchema for ServiceReference {
fn schema_name() -> String {
"io.k8s.api.auditregistration.v1alpha1.ServiceReference".to_owned()
}
fn json_schema(__gen: &mut crate::schemars::gen::SchemaGenerator) -> crate::schemars::schema::Schema {
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("ServiceReference holds a reference to Service.legacy.k8s.io".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::Object))),
object: Some(Box::new(crate::schemars::schema::ObjectValidation {
properties: std::array::IntoIter::new([
(
"name".to_owned(),
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("`name` is the name of the service. Required".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))),
..Default::default()
}),
),
(
"namespace".to_owned(),
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("`namespace` is the namespace of the service. Required".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))),
..Default::default()
}),
),
(
"path".to_owned(),
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("`path` is an optional URL path which will be sent in any request to this service.".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))),
..Default::default()
}),
),
]).collect(),
required: std::array::IntoIter::new([
"name",
"namespace",
]).map(std::borrow::ToOwned::to_owned).collect(),
..Default::default()
})),
..Default::default()
})
}
}
__init__.py

# Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
from adapt.intent import IntentBuilder
from mycroft.skills.core import MycroftSkill
from mycroft.util.log import getLogger
from os.path import dirname, abspath
from .Domoticz import Domoticz
import sys
import re
__author__ = 'mTreussart'
sys.path.append(abspath(dirname(__file__)))
LOGGER = getLogger(__name__)
class DomoticzSkill(MycroftSkill):
def __init__(self):
super(DomoticzSkill, self).__init__(name="DomoticzSkill")
def initialize(self):
domoticz_switch_intent = IntentBuilder("SwitchIntent")\
.optionally("TurnKeyword")\
.require("StateKeyword")\
.require("WhatKeyword")\
.require("WhereKeyword").build()
self.register_intent(domoticz_switch_intent,
self.handle_domoticz_switch_intent)
domoticz_infos_intent = IntentBuilder("InfosIntent")\
.require("InfosKeyword")\
.require("WhatKeyword")\
.optionally("WhereKeyword")\
.optionally("StateKeyword").build()
self.register_intent(domoticz_infos_intent,
self.handle_domoticz_infos_intent)
def handle_domoticz_switch_intent(self, message):
domoticz = Domoticz(
self.settings.get("hostname"),
self.settings.get("port"),
self.settings.get("protocol"),
self.settings.get("authentication"),
self.settings.get("username"),
self.settings.get("password"),
self.settings.get("feedback"))
state = message.data.get("StateKeyword")
what = message.data.get("WhatKeyword")
where = message.data.get("WhereKeyword")
action = message.data.get("TurnKeyword")
data = {
'what': what,
'where': where
}
LOGGER.debug("message : " + str(message.data))
response = domoticz.switch(state, what, where, action)
        # 'on' and 'off' read naturally without an 'ed' suffix; other states get one.
        ending = "" if str(state).lower() in ('on', 'off') else "ed"
        if response is None:
            self.speak_dialog("NotFound", data)
        elif response == 0:
            self.speak("The " + str(what) + " is already " + str(state).title() + ending)
        elif response == 1:
            self.speak("The " + str(what) + " cannot be operated with " + str(state).title())
elif self.settings.get("feedback"):
self.speak("The " + str(where) + " " + str(what) + " is turned " + str(state).title())
    def handle_domoticz_infos_intent(self, message):
what = message.data.get("WhatKeyword")
where = message.data.get("WhereKeyword")
domoticz = Domoticz(
self.settings.get("hostname"),
self.settings.get("port"),
self.settings.get("protocol"),
self.settings.get("authentication"),
self.settings.get("username"),
self.settings.get("password"),
self.settings.get("feedback"))
data = {
'what': what,
'where': where
}
        response = domoticz.get(what, where)
        if response is None:
            if where is None:
                self.speak_dialog("NotFoundShort", data)
            else:
                self.speak_dialog("NotFound", data)
            return
        data = str(response['Data'])
        if re.search(r'\d\s+C', data):
            data = data.replace(' C', ' degrees celsius')
        if re.search(r'\d\s+F', data):
            data = data.replace(' F', ' degrees fahrenheit')
data = "It's " + data
LOGGER.debug("result : " + str(data))
self.speak(str(data))
def stop(self):
pass
def create_skill():
return DomoticzSkill()
util.py

import hashlib
import signify.fingerprinter
import subprocess
NUM_PCRS = 24
PCR_SIZE = hashlib.sha1().digest_size
def to_hex(buf):
import binascii
return binascii.hexlify(buf).decode()
def hexdump(buf):
for i in range(0, len(buf), 16):
row = buf[i:i+16]
offs = "0x%08x:" % i
hexs = ["%02X" % b for b in row] + [" "] * 16
text = [chr(b) if 0x20 < b < 0x7f else "." for b in row] + [" "] * 16
print(offs, " ".join(hexs[:16]), "|%s|" % "".join(text[:16]))
def hash_file(path, digest="sha1"):
h = getattr(hashlib, digest)()
with open(path, "rb") as fh:
        buf_size = 4 * 1024 * 1024
        for buf in iter(lambda: fh.read(buf_size), b""):
            h.update(buf)
return h.digest()
def hash_pecoff(path, digest="sha1"):
with open(path, "rb") as fh:
fpr = signify.fingerprinter.AuthenticodeFingerprinter(fh)
fpr.add_authenticode_hashers(getattr(hashlib, digest))
        return fpr.hash()[digest]
def init_empty_pcrs():
pcrs = {x: (b"\xFF" if x in {17, 18, 19, 20, 21, 22} else b"\x00") * PCR_SIZE
for x in range(NUM_PCRS)}
return pcrs
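# Sketch of the TPM 1.2 extend rule, for reference (assumption: SHA-1 bank;
# this helper is illustrative and is not used elsewhere in this module):
# new_pcr = SHA1(old_pcr || measurement)
def extend_pcr(pcr_value, measurement):
    return hashlib.sha1(pcr_value + measurement).digest()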
def read_current_pcr(idx):
res = subprocess.run(["tpm2_pcrlist", "-L", "sha1:%d" % idx, "-Q", "-o", "/dev/stdout"],
stdout=subprocess.PIPE)
res.check_returncode()
return res.stdout
def find_mountpoint_by_partuuid(partuuid):
res = subprocess.run(["findmnt", "-S", "PARTUUID=%s" % partuuid, "-o", "TARGET", "-r", "-n"],
stdout=subprocess.PIPE)
res.check_returncode()
return res.stdout.splitlines()[0].decode()
toc.js

define(
{
"toc":[
{
"heading":"Table of Contents",
"topics":[
{
"title":"List of Figures",
"href":"lof.html"
},
{
"title":"List of Tables",
"href":"lot.html"
},
{
"title":"Title and Copyright Information",
"href":"index.html"
},
{
"title":"Preface",
"href":"preface.html#GUID-840BDD16-D5B3-40D5-A4FD-56A219F62368",
"topics":[
{
"title":"Audience",
"href":"preface.html#GUID-023FA5F6-3458-40E0-99FC-32747886C531"
},
{
"title":"Documentation Accessibility",
"href":"preface.html#GUID-E409CC44-9A8F-4043-82C8-6B95CD939296"
},
{
"title":"Related Documents",
"href":"preface.html#GUID-1A3A3881-BA70-457D-8D6B-CEBEDC70F8CC"
},
{
"title":"Conventions",
"href":"preface.html#GUID-803F795F-5E10-44A2-9719-731C8D93D258"
}
]
},
{
"title":"<span class=\"secnum\">1 </span> Introduction to Oracle Database Gateway for APPC",
"href":"appc-gateway-introduction.html#GUID-624068CA-767C-4BA8-9C4E-3051ACF26257",
"topics":[
{
"title":"Overview of the Gateway",
"href":"appc-gateway-introduction.html#GUID-51421C36-0119-468B-9176-4DEA86791EC8"
},
{
"title":"Features of the Gateway",
"href":"appc-gateway-introduction.html#GUID-4757EF36-6039-4D76-A6D1-9FBD3659E8D8"
},
{
"title":"Terms",
"href":"appc-gateway-introduction.html#GUID-400C46E0-0637-4C48-A255-C2B796438793"
},
{
"title":"Architecture of the Gateway",
"href":"appc-gateway-introduction.html#GUID-3E12BDA4-0BD9-472D-A493-CB941CBADDC6"
},
{
"title":"Implementation of the Gateway",
"href":"appc-gateway-introduction.html#GUID-1545F917-23F3-4809-9E41-48365BEC9D9E"
},
{
"title":"Communication with the Gateway",
"href":"appc-gateway-introduction.html#GUID-B7302561-3830-4052-B397-6741162B0E69"
},
{
"title":"Remote Procedural Call Functions",
"href":"appc-gateway-introduction.html#GUID-8FD607A5-4E24-4D21-8511-CCD910540486",
"topics":[
{
"title":"Description of RPC Functions",
"href":"appc-gateway-introduction.html#GUID-96043CA3-D580-4579-BACE-4865BFAE9BDD",
"topics":[
{
"title":"Remote Transaction Initiation",
"href":"appc-gateway-introduction.html#GUID-B6180A6D-EAC2-44DC-A950-9D250670E7C2"
},
{
"title":"Data Exchange",
"href":"appc-gateway-introduction.html#GUID-71F30B22-247D-4CC4-BD6E-6534ACFB9EA1"
},
{
"title":"Remote Transaction Termination",
"href":"appc-gateway-introduction.html#GUID-6B7A2585-5F55-469C-83C0-177D121405F0"
}
]
}
]
},
{
"title":"Transaction Types for Gateway Using SNA",
"href":"appc-gateway-introduction.html#GUID-E967E94D-8143-42A0-9654-1214D8F81BD6"
},
{
"title":"Transaction Types for Gateway Using TCP/IP",
"href":"appc-gateway-introduction.html#GUID-AA7DD41D-6E8F-47A2-8940-42FD187D36D8"
}
]
},
{
"title":"<span class=\"secnum\">2 </span> Release Information",
"href":"appc-gateway-release-information.html#GUID-F586427F-484F-4B21-852E-7CECA56988B6",
"topics":[
{
"title":"Product Set",
"href":"appc-gateway-release-information.html#GUID-1AD4F10C-8FBE-4CEE-B59E-EFB470E32571"
},
{
"title":"Changes and Enhancements",
"href":"appc-gateway-release-information.html#GUID-89B83CDB-B8DF-4B9D-96FB-2D17849BB4AA",
"topics":[
{
"title":"Gateway Password Encryption Tool",
"href":"appc-gateway-release-information.html#GUID-A56338A1-9EA0-417C-85BD-81672EC787D6"
},
{
"title":"Partial IPv6 Support",
"href":"appc-gateway-release-information.html#GUID-3455ABBB-E573-4137-8CD5-DF520EE71FE6"
}
]
},
{
"title":"Known Restrictions",
"href":"appc-gateway-release-information.html#GUID-DC385AE7-3BC5-4EFC-950A-1F350A06381D",
"topics":[
{
"title":"Known Restrictions for the Gateway",
"href":"appc-gateway-release-information.html#GUID-C6EA18B1-0A40-4BFB-A23E-259567922859"
},
{
"title":"Known Restrictions for PGAU",
"href":"appc-gateway-release-information.html#GUID-3275CFB0-D805-424B-9D86-BE264424B633"
}
]
}
]
},
{
"title":"<span class=\"secnum\">3 </span> System Requirements",
"href":"appc-gateway-system-requirements.html#GUID-9CE31DCF-BE16-4338-8E1B-683D829E21C9",
"topics":[
{
"title":"Hardware Requirements",
"href":"appc-gateway-system-requirements.html#GUID-9C66BFF3-9BDF-44A3-A852-1C47B0E9424C",
"topics":[
{
"title":"Processor Requirements",
"href":"appc-gateway-system-requirements.html#GUID-59450A5F-8410-4EA9-8CA0-87A10521934A"
},
{
"title":"Memory Requirements",
"href":"appc-gateway-system-requirements.html#GUID-EE7491D6-BD00-487B-BBD9-C10A697B7C81"
},
{
"title":"Network Attachment Requirements",
"href":"appc-gateway-system-requirements.html#GUID-54E9DC59-C7E6-4B5F-9285-D80415C82C50"
},
{
"title":"Disk Space Requirements",
"href":"appc-gateway-system-requirements.html#GUID-EEF4E41B-4370-48DB-90B2-F6F60052A18B"
}
]
},
{
"title":"Software Requirements",
"href":"appc-gateway-system-requirements.html#GUID-20D5707D-E453-4EFA-91AE-6C73369FAC28",
"topics":[
{
"title":"Operating System Requirements",
"href":"appc-gateway-system-requirements.html#GUID-AC3B5A5D-14A0-4207-8DB5-A830DD8A2B8C"
},
{
"title":"Communication Protocol Requirements",
"href":"appc-gateway-system-requirements.html#GUID-8AF88E61-E8F4-4809-9E6A-82177C3EF483"
},
{
"title":"Oracle Database Requirements",
"href":"appc-gateway-system-requirements.html#GUID-51F252B8-75F2-43DA-AA20-4C8E2A8458EF"
},
{
"title":"Oracle Networking Product Requirements",
"href":"appc-gateway-system-requirements.html#GUID-061BB541-733E-4BBA-9E74-16287528BC36"
},
{
"title":"IBM Mainframe Requirements",
"href":"appc-gateway-system-requirements.html#GUID-1319318A-B93C-452A-9C92-2AE43BD939FB"
}
]
}
]
},
{
"title":"<span class=\"secnum\">4 </span> Installing the Gateway",
"href":"appc-gateway-installation.html#GUID-7EF0DEAA-52AF-415D-A0B0-F0564076BB4A",
"topics":[
{
"title":"Before You Begin",
"href":"appc-gateway-installation.html#GUID-F285491D-B7E3-4F47-BC42-CCC743DD197E"
},
{
"title":"Planning to Upgrade or Migrate the Gateway",
"href":"appc-gateway-installation.html#GUID-AADBE3F5-2412-4B09-AED2-1439A7B5BE1B",
"topics":[
{
"title":"Performing Pre-Upgrade Procedures",
"href":"appc-gateway-installation.html#GUID-A0174300-F393-4750-86DD-55FA09D2C504"
},
{
"title":"Upgrade and Migration Considerations",
"href":"appc-gateway-installation.html#GUID-178335E6-228B-440A-A8FB-DBE661665271"
},
{
"title":"Restoration",
"href":"appc-gateway-installation.html#GUID-6158CFD3-C203-4F97-8C7B-CAC51CC02EF4"
}
]
},
{
"title":"Performing Preinstallation Procedures",
"href":"appc-gateway-installation.html#GUID-6DF74585-E6F8-47DF-96F5-65016389A74A",
"topics":[
{
"title":"Gateway Installation Methods",
"href":"appc-gateway-installation.html#GUID-2DD46FEC-909E-4352-9461-82E1E0B8CE27"
}
]
},
{
"title":"Installing the Gateway Software",
"href":"appc-gateway-installation.html#GUID-413BA6FF-FD5D-4F4F-8E50-9D67BE70E5AE"
},
{
"title":"Using Windows User Account as Oracle Home User",
"href":"appc-gateway-installation.html#GUID-17A60E5D-B00B-4E1E-826B-86438B85EFF2"
},
{
"title":"Installation Steps",
"href":"appc-gateway-installation.html#GUID-B0C88AC3-3870-4BD9-8154-5E15AF2D78D6",
"topics":[
{
"title":"Step 1: Log On to Windows System",
"href":"appc-gateway-installation.html#GUID-1F02D2F9-286A-43AF-B852-5FC86C80CF60"
},
{
"title":"Step 2: Ensure Minimum Amount of Disk Space",
"href":"appc-gateway-installation.html#GUID-928BE048-16AD-483D-B2C9-35F76BE6B257"
},
{
"title":"Step 3: Stop All Oracle Services",
"href":"appc-gateway-installation.html#GUID-E1FC2669-ED7B-4DC4-89EE-5C907D8554DB"
},
{
"title":"Step 4: Insert the Gateway Product Installation Media",
"href":"appc-gateway-installation.html#GUID-07489F94-4F80-4F64-B1BE-AC2646F030C6"
},
{
"title":"Step 5: Start the Oracle Universal Installer",
"href":"appc-gateway-installation.html#GUID-9EFCEAE3-2B2F-4A22-9C1F-670245F0D251"
},
{
"title":"Step 6: Step Through the Oracle Universal Installer",
"href":"appc-gateway-installation.html#GUID-3D8D7CE8-64BF-4DC8-A77A-275ECDF61F84",
"topics":[
{
"title":"Oracle Universal Installer on Windows Platforms",
"href":"appc-gateway-installation.html#GUID-DC9C3CD8-AD72-459F-9E16-6DB4A66E8403"
}
]
}
]
},
{
"title":"Removing Your Oracle Database Gateway for APPC",
"href":"appc-gateway-installation.html#GUID-0D9BF0B7-456C-41EC-8A6A-CB238F73DD72",
"topics":[
{
"title":"About the Deinstallation Tool",
"href":"appc-gateway-installation.html#GUID-4E6C5CCC-D924-4746-98A8-D6DC825096E3"
},
{
"title":"Removing Oracle Software",
"href":"appc-gateway-installation.html#GUID-1BD5CECF-B172-4224-9171-7EBA5AF8556C"
}
]
}
]
},
{
"title":"<span class=\"secnum\">5 </span> Configuring Your Oracle Network",
"href":"appc-gateway-configure-oracle-network.html#GUID-2A3A2F2C-7FF5-45C4-AC94-561AD15DFCD4"
},
{
"title":"<span class=\"secnum\">6 </span> Configuring the SNA Communication Package on Windows",
"href":"appc-gateway-configure-sna.html#GUID-88C38A89-B41D-4070-A707-F89F648B3CAE",
"topics":[
{
"title":"Using SNA Security Validation",
"href":"appc-gateway-configure-sna.html#GUID-996B1CEB-86CD-4261-A48C-0C7ABB10A40F"
},
{
"title":"Processing Inbound Connections",
"href":"appc-gateway-configure-sna.html#GUID-7C139FD4-2B62-47F3-80F7-BFFA03583480"
},
{
"title":"Configuring Your Microsoft Host Integration Server",
"href":"appc-gateway-configure-sna.html#GUID-37376AC3-E491-480B-ACD1-035F6E603559",
"topics":[
{
"title":"Independent Versus Dependent LUs",
"href":"appc-gateway-configure-sna.html#GUID-2FF7DD09-75FC-4556-BC70-9B2B25B59882"
},
{
"title":"Location of Sample SNA Server Definitions",
"href":"appc-gateway-configure-sna.html#GUID-49DDABA1-E7BC-4558-9C25-01C554D43A54"
},
{
"title":"HIS Definition Types",
"href":"appc-gateway-configure-sna.html#GUID-E3EEB335-124B-4130-A7CF-7BC39E85DC2F"
},
{
"title":"Methods of Creating SNA Server Definitions for the Gateway",
"href":"appc-gateway-configure-sna.html#GUID-020CE353-C095-4B6C-9390-55C9EE5099B5"
}
]
},
{
"title":"Creating SNA Server Definitions on Microsoft Host Integration Server",
"href":"appc-gateway-configure-sna.html#GUID-2050119C-E715-4B4F-A483-A25B6C3A43AE",
"topics":[
{
"title":"Server Selection",
"href":"appc-gateway-configure-sna.html#GUID-0EB96A60-A93F-4527-AB36-9286BED5CABA"
},
{
"title":"Link Service Definition",
"href":"appc-gateway-configure-sna.html#GUID-A510A568-09D1-4005-80F6-F7339A261B86"
},
{
"title":"Connection Definition",
"href":"appc-gateway-configure-sna.html#GUID-9E9C6315-0846-4DC8-A0A3-58CC329F15AD"
},
{
"title":"Local LU Definition",
"href":"appc-gateway-configure-sna.html#GUID-B6997C13-4727-4CDC-8ACA-A7E7E236580B"
},
{
"title":"Mode Definition",
"href":"appc-gateway-configure-sna.html#GUID-046B6F4A-DFDF-43E5-B3F1-9E564A8B56E6"
},
{
"title":"Remote LU Definition",
"href":"appc-gateway-configure-sna.html#GUID-7525F7F0-886F-4604-8C2B-6C9F11AD522B"
},
{
"title":"CPI-C Symbolic Destination Name",
"href":"appc-gateway-configure-sna.html#GUID-96C22A3D-4240-4DAA-AA4B-E0F82E9DEE87"
}
]
},
{
"title":"Configuring an IBM Communications Server",
"href":"appc-gateway-configure-sna.html#GUID-6F690AC1-F53E-476E-827D-A9A05B846D70",
"topics":[
{
"title":"Independent And Dependent LUs",
"href":"appc-gateway-configure-sna.html#GUID-A041D839-8F02-4533-9C5E-0CB8D3996724"
},
{
"title":"Definition Types",
"href":"appc-gateway-configure-sna.html#GUID-93E11F9E-4AD4-468A-8964-7E8D24CA9573"
}
]
},
{
"title":"Creating IBM Communications Server Definitions for the Gateway",
"href":"appc-gateway-configure-sna.html#GUID-27C4A162-C9E3-4538-B085-45BCC0BC8121",
"topics":[
{
"title":"Creating the Configuration",
"href":"appc-gateway-configure-sna.html#GUID-5C9D0DDA-12B0-4F40-A195-A5E59DFFCFF5"
},
{
"title":"Creating the Node",
"href":"appc-gateway-configure-sna.html#GUID-CAF7B733-F240-419A-ACFD-F3FB3272E8B0"
},
{
"title":"Creating Devices",
"href":"appc-gateway-configure-sna.html#GUID-D3C70BE0-7066-4088-BAF4-332DC8163F63"
},
{
"title":"Choosing the Device Type",
"href":"appc-gateway-configure-sna.html#GUID-12824BBE-1C17-4A39-840E-6C1459D5EDD6"
},
{
"title":"Configuring a LAN Device",
"href":"appc-gateway-configure-sna.html#GUID-1DDC9541-3C1E-41AD-9705-21B07032C12C"
},
{
"title":"Creating Peer Connections",
"href":"appc-gateway-configure-sna.html#GUID-4455C8C8-F532-4083-9DD1-3552733739F8"
},
{
"title":"Defining the Link Station",
"href":"appc-gateway-configure-sna.html#GUID-2AC52A55-5865-4297-8657-7B72296716DE"
},
{
"title":"Defining the Adjacent Node",
"href":"appc-gateway-configure-sna.html#GUID-A978129C-3B26-4ED0-AA6E-3F394F07E572"
},
{
"title":"Creating Local LUs",
"href":"appc-gateway-configure-sna.html#GUID-5C6AED2F-1AE6-4FCC-88E0-6D802F6BF654"
},
{
"title":"Defining Local LUs",
"href":"appc-gateway-configure-sna.html#GUID-66F4DB74-713F-43F6-8D47-9D9905CF6B71"
},
{
"title":"Creating Partner LUs",
"href":"appc-gateway-configure-sna.html#GUID-02079A83-4FA7-41E3-A340-A036185D1D04"
},
{
"title":"Defining Partner LUs",
"href":"appc-gateway-configure-sna.html#GUID-2ECB5D88-56A0-4D61-8129-D2C6EEBC709B"
},
{
"title":"Creating the CPI-C Side Information Profile",
"href":"appc-gateway-configure-sna.html#GUID-B99E80E7-9736-4BE7-928C-69107200350B"
}
]
},
{
"title":"Testing the Connection",
"href":"appc-gateway-configure-sna.html#GUID-DB9D6EA8-44BC-421A-BBF8-3F3DCD2CB282"
},
{
"title":"Resume Configuration of the Gateway",
"href":"appc-gateway-configure-sna.html#GUID-9374497B-F2F8-487F-A334-0082AF2F9FEF"
}
]
},
{
"title":"<span class=\"secnum\">7 </span> Configuring the OLTP",
"href":"appc-gateway-configure-oltp.html#GUID-7B0371E7-BBF8-4057-BFC4-4B1FA1BB54BB",
"topics":[
{
"title":"Configuring the OLTP for Your SNA Environment",
"href":"appc-gateway-configure-oltp.html#GUID-0FD2FE94-3229-4D0E-83F2-7B35B6DEF7CC"
},
{
"title":"Configuring the OLTP for Your TCP/IP Environment",
"href":"appc-gateway-configure-oltp.html#GUID-2B76963A-BD65-4D5B-BDFE-86A50847553B"
}
]
},
{
"title":"<span class=\"secnum\">8 </span> Configuring the Gateway Using SNA Communication Protocol",
"href":"configure-appc-gateway-using-sna.html#GUID-D5DF29FD-80BA-4E55-831D-D9CBD826E413",
"topics":[
{
"title":"Before You Begin",
"href":"configure-appc-gateway-using-sna.html#GUID-F913CE5A-572C-4C9B-A4F0-2EB4AA3631F4"
},
{
"title":"Preparing to Configure a Gateway Installation/Upgrade",
"href":"configure-appc-gateway-using-sna.html#GUID-B9313098-CEE8-4D8B-837B-ED87E67F7D09"
},
{
"title":"Integrating Server Configuration: First-Time Gateway Installations",
"href":"configure-appc-gateway-using-sna.html#GUID-C6CBC625-C0FE-496C-9EEC-096A04004A7D"
},
{
"title":"Upgrading or Migrating the Oracle Database from Previous Gateways",
"href":"configure-appc-gateway-using-sna.html#GUID-4E439D8E-8B91-439A-B239-1AE0FBA43154",
"topics":[
{
"title":"If You Must Reinstall Package Specifications",
"href":"configure-appc-gateway-using-sna.html#GUID-6ED43492-2A39-4F74-B830-1D79F7A69B32"
},
{
"title":"Upgrading PGAU from Previous Gateway Releases",
"href":"configure-appc-gateway-using-sna.html#GUID-A95D962C-066C-4FA1-BBD6-1E7FFAFD819E"
}
]
},
{
"title":"Configuring the Oracle Database for Gateways to Coexist",
"href":"configure-appc-gateway-using-sna.html#GUID-40EEFA66-1FCB-47BB-9EFF-EE6B25B97D5F"
},
{
"title":"Optional Configuration Steps to Permit Multiple Users",
"href":"configure-appc-gateway-using-sna.html#GUID-C750A196-5039-4370-99ED-0EB71862A930"
},
{
"title":"Configuring the Gateway",
"href":"configure-appc-gateway-using-sna.html#GUID-383121C7-26B7-4869-9C28-D9E83EAB79EA"
},
{
"title":"Configuring Commit-Confirm",
"href":"configure-appc-gateway-using-sna.html#GUID-D2D24B90-BD74-455C-98B5-08B35261CF06",
"topics":[
{
"title":"Configuring the Oracle Database for Commit-Confirm",
"href":"configure-appc-gateway-using-sna.html#GUID-A9F0651B-9F3B-427D-A7E1-D12E01CC5374"
},
{
"title":"Configuring Gateway Initialization Parameters for Commit-Confirm",
"href":"configure-appc-gateway-using-sna.html#GUID-D0334257-DC09-45B3-8071-62EE8F0A43E2"
},
{
"title":"Configuring the OLTP for Commit-Confirm",
"href":"configure-appc-gateway-using-sna.html#GUID-4E9B411B-D554-4945-825A-B4316404F7C0"
}
]
},
{
"title":"Verifying the Gateway Installation and OLTP Configuration",
"href":"configure-appc-gateway-using-sna.html#GUID-04DFBC73-2DC5-437C-83F5-CB679F20C35C",
"topics":[
{
"title":"Verifying the Gateway Installation",
"href":"configure-appc-gateway-using-sna.html#GUID-4270B0C3-A67D-4A2D-A06D-9B74836ED48E"
},
{
"title":"Verifying the OLTP Configuration",
"href":"configure-appc-gateway-using-sna.html#GUID-273B8DEB-A60B-4B98-9122-8E03D0FFB216",
"topics":[
{
"title":"CICS Verification",
"href":"configure-appc-gateway-using-sna.html#GUID-BEFDE1DF-6102-48C2-86B5-315E7E9C485F"
},
{
"title":"IMS/TM Verification",
"href":"configure-appc-gateway-using-sna.html#GUID-0D83B68D-908B-427E-A1E7-DDEED4B95C5E"
},
{
"title":"APPC/MVS Verification",
"href":"configure-appc-gateway-using-sna.html#GUID-15353C3B-AC05-40C8-9CC7-F899E56239CD"
}
]
},
{
"title":"Verifying OLTP Configuration for Commit-Confirm",
"href":"configure-appc-gateway-using-sna.html#GUID-487E4EE1-6B30-4B46-A79D-F3A0389EC612"
}
]
},
{
"title":"Performing Postinstallation Procedures",
"href":"configure-appc-gateway-using-sna.html#GUID-04B54C37-0458-4E94-BA62-D00F2F823AFD",
"topics":[
{
"title":"Installing Sample Applications",
"href":"configure-appc-gateway-using-sna.html#GUID-129F07F3-C3DC-4BCC-9C4E-10A98AB35E98"
}
]
}
]
},
{
"title":"<span class=\"secnum\">9 </span> Configuring the Gateway Using TCP/IP Communication Protocol",
"href":"configure-appc-gateway-using-tcp-ip.html#GUID-2E8F3302-CA66-45F1-A0F0-241FDEC9EBCE",
"topics":[
{
"title":"Before You Begin",
"href":"configure-appc-gateway-using-tcp-ip.html#GUID-07D12664-17E3-4DC4-B3C8-3BD452A44675"
},
{
"title":"Preparing to Configure a Gateway Installation/Upgrade",
"href":"configure-appc-gateway-using-tcp-ip.html#GUID-FCAB1574-8D8E-4F0F-93DA-9D79FB9FAB1A"
},
{
"title":"Configuring the Oracle Database : First Time Installation",
"href":"configure-appc-gateway-using-tcp-ip.html#GUID-C1D6A15C-21DD-480A-A45D-EE4D3554B54C"
},
{
"title":"Upgrading or Migrating the Oracle Database from Previous Gateways",
"href":"configure-appc-gateway-using-tcp-ip.html#GUID-9883203F-7CBC-484D-BCC3-CA836A9946B1",
"topics":[
{
"title":"If You Must Reinstall Package Specifications",
"href":"configure-appc-gateway-using-tcp-ip.html#GUID-8ECDBDAF-284D-447F-A8D6-767D82C8420A"
},
{
"title":"Upgrading PGAU from Previous Gateway Releases",
"href":"configure-appc-gateway-using-tcp-ip.html#GUID-F632840A-F2C5-496A-83E4-CCABBF1F7F39"
}
]
},
{
"title":"Optional Configuration Steps to Permit Multiple Users",
"href":"configure-appc-gateway-using-tcp-ip.html#GUID-9F6E76E3-09C2-47EE-B212-A7B8153752C6"
},
{
"title":"Configuring TCP/IP for the Gateway",
"href":"configure-appc-gateway-using-tcp-ip.html#GUID-4401EF36-6A5E-4DF2-8FB8-57D8EEFEDB92"
},
{
"title":"Configuring the Gateway",
"href":"configure-appc-gateway-using-tcp-ip.html#GUID-A60824B1-6CFC-4CC5-BB8A-C977BFA8A189"
},
{
"title":"Loading the PGA_TCP_IMSC Table",
"href":"configure-appc-gateway-using-tcp-ip.html#GUID-7FBE512D-4608-482A-8F27-EFBFF6BAE583"
},
{
"title":"Verifying the Gateway Installation and OLTP Configuration",
"href":"configure-appc-gateway-using-tcp-ip.html#GUID-8D4B842E-0CF0-4072-8FF6-3AC0737A7D00",
"topics":[
{
"title":"Verifying the Gateway Installation",
"href":"configure-appc-gateway-using-tcp-ip.html#GUID-02491AB4-86E5-4F50-B968-BE0457542AE5"
},
{
"title":"Verifying the OLTP Configuration",
"href":"configure-appc-gateway-using-tcp-ip.html#GUID-C360B987-27C2-47CE-8F69-A808B63A8C20",
"topics":[
{
"title":"IMS/TM Verification",
"href":"configure-appc-gateway-using-tcp-ip.html#GUID-85147605-CE9E-4F69-8243-CC3FC50F1B65"
}
]
}
]
},
{
"title":"Performing Postinstallation Procedures",
"href":"configure-appc-gateway-using-tcp-ip.html#GUID-7FB56CC9-2004-4716-9DF5-4082EC028816",
"topics":[
{
"title":"Installing Sample Applications",
"href":"configure-appc-gateway-using-tcp-ip.html#GUID-CA7EA2DB-3417-4206-8852-211FDAA6786A"
}
]
}
]
},
{
"title":"<span class=\"secnum\">10 </span> Security Requirements",
"href":"appc-gateway-security-requirements.html#GUID-87EF21D2-821F-4552-AB91-E0EC5C8A81CB",
"topics":[
{
"title":"Overview of Security Requirements",
"href":"appc-gateway-security-requirements.html#GUID-D6052F75-75BE-4CD2-A66A-6BA298028845"
},
{
"title":"Authenticating Application Logons",
"href":"appc-gateway-security-requirements.html#GUID-89593F28-EE64-4B7F-B757-A4C5EB4D83F7"
},
{
"title":"Defining and Controlling Database Links",
"href":"appc-gateway-security-requirements.html#GUID-C589D41D-90BD-4E78-8AD3-D63BBB7755BF",
"topics":[
{
"title":"Link Accessibility",
"href":"appc-gateway-security-requirements.html#GUID-214E2E55-2123-4A57-B7B5-C407201A7D7B"
},
{
"title":"Links and CONNECT Clauses",
"href":"appc-gateway-security-requirements.html#GUID-FB28A9DE-C5E7-4F67-B649-86E3D19C8151"
}
]
},
{
"title":"Using SNA Security Validation",
"href":"appc-gateway-security-requirements.html#GUID-9A645D8B-8805-41A8-81FE-E9A4985C654F",
"topics":[
{
"title":"Specifying SNA Conversation Security",
"href":"appc-gateway-security-requirements.html#GUID-B54F7B7A-31A4-4348-8B86-8467EAD963B0",
"topics":[
{
"title":"SNA Security Option SECURITY=NONE",
"href":"appc-gateway-security-requirements.html#GUID-70AEBAE0-A900-4C6C-859A-6831D944472F"
},
{
"title":"SNA Security Option SECURITY=PROGRAM",
"href":"appc-gateway-security-requirements.html#GUID-137F2523-60FF-4AB1-9A82-07823A5A0875"
}
]
}
]
},
{
"title":"TCP/IP Security",
"href":"appc-gateway-security-requirements.html#GUID-5B363323-57C7-47D8-8DE1-5B883448005B",
"topics":[
{
"title":"Specifying TCP/IP Conversation Security",
"href":"appc-gateway-security-requirements.html#GUID-CC900504-284A-4348-B180-7166ED16CE69",
"topics":[
{
"title":"TCP/IP Security Option SECURITY=NONE",
"href":"appc-gateway-security-requirements.html#GUID-35A2BB70-87C0-49F8-9367-7FB3E628CE4B"
},
{
"title":"TCP/IP Security Option SECURITY=PROGRAM",
"href":"appc-gateway-security-requirements.html#GUID-F6FE5DC3-DAA3-4FE3-AA62-F073E40B4069"
}
]
}
]
},
{
"title":"Passwords in the Gateway Initialization File",
"href":"appc-gateway-security-requirements.html#GUID-5BE1BC54-C7E5-4FA0-B1F3-257C54237E1C"
}
]
},
{
"title":"<span class=\"secnum\">11 </span> Migrating from Existing Gateways", | "href":"appc-gateway-migration.html#GUID-5B666B0B-A175-4453-9EAF-4EC4C63B22A7",
"topics":[
{
"title":"Step 1: Install the New Release",
"href":"appc-gateway-migration.html#GUID-5A896CA4-BC9C-42BE-875A-3B7AC2934751"
},
{
"title":"Step 2: Transfer initsid.ora Gateway Initialization File Parameters",
"href":"appc-gateway-migration.html#GUID-AB45B0AC-F4E3-4132-8BC8-261F56C1968B"
},
{
"title":"Backout Considerations When Migrating to New Releases",
"href":"appc-gateway-migration.html#GUID-B7C5EC69-7F8C-4415-B38B-79DA6556C87E"
},
{
"title":"Oracle Net Considerations",
"href":"appc-gateway-migration.html#GUID-19897933-4A23-40F4-A360-4D5D44EA6D79"
},
{
"title":"Parameter Changes: Version 4 to 12c Release 2 (12.2) of the Gateway",
"href":"appc-gateway-migration.html#GUID-59034BAA-DFCF-49E4-8069-C16BCA35D982"
},
{
"title":"Parameter Changes: Version 8 or Earlier to Gateway 12c Release 2 (12.2)",
"href":"appc-gateway-migration.html#GUID-DB50E653-6F5D-49A4-9F44-3B437C4C402C"
},
{
"title":"Migrating from Gateway Release 9.0.1 or 9.2.0 to Gateway 12c Release 2 (12.2)",
"href":"appc-gateway-migration.html#GUID-E834764F-24C6-4C32-9315-F45F5F317806"
}
]
},
{
"title":"Migrating from an Existing Gateway Using SNA to TCP/IP",
"href":"appc-gateway-migration.html#GUID-A95CF937-367D-4849-B9E3-1FDC885535AA",
"topics":[
{
"title":"To Use Existing TIPs with Existing Side Profile Definitions",
"href":"appc-gateway-migration.html#GUID-60F74020-0D5C-4792-8E15-D9617DEA146A"
}
]
}
]
},
{
"title":"<span class=\"secnum\">A </span> Gateway Initialization Parameters for SNA Protocol",
"href":"appc-gateway-initialization-parameters-sna.html#GUID-84F7EB01-3551-4335-A54A-918F7E637F5D",
"topics":[
{
"title":"PGA Parameters",
"href":"appc-gateway-initialization-parameters-sna.html#GUID-FA92B3D4-6DB1-499A-A382-B243EFCFBC98"
},
{
"title":"PGA_CAPABILITY Parameter Considerations",
"href":"appc-gateway-initialization-parameters-sna.html#GUID-AFCB48DA-EF85-42A3-A7C7-62E8DA1CD778"
},
{
"title":"PGA_CONFIRM Parameter Considerations",
"href":"appc-gateway-initialization-parameters-sna.html#GUID-F52E3C91-38FE-4935-87B0-89E5B2AAA9FD"
},
{
"title":"Sample listener.ora File for a Gateway Using SNA",
"href":"appc-gateway-initialization-parameters-sna.html#GUID-B7251BC4-D7B4-4B35-8560-66A602D3FF31"
},
{
"title":"Sample tnsnames.ora File for a Gateway Using SNA",
"href":"appc-gateway-initialization-parameters-sna.html#GUID-396255E1-6B6C-49AE-9D15-A03AA1C73CD5"
}
]
},
{
"title":"<span class=\"secnum\">B </span> Gateway Initialization Parameters for TCP/IP Communication Protocol",
"href":"appc-gateway-initialization-parameters-tcp-ip.html#GUID-67D9D370-DB10-430B-93B8-BD8844DE7E3E",
"topics":[
{
"title":"Gateway Initialization Parameter File Using TCP/IP",
"href":"appc-gateway-initialization-parameters-tcp-ip.html#GUID-4FBBCBDD-0B42-49D7-8104-63DEDD046DCE"
},
{
"title":"Output for the pg4tcpmap Tool",
"href":"appc-gateway-initialization-parameters-tcp-ip.html#GUID-C9B3713C-E8A0-4D7A-A6C9-504B83D64733",
"topics":[
{
"title":"Sample listener.ora File for a Gateway Using TCP/IP",
"href":"appc-gateway-initialization-parameters-tcp-ip.html#GUID-6482FDAA-0A19-4DA7-8D48-234DA865554A"
},
{
"title":"Sample tnsnames.ora File for a Gateway Using TCP/IP",
"href":"appc-gateway-initialization-parameters-tcp-ip.html#GUID-E852A6CF-7C38-4595-B1A2-8EC6AE432E53"
}
]
}
]
},
{
"title":"<span class=\"secnum\">C </span> Gateway Terminology",
"href":"appc-gateway-terminology.html#GUID-E053475C-BC9A-4116-A4AF-0E8449F33F4E"
},
{
"title":"<span class=\"secnum\">D </span> Configuration Worksheet",
"href":"appc-gateway-configuration-worksheet.html#GUID-96B6C85D-6B37-42C7-ADE0-5752F46AF88A"
},
{
"title":"Index",
"href":"book-index.html"
}
]
}
]
});
write.rs

//! Writer-based compression/decompression streams
use std::io::prelude::*;
use std::io;
#[cfg(feature = "tokio")]
use futures::Poll;
#[cfg(feature = "tokio")]
use tokio_io::{AsyncRead, AsyncWrite};
use {Action, Status, Compression, Compress, Decompress};
/// A compression stream which will have uncompressed data written to it and
/// will write compressed data to an output stream.
pub struct BzEncoder<W: Write> {
data: Compress,
obj: Option<W>,
buf: Vec<u8>,
done: bool,
}
/// A compression stream which will have compressed data written to it and
/// will write uncompressed data to an output stream.
pub struct BzDecoder<W: Write> {
data: Decompress,
obj: Option<W>,
buf: Vec<u8>,
done: bool,
}
impl<W: Write> BzEncoder<W> {
    /// Create a new compression stream which will compress at the given level
    /// and write the compressed output to the given output stream.
pub fn new(obj: W, level: Compression) -> BzEncoder<W> {
BzEncoder {
data: Compress::new(level, 30),
obj: Some(obj),
buf: Vec::with_capacity(32 * 1024),
done: false,
}
}
fn dump(&mut self) -> io::Result<()> {
while self.buf.len() > 0 {
let n = match self.obj.as_mut().unwrap().write(&self.buf) {
Ok(n) => n,
Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue,
Err(err) => return Err(err),
};
self.buf.drain(..n);
}
Ok(())
}
/// Acquires a reference to the underlying writer.
pub fn get_ref(&self) -> &W {
self.obj.as_ref().unwrap()
}
/// Acquires a mutable reference to the underlying writer.
///
/// Note that mutating the output/input state of the stream may corrupt this
/// object, so care must be taken when using this method.
pub fn get_mut(&mut self) -> &mut W {
self.obj.as_mut().unwrap()
}
/// Attempt to finish this output stream, writing out final chunks of data.
///
/// Note that this function can only be used once data has finished being
/// written to the output stream. After this function is called then further
/// calls to `write` may result in a panic.
///
/// # Panics
///
/// Attempts to write data to this stream may result in a panic after this
/// function is called.
pub fn try_finish(&mut self) -> io::Result<()> {
while !self.done {
try!(self.dump());
let res = self.data.compress_vec(&[], &mut self.buf, Action::Finish);
if res == Ok(Status::StreamEnd) {
self.done = true;
break
}
}
self.dump()
}
/// Consumes this encoder, flushing the output stream.
///
/// This will flush the underlying data stream and then return the contained
/// writer if the flush succeeded.
///
/// Note that this function may not be suitable to call in a situation where
/// the underlying stream is an asynchronous I/O stream. To finish a stream
/// the `try_finish` (or `shutdown`) method should be used instead. To
/// re-acquire ownership of a stream it is safe to call this method after
/// `try_finish` or `shutdown` has returned `Ok`.
    pub fn finish(mut self) -> io::Result<W> {
try!(self.try_finish());
Ok(self.obj.take().unwrap())
}
/// Returns the number of bytes produced by the compressor
///
/// Note that, due to buffering, this only bears any relation to
/// `total_in()` after a call to `flush()`. At that point,
/// `total_out() / total_in()` is the compression ratio.
pub fn total_out(&self) -> u64 {
self.data.total_out()
}
/// Returns the number of bytes consumed by the compressor
/// (e.g. the number of bytes written to this stream.)
pub fn total_in(&self) -> u64 {
self.data.total_in()
}
}
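// Illustrative usage sketch (not part of the original file): round-trips a
// small buffer through `BzEncoder`, reading the compression ratio after a
// flush as documented above. The function name is hypothetical.
#[allow(dead_code)]
fn _bz_encoder_sketch() -> io::Result<()> {
    let mut enc = BzEncoder::new(Vec::new(), Compression::default());
    enc.write_all(b"hello hello hello")?;
    enc.flush()?; // total_out()/total_in() is meaningful only after a flush
    let _ratio = enc.total_out() as f64 / enc.total_in() as f64;
    let _inner: Vec<u8> = enc.finish()?; // consumes the encoder
    Ok(())
}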
impl<W: Write> Write for BzEncoder<W> {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
loop {
try!(self.dump());
let total_in = self.total_in();
self.data.compress_vec(data, &mut self.buf, Action::Run)
.unwrap();
let written = (self.total_in() - total_in) as usize;
if written > 0 || data.len() == 0 {
return Ok(written)
}
}
}
fn flush(&mut self) -> io::Result<()> {
loop {
try!(self.dump());
let before = self.total_out();
self.data.compress_vec(&[], &mut self.buf, Action::Flush)
.unwrap();
if before == self.total_out() {
break
}
}
self.obj.as_mut().unwrap().flush()
}
}
#[cfg(feature = "tokio")]
impl<W: AsyncWrite> AsyncWrite for BzEncoder<W> {
fn shutdown(&mut self) -> Poll<(), io::Error> {
try_nb!(self.try_finish());
self.get_mut().shutdown()
}
}
impl<W: Read + Write> Read for BzEncoder<W> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.get_mut().read(buf)
}
}
#[cfg(feature = "tokio")]
impl<W: AsyncRead + AsyncWrite> AsyncRead for BzEncoder<W> {
}
impl<W: Write> Drop for BzEncoder<W> {
fn drop(&mut self) {
if self.obj.is_some() {
let _ = self.try_finish();
}
}
}
impl<W: Write> BzDecoder<W> {
/// Create a new decoding stream which will decompress all data written
/// to it into `obj`.
pub fn new(obj: W) -> BzDecoder<W> {
BzDecoder {
data: Decompress::new(false),
obj: Some(obj),
buf: Vec::with_capacity(32 * 1024),
done: false,
}
}
/// Acquires a reference to the underlying writer.
pub fn get_ref(&self) -> &W {
self.obj.as_ref().unwrap()
}
/// Acquires a mutable reference to the underlying writer.
///
/// Note that mutating the output/input state of the stream may corrupt this
/// object, so care must be taken when using this method.
pub fn get_mut(&mut self) -> &mut W {
self.obj.as_mut().unwrap()
}
fn dump(&mut self) -> io::Result<()> {
while self.buf.len() > 0 {
let n = match self.obj.as_mut().unwrap().write(&self.buf) {
Ok(n) => n,
Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue,
Err(err) => return Err(err),
};
self.buf.drain(..n);
}
Ok(())
}
/// Attempt to finish this output stream, writing out final chunks of data.
///
/// Note that this function can only be used once data has finished being
/// written to the output stream. After this function is called then further
/// calls to `write` may result in a panic.
///
/// # Panics
///
/// Attempts to write data to this stream may result in a panic after this
/// function is called.
pub fn try_finish(&mut self) -> io::Result<()> {
while !self.done {
try!(self.write(&[]));
}
self.dump()
}
/// Unwrap the underlying writer, finishing the compression stream.
///
/// Note that this function may not be suitable to call in a situation where
/// the underlying stream is an asynchronous I/O stream. To finish a stream
/// the `try_finish` (or `shutdown`) method should be used instead. To
/// re-acquire ownership of a stream it is safe to call this method after
/// `try_finish` or `shutdown` has returned `Ok`.
pub fn finish(&mut self) -> io::Result<W> {
try!(self.try_finish());
Ok(self.obj.take().unwrap())
}
/// Returns the number of bytes produced by the decompressor
///
/// Note that, due to buffering, this only bears any relation to
/// `total_in()` after a call to `flush()`. At that point,
/// `total_in() / total_out()` is the compression ratio.
pub fn total_out(&self) -> u64 {
self.data.total_out()
}
/// Returns the number of bytes consumed by the decompressor
/// (e.g. the number of bytes written to this stream.)
pub fn total_in(&self) -> u64 {
self.data.total_in()
}
}
impl<W: Write> Write for BzDecoder<W> {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
if self.done {
return Ok(0)
}
loop {
try!(self.dump());
let before = self.total_in();
let res = self.data.decompress_vec(data, &mut self.buf);
let written = (self.total_in() - before) as usize;
let res = try!(res.map_err(|e| {
io::Error::new(io::ErrorKind::InvalidInput, e)
}));
if res == Status::StreamEnd {
self.done = true;
}
if written > 0 || data.len() == 0 || self.done {
return Ok(written)
}
}
}
fn flush(&mut self) -> io::Result<()> {
try!(self.dump());
self.obj.as_mut().unwrap().flush()
}
}
#[cfg(feature = "tokio")]
impl<W: AsyncWrite> AsyncWrite for BzDecoder<W> {
fn shutdown(&mut self) -> Poll<(), io::Error> {
try_nb!(self.try_finish());
self.get_mut().shutdown()
}
}
impl<W: Read + Write> Read for BzDecoder<W> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.get_mut().read(buf)
}
}
#[cfg(feature = "tokio")]
impl<W: AsyncRead + AsyncWrite> AsyncRead for BzDecoder<W> {
}
impl<W: Write> Drop for BzDecoder<W> {
fn drop(&mut self) {
if self.obj.is_some() {
let _ = self.try_finish();
}
}
}
#[cfg(test)]
mod tests {
use std::io::prelude::*;
use std::iter::repeat;
use partial_io::{GenInterrupted, PartialWithErrors, PartialWrite};
use super::{BzEncoder, BzDecoder};
#[test]
fn smoke() {
let d = BzDecoder::new(Vec::new());
let mut c = BzEncoder::new(d, ::Compression::default());
c.write_all(b"12834").unwrap();
let s = repeat("12345").take(100000).collect::<String>();
c.write_all(s.as_bytes()).unwrap();
let data = c.finish().unwrap().finish().unwrap();
assert_eq!(&data[0..5], b"12834");
assert_eq!(data.len(), 500005);
assert!(format!("12834{}", s).as_bytes() == &*data);
}
#[test]
fn write_empty() {
let d = BzDecoder::new(Vec::new());
let mut c = BzEncoder::new(d, ::Compression::default());
c.write(b"").unwrap();
let data = c.finish().unwrap().finish().unwrap();
assert_eq!(&data[..], b"");
}
#[test]
fn qc() {
::quickcheck::quickcheck(test as fn(_) -> _);
fn test(v: Vec<u8>) -> bool {
let w = BzDecoder::new(Vec::new());
let mut w = BzEncoder::new(w, ::Compression::default());
w.write_all(&v).unwrap();
v == w.finish().unwrap().finish().unwrap()
}
}
#[test]
fn qc_partial() {
::quickcheck::quickcheck(test as fn(_, _, _) -> _);
fn test(v: Vec<u8>,
encode_ops: PartialWithErrors<GenInterrupted>,
decode_ops: PartialWithErrors<GenInterrupted>) -> bool {
let w = BzDecoder::new(PartialWrite::new(Vec::new(), decode_ops));
let mut w = BzEncoder::new(PartialWrite::new(w, encode_ops), ::Compression::default());
w.write_all(&v).unwrap();
v == w.finish().unwrap().into_inner().finish().unwrap().into_inner()
}
}
}
| finish |
speaker_verification_plda.py | #!/usr/bin/python3
"""Recipe for training a speaker verification system based on PLDA using the voxceleb dataset.
The system employs a pre-trained model followed by a PLDA transformation.
The pre-trained model is automatically downloaded from the web if not specified.
To run this recipe, run the following command:
> python speaker_verification_plda.py hyperparams/verification_plda_xvector.yaml
Authors
* Nauman Dawalatabad 2020
* Mirco Ravanelli 2020
"""
import os
import sys
import torch
import torchaudio
import logging
import speechbrain as sb
import numpy
import pickle
from tqdm.contrib import tqdm
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.metric_stats import EER, minDCF
from speechbrain.processing.PLDA_LDA import StatObject_SB
from speechbrain.processing.PLDA_LDA import Ndx
from speechbrain.processing.PLDA_LDA import fast_PLDA_scoring
from speechbrain.utils.data_utils import download_file
from speechbrain.utils.distributed import run_on_main
# Compute embeddings from the waveforms
def compute_embeddings(wavs, wav_lens):
"""Compute speaker embeddings.
Arguments
---------
wavs : Torch.Tensor
Tensor containing the speech waveform (batch, time).
Make sure the sample rate is fs=16000 Hz.
wav_lens: Torch.Tensor
Tensor containing the relative length for each sentence
in the batch (e.g., [0.8 0.6 1.0]).
"""
wavs = wavs.to(params["device"])
wav_lens = wav_lens.to(params["device"])
with torch.no_grad():
feats = params["compute_features"](wavs)
feats = params["mean_var_norm"](feats, wav_lens)
embeddings = params["embedding_model"](feats, wav_lens)
embeddings = params["mean_var_norm_emb"](
embeddings, torch.ones(embeddings.shape[0]).to(embeddings.device)
)
return embeddings.squeeze(1)
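# Illustrative sketch (not in the original recipe): calling compute_embeddings
# on a zero-padded batch. The shapes follow the docstring above; this helper
# name is hypothetical and the global `params` dict must already be loaded.
def _example_compute_embeddings():
    wavs = torch.zeros(2, 16000)            # two 1-second waveforms at fs=16 kHz
    wav_lens = torch.tensor([0.8, 1.0])     # relative length of each sentence
    return compute_embeddings(wavs, wav_lens)  # -> (batch, emb_dim)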
def emb_computation_loop(split, set_loader, stat_file):
"""Computes the embeddings and saves the in a stat file"""
# Extract embeddings (skip if already done)
if not os.path.isfile(stat_file):
embeddings = numpy.empty(
shape=[0, params["emb_dim"]], dtype=numpy.float64
)
modelset = []
segset = []
with tqdm(set_loader, dynamic_ncols=True) as t:
for batch in t:
ids = batch.id
wavs, lens = batch.sig
mod = [x for x in ids]
seg = [x for x in ids]
modelset = modelset + mod
segset = segset + seg
# Enrollment and test embeddings
embs = compute_embeddings(wavs, lens)
xv = embs.squeeze().cpu().numpy()
embeddings = numpy.concatenate((embeddings, xv), axis=0)
modelset = numpy.array(modelset, dtype="|O")
segset = numpy.array(segset, dtype="|O")
# Initialize variables for start, stop and stat0
s = numpy.array([None] * embeddings.shape[0])
b = numpy.array([[1.0]] * embeddings.shape[0])
# Stat object (used to collect embeddings)
stat_obj = StatObject_SB(
modelset=modelset,
segset=segset,
start=s,
stop=s,
stat0=b,
stat1=embeddings,
)
logger.info(f"Saving stat obj for {split}")
stat_obj.save_stat_object(stat_file)
else:
logger.info(f"Skipping embedding Extraction for {split}")
logger.info(f"Loading previously saved stat_object for {split}")
with open(stat_file, "rb") as input:
stat_obj = pickle.load(input)
return stat_obj
def verification_performance(scores_plda):
"""Computes the Equal Error Rate give the PLDA scores"""
# Create ids, labels, and scoring list for EER evaluation
ids = []
labels = []
positive_scores = []
negative_scores = []
for line in open(veri_file_path):
lab = int(line.split(" ")[0].rstrip().split(".")[0].strip())
enrol_id = line.split(" ")[1].rstrip().split(".")[0].strip()
test_id = line.split(" ")[2].rstrip().split(".")[0].strip()
# Assuming enrol_id and test_id are unique
i = int(numpy.where(scores_plda.modelset == enrol_id)[0][0])
j = int(numpy.where(scores_plda.segset == test_id)[0][0])
s = float(scores_plda.scoremat[i, j])
labels.append(lab)
ids.append(enrol_id + "<>" + test_id)
if lab == 1:
positive_scores.append(s)
else:
negative_scores.append(s)
# Clean variable
del scores_plda
# Final EER computation
eer, th = EER(torch.tensor(positive_scores), torch.tensor(negative_scores))
min_dcf, th = minDCF(
torch.tensor(positive_scores), torch.tensor(negative_scores)
)
return eer, min_dcf
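# Illustrative sketch (assumption: EER and minDCF accept 1-D score tensors, as
# used above): sanity-checking the metrics on toy positive/negative scores.
def _example_metrics():
    pos = torch.tensor([2.0, 1.5, 1.2])
    neg = torch.tensor([-1.0, -0.5, 0.1])
    eer, _ = EER(pos, neg)
    min_dcf, _ = minDCF(pos, neg)
    return eer, min_dcf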
# Function to get mod and seg
def get_utt_ids_for_test(ids, data_dict):
mod = [data_dict[x]["wav1"]["data"] for x in ids]
seg = [data_dict[x]["wav2"]["data"] for x in ids]
return mod, seg
def | (params):
"Creates the dataloaders and their data processing pipelines."
data_folder = params["data_folder"]
# 1. Declarations:
# Train data (used for normalization)
train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=params["train_data"], replacements={"data_root": data_folder},
)
train_data = train_data.filtered_sorted(
sort_key="duration", select_n=params["n_train_snts"]
)
# Enrol data
enrol_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=params["enrol_data"], replacements={"data_root": data_folder},
)
enrol_data = enrol_data.filtered_sorted(sort_key="duration")
# Test data
test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=params["test_data"], replacements={"data_root": data_folder},
)
test_data = test_data.filtered_sorted(sort_key="duration")
datasets = [train_data, enrol_data, test_data]
# 2. Define audio pipeline:
@sb.utils.data_pipeline.takes("wav", "start", "stop")
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav, start, stop):
start = int(start)
stop = int(stop)
num_frames = stop - start
sig, fs = torchaudio.load(
wav, num_frames=num_frames, frame_offset=start
)
sig = sig.transpose(0, 1).squeeze(1)
return sig
sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
# 3. Set output:
sb.dataio.dataset.set_output_keys(datasets, ["id", "sig", "spk_id"])
# 4 Create dataloaders
train_dataloader = sb.dataio.dataloader.make_dataloader(
train_data, **params["train_dataloader_opts"]
)
enrol_dataloader = sb.dataio.dataloader.make_dataloader(
enrol_data, **params["enrol_dataloader_opts"]
)
test_dataloader = sb.dataio.dataloader.make_dataloader(
test_data, **params["test_dataloader_opts"]
)
return train_dataloader, enrol_dataloader, test_dataloader
if __name__ == "__main__":
# Logger setup
logger = logging.getLogger(__name__)
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(current_dir))
# Load hyperparameters file with command-line overrides
params_file, run_opts, overrides = sb.core.parse_arguments(sys.argv[1:])
with open(params_file) as fin:
params = load_hyperpyyaml(fin, overrides)
# Download verification list (to exclude verification sentences from train)
veri_file_path = os.path.join(
params["save_folder"], os.path.basename(params["verification_file"])
)
download_file(params["verification_file"], veri_file_path)
from voxceleb_prepare import prepare_voxceleb # noqa E402
# Create experiment directory
sb.core.create_experiment_directory(
experiment_directory=params["output_folder"],
hyperparams_to_save=params_file,
overrides=overrides,
)
# Prepare data from dev of Voxceleb1
logger.info("Data preparation")
prepare_voxceleb(
data_folder=params["data_folder"],
save_folder=params["save_folder"],
verification_pairs_file=veri_file_path,
splits=["train", "test"],
split_ratio=[90, 10],
seg_dur=3,
)
# here we create the datasets objects as well as tokenization and encoding
train_dataloader, enrol_dataloader, test_dataloader = dataio_prep(params)
# Initialize PLDA vars
modelset, segset = [], []
embeddings = numpy.empty(shape=[0, params["emb_dim"]], dtype=numpy.float64)
# Embedding file for train data
xv_file = os.path.join(
params["save_folder"], "VoxCeleb1_train_embeddings_stat_obj.pkl"
)
# We download the pretrained embedding model from HuggingFace (or elsewhere,
# depending on the path given in the YAML file).
run_on_main(params["pretrainer"].collect_files)
params["pretrainer"].load_collected()
params["embedding_model"].eval()
params["embedding_model"].to(params["device"])
# Computing training embeddings (skip if already extracted)
if not os.path.exists(xv_file):
logger.info("Extracting embeddings from Training set..")
with tqdm(train_dataloader, dynamic_ncols=True) as t:
for batch in t:
snt_id = batch.id
wav, lens = batch.sig
spk_ids = batch.spk_id
# Flattening speaker ids
modelset = modelset + spk_ids
# For segset
segset = segset + snt_id
# Compute embeddings
emb = compute_embeddings(wav, lens)
xv = emb.squeeze(1).cpu().numpy()
embeddings = numpy.concatenate((embeddings, xv), axis=0)
# Speaker IDs and utterance IDs
modelset = numpy.array(modelset, dtype="|O")
segset = numpy.array(segset, dtype="|O")
# Initialize variables for start, stop and stat0
s = numpy.array([None] * embeddings.shape[0])
b = numpy.array([[1.0]] * embeddings.shape[0])
embeddings_stat = StatObject_SB(
modelset=modelset,
segset=segset,
start=s,
stop=s,
stat0=b,
stat1=embeddings,
)
del embeddings
# Save TRAINING embeddings in StatObject_SB object
embeddings_stat.save_stat_object(xv_file)
else:
# Load the saved stat object for train embedding
logger.info("Skipping embedding Extraction for training set")
logger.info(
"Loading previously saved stat_object for train embeddings.."
)
with open(xv_file, "rb") as input:
embeddings_stat = pickle.load(input)
# Training Gaussian PLDA model
logger.info("Training PLDA model")
params["compute_plda"].plda(embeddings_stat)
logger.info("PLDA training completed")
# Set paths for enrol/test embeddings
enrol_stat_file = os.path.join(params["save_folder"], "stat_enrol.pkl")
test_stat_file = os.path.join(params["save_folder"], "stat_test.pkl")
ndx_file = os.path.join(params["save_folder"], "ndx.pkl")
# Compute enrol and Test embeddings
enrol_obj = emb_computation_loop("enrol", enrol_dataloader, enrol_stat_file)
test_obj = emb_computation_loop("test", test_dataloader, test_stat_file)
# Prepare Ndx Object
if not os.path.isfile(ndx_file):
models = enrol_obj.modelset
testsegs = test_obj.modelset
logger.info("Preparing Ndx")
ndx_obj = Ndx(models=models, testsegs=testsegs)
logger.info("Saving ndx obj...")
ndx_obj.save_ndx_object(ndx_file)
else:
logger.info("Skipping Ndx preparation")
logger.info("Loading Ndx from disk")
with open(ndx_file, "rb") as input:
ndx_obj = pickle.load(input)
# PLDA scoring
logger.info("PLDA scoring...")
scores_plda = fast_PLDA_scoring(
enrol_obj,
test_obj,
ndx_obj,
params["compute_plda"].mean,
params["compute_plda"].F,
params["compute_plda"].Sigma,
)
logger.info("Computing EER... ")
# Cleaning variable
del enrol_dataloader
del test_dataloader
del enrol_obj
del test_obj
del embeddings_stat
# Final EER computation
eer, min_dcf = verification_performance(scores_plda)
logger.info("EER(%%)=%f", eer * 100)
logger.info("min_dcf=%f", min_dcf * 100)
| dataio_prep |
animate_shader.rs | use bevy::{
prelude::*,
reflect::TypeUuid,
render::{
mesh::shape,
pipeline::{PipelineDescriptor, RenderPipeline},
render_graph::{base, RenderGraph, RenderResourcesNode},
renderer::RenderResources,
shader::{ShaderStage, ShaderStages},
},
};
/// This example shows how to animate a shader, by passing the global `time.seconds_since_startup()`
/// via a `TimeUniform` component to the shader.
pub fn | () {
App::build()
.add_plugins(DefaultPlugins)
.add_startup_system(setup.system())
.add_system(animate_shader.system())
.run();
}
#[derive(RenderResources, Default, TypeUuid)]
#[uuid = "463e4b8a-d555-4fc2-ba9f-4c880063ba92"]
struct TimeUniform {
value: f32,
}
const VERTEX_SHADER: &str = r#"
#version 450
layout(location = 0) in vec3 Vertex_Position;
layout(location = 1) in vec2 Vertex_Uv;
layout(location = 0) out vec2 v_Uv;
layout(set = 0, binding = 0) uniform CameraViewProj {
mat4 ViewProj;
};
layout(set = 1, binding = 0) uniform Transform {
mat4 Model;
};
void main() {
gl_Position = ViewProj * Model * vec4(Vertex_Position, 1.0);
v_Uv = Vertex_Uv;
}
"#;
const FRAGMENT_SHADER: &str = r#"
#version 450
layout(location = 0) in vec2 v_Uv;
layout(location = 0) out vec4 o_Target;
layout(set = 2, binding = 0) uniform TimeUniform_value {
float time;
};
void main() {
float speed = 0.7;
float translation = sin(time * speed);
float percentage = 0.6;
float threshold = v_Uv.x + translation * percentage;
vec3 red = vec3(1., 0., 0.);
vec3 blue = vec3(0., 0., 1.);
vec3 mixed = mix(red, blue, threshold);
o_Target = vec4(mixed, 1.0);
}
"#;
fn setup(
mut commands: Commands,
mut pipelines: ResMut<Assets<PipelineDescriptor>>,
mut shaders: ResMut<Assets<Shader>>,
mut meshes: ResMut<Assets<Mesh>>,
mut render_graph: ResMut<RenderGraph>,
) {
// Create a new shader pipeline.
let pipeline_handle = pipelines.add(PipelineDescriptor::default_config(ShaderStages {
vertex: shaders.add(Shader::from_glsl(ShaderStage::Vertex, VERTEX_SHADER)),
fragment: Some(shaders.add(Shader::from_glsl(ShaderStage::Fragment, FRAGMENT_SHADER))),
}));
// Add a `RenderResourcesNode` to our `RenderGraph`. This will bind `TimeUniform` to our shader.
render_graph.add_system_node(
"time_uniform",
RenderResourcesNode::<TimeUniform>::new(true),
);
// Add a `RenderGraph` edge connecting our new "time_uniform" node to the main pass node. This
// ensures that "time_uniform" runs before the main pass.
render_graph
.add_node_edge("time_uniform", base::node::MAIN_PASS)
.unwrap();
// Spawn a quad and insert the `TimeUniform`.
commands
.spawn_bundle(MeshBundle {
mesh: meshes.add(Mesh::from(shape::Quad::new(Vec2::new(5.0, 5.0)))),
render_pipelines: RenderPipelines::from_pipelines(vec![RenderPipeline::new(
pipeline_handle,
)]),
transform: Transform::from_xyz(0.0, 0.0, 0.0),
..Default::default()
})
.insert(TimeUniform { value: 0.0 });
// Spawn a camera.
commands.spawn_bundle(PerspectiveCameraBundle {
transform: Transform::from_xyz(0.0, 0.0, 8.0).looking_at(Vec3::ZERO, Vec3::Y),
..Default::default()
});
}
/// In this system we query for the `TimeUniform` component and the global `Time` resource, and set
/// `time.seconds_since_startup()` as the `value` of the `TimeUniform`. This value will be accessed
/// by the fragment shader and used to animate the shader.
fn animate_shader(time: Res<Time>, mut query: Query<&mut TimeUniform>) {
let mut time_uniform = query.single_mut().unwrap();
time_uniform.value = time.seconds_since_startup() as f32;
}
| main |
half_sudden.py | # -*- coding: utf-8 -*-
"""
Contains the definition of the SuddenDecay class.
"""
from __future__ import unicode_literals
from __future__ import print_function
import logging
import numpy as np
from . import SampleBasedDecay
logger = logging.getLogger('decay.half_sudden')
class HalfSuddenDecay(SampleBasedDecay):
| """
Class that decays the value following the sigmoid curve.
Sigmoid is:
Y = k / (1 + e^(a + b*x)) + 1
This curve uses a=10, b=-10, k=-2.
This intersects the Y axis at
+1 and the X axis at -1 and +1. We're interested only in the
positive x.
"""
def __init__(self, *args, **kwargs):
""" Constructor. """
super(HalfSuddenDecay, self).__init__(
decay_name='.decay.half_sudden.', *args, **kwargs)
def __str__(self):
""" Represent this object as a human-readable string. """
return 'HalfSuddenDecay()'
def __repr__(self):
""" Represent this object as a python constructor. """
return 'HalfSuddenDecay()'
decay_x = np.array([
0.0,
0.05263157894736842,
0.10526315789473684,
0.15789473684210525,
0.21052631578947367,
0.2631578947368421,
0.3157894736842105,
0.3684210526315789,
0.42105263157894735,
0.47368421052631576,
0.5263157894736842,
0.5789473684210527,
0.631578947368421,
0.6842105263157894,
0.7368421052631579,
0.7894736842105263,
0.8421052631578947,
0.894736842105263,
0.9473684210526315,
1.0,
])
decay_y = np.array([
1.0,
0.9998463162863197,
0.9997398757902081,
0.9995597314205974,
0.999254877774581,
0.9987390684889199,
0.9978665723466811,
0.9963914462121438,
0.9938994809709213,
0.9896955173948945,
0.9826197888368629,
0.9707568136416107,
0.9509968204584932,
0.9184373437414545,
0.8657330022308358,
0.7828273568190789,
0.6581107760257361,
0.4825598285864794,
0.2572468384313463,
0.0,
]) |
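# Illustrative sketch (not in the original module): the closed form of the
# docstring sigmoid with a=10, b=-10, k=-2, which closely matches the sampled
# decay_x/decay_y tables above (e.g. it is ~1 at x=0 and exactly 0 at x=1).
def _half_sudden_sigmoid(x):
    return -2.0 / (1.0 + np.exp(10.0 - 10.0 * x)) + 1.0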
|
chinilla_default_constants.py | from ceres.util.ints import uint64
from ceres.consensus.constants import ConsensusConstants
testnet_kwargs = {
"SLOT_BLOCKS_TARGET": 32,
"MIN_BLOCKS_PER_CHALLENGE_BLOCK": 16, # Must be less than half of SLOT_BLOCKS_TARGET
"MAX_SUB_SLOT_BLOCKS": 128, # Must be less than half of SUB_EPOCH_BLOCKS
"NUM_SPS_SUB_SLOT": 64, # Must be a power of 2
"SUB_SLOT_ITERS_STARTING": 2 ** 27,
# DIFFICULTY_STARTING is the starting difficulty for the first epoch, which is then further
# multiplied by another factor of DIFFICULTY_CONSTANT_FACTOR, to be used in the VDF iter calculation formula.
"DIFFICULTY_CONSTANT_FACTOR": 2 ** 62,
"DIFFICULTY_STARTING": 7,
"DIFFICULTY_CHANGE_MAX_FACTOR": 3, # The next difficulty is truncated to range [prev / FACTOR, prev * FACTOR]
# These 3 constants must be changed at the same time
"SUB_EPOCH_BLOCKS": 384, # The number of blocks per sub-epoch, vanillanet 384
"EPOCH_BLOCKS": 4608, # The number of blocks per epoch, vanillanet 4608. Must be multiple of SUB_EPOCH_SB
"SIGNIFICANT_BITS": 8, # The number of bits to look at in difficulty and min iters. The rest are zeroed
"DISCRIMINANT_SIZE_BITS": 1024, # Max is 1024 (based on ClassGroupElement int size)
"NUMBER_ZERO_BITS_PLOT_FILTER": 9, # H(plot signature of the challenge) must start with these many zeroes
"MIN_PLOT_SIZE": 32, # 32 for vanillanet
"MAX_PLOT_SIZE": 50,
"SUB_SLOT_TIME_TARGET": 600, # The target number of seconds per slot, vanillanet 600
"NUM_SP_INTERVALS_EXTRA": 3, # The number of sp intervals to add to the signage point
"MAX_FUTURE_TIME": 5 * 60, # The next block can have a timestamp of at most these many seconds in the future
"NUMBER_OF_TIMESTAMPS": 11, # Than the average of the last NUMBER_OF_TIMESTAMPS blocks
# Used as the initial cc rc challenges, as well as first block back pointers, and first SES back pointer
# We override this value based on the chain being run (testnet0, vanillanet, etc)
# Default used for tests is std_hash(b'')
"GENESIS_CHALLENGE": bytes.fromhex("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"),
# Forks of chinilla should change this value to provide replay attack protection. This is set to vanillanet genesis
"AGG_SIG_ME_ADDITIONAL_DATA": bytes.fromhex("53f4690da000fe21fff9c7b84dcff4263bd2c0c5886f2f7bf486940b206cd558"),
"GENESIS_PRE_FARM_POOL_PUZZLE_HASH": bytes.fromhex(
"63a6b7cf123828c913c50580ee2a8beda829a464ee1a7cfcfe312d5b89496b12"
),
"GENESIS_PRE_FARM_FARMER_PUZZLE_HASH": bytes.fromhex(
"09c505b9aae9fe97ba20d8622b63574fa2bfc19dcb9cbf2d45f3b53bcac60072" | ),
"MAX_VDF_WITNESS_SIZE": 64,
# Size of mempool = 50x the size of block # temporary change until #9125 gets in
"MEMPOOL_BLOCK_BUFFER": 10,
# Max coin amount, fits into 64 bits
"MAX_COIN_AMOUNT": uint64((1 << 64) - 1),
# Max block cost in clvm cost units
"MAX_BLOCK_COST_CLVM": 11000000000,
# The cost per byte of generator program
"COST_PER_BYTE": 12000,
"WEIGHT_PROOF_THRESHOLD": 2,
"BLOCKS_CACHE_SIZE": 4608 + (128 * 4),
"WEIGHT_PROOF_RECENT_BLOCKS": 1000,
"MAX_BLOCK_COUNT_PER_REQUESTS": 32, # Allow up to 32 blocks per request
"NETWORK_TYPE": 0,
"MAX_GENERATOR_SIZE": 1000000,
"MAX_GENERATOR_REF_LIST_SIZE": 512, # Number of references allowed in the block generator ref list
"POOL_SUB_SLOT_ITERS": 37600000000, # iters limit * NUM_SPS
}
DEFAULT_CONSTANTS = ConsensusConstants(**testnet_kwargs) # type: ignore | |
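# Illustrative sketch (not part of the original file, and a simplified
# assumption rather than the exact consensus formula): the VDF iteration
# budget mentioned above scales the stored difficulty by
# DIFFICULTY_CONSTANT_FACTOR.
def _approx_weight_to_iters(difficulty: int) -> int:
    return difficulty * testnet_kwargs["DIFFICULTY_CONSTANT_FACTOR"]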
base.rs | use std::prelude::v1::*;
use crate::msgs::codec;
use crate::msgs::codec::{Codec, Reader};
use crate::key;
/// An externally length'd payload
#[derive(Debug, Clone, PartialEq)]
pub struct | (pub Vec<u8>);
impl Codec for Payload {
fn encode(&self, bytes: &mut Vec<u8>) {
bytes.extend_from_slice(&self.0);
}
fn read(r: &mut Reader) -> Option<Payload> {
Some(Payload(r.rest().to_vec()))
}
}
impl Payload {
pub fn new(bytes: Vec<u8>) -> Payload {
Payload(bytes)
}
pub fn empty() -> Payload {
Payload::new(Vec::new())
}
pub fn from_slice(data: &[u8]) -> Payload {
let mut v = Vec::with_capacity(data.len());
v.extend_from_slice(data);
Payload(v)
}
}
impl Codec for key::Certificate {
fn encode(&self, bytes: &mut Vec<u8>) {
codec::u24(self.0.len() as u32).encode(bytes);
bytes.extend_from_slice(&self.0);
}
fn read(r: &mut Reader) -> Option<key::Certificate> {
let len = codec::u24::read(r)?.0 as usize;
let mut sub = r.sub(len)?;
let body = sub.rest().to_vec();
Some(key::Certificate(body))
}
}
/// An arbitrary, unknown-content, u24-length-prefixed payload
#[derive(Debug, Clone, PartialEq)]
pub struct PayloadU24(pub Vec<u8>);
impl PayloadU24 {
pub fn new(bytes: Vec<u8>) -> PayloadU24 {
PayloadU24(bytes)
}
pub fn empty() -> PayloadU24 {
PayloadU24::new(Vec::new())
}
}
impl Codec for PayloadU24 {
fn encode(&self, bytes: &mut Vec<u8>) {
codec::u24(self.0.len() as u32).encode(bytes);
bytes.extend_from_slice(&self.0);
}
fn read(r: &mut Reader) -> Option<PayloadU24> {
let len = codec::u24::read(r)?.0 as usize;
let mut sub = r.sub(len)?;
let body = sub.rest().to_vec();
Some(PayloadU24(body))
}
}
/// An arbitrary, unknown-content, u16-length-prefixed payload
#[derive(Debug, Clone, PartialEq)]
pub struct PayloadU16(pub Vec<u8>);
impl PayloadU16 {
pub fn new(bytes: Vec<u8>) -> PayloadU16 {
PayloadU16(bytes)
}
pub fn empty() -> PayloadU16 {
PayloadU16::new(Vec::new())
}
pub fn encode_slice(slice: &[u8], bytes: &mut Vec<u8>) {
(slice.len() as u16).encode(bytes);
bytes.extend_from_slice(slice);
}
}
impl Codec for PayloadU16 {
fn encode(&self, bytes: &mut Vec<u8>) {
Self::encode_slice(&self.0, bytes);
}
fn read(r: &mut Reader) -> Option<PayloadU16> {
let len = u16::read(r)? as usize;
let mut sub = r.sub(len)?;
let body = sub.rest().to_vec();
Some(PayloadU16(body))
}
}
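// Illustrative sketch (not in the original module; assumes `Reader::init` as
// used elsewhere in this crate): round-tripping a `PayloadU16` through its
// `Codec` impl. `encode` prefixes the body with a network-order u16 length,
// and `read` consumes exactly that many bytes.
#[allow(dead_code)]
fn _payload_u16_roundtrip() {
    let payload = PayloadU16::new(vec![1, 2, 3]);
    let mut bytes = Vec::new();
    payload.encode(&mut bytes); // bytes == [0x00, 0x03, 1, 2, 3]
    let mut r = Reader::init(&bytes);
    assert_eq!(PayloadU16::read(&mut r), Some(payload));
}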
/// An arbitrary, unknown-content, u8-length-prefixed payload
#[derive(Debug, Clone, PartialEq)]
pub struct PayloadU8(pub Vec<u8>);
impl PayloadU8 {
pub fn new(bytes: Vec<u8>) -> PayloadU8 {
PayloadU8(bytes)
}
pub fn empty() -> PayloadU8 {
PayloadU8(Vec::new())
}
pub fn into_inner(self) -> Vec<u8> { self.0 }
}
impl Codec for PayloadU8 {
fn encode(&self, bytes: &mut Vec<u8>) {
(self.0.len() as u8).encode(bytes);
bytes.extend_from_slice(&self.0);
}
fn read(r: &mut Reader) -> Option<PayloadU8> {
let len = u8::read(r)? as usize;
let mut sub = r.sub(len)?;
let body = sub.rest().to_vec();
Some(PayloadU8(body))
}
}
| Payload |
parser_x_helpers.go | package parse
import (
. "github.com/Foxcapades/Argonaut/v0/internal/log"
"github.com/Foxcapades/Argonaut/v0/internal/util"
A "github.com/Foxcapades/Argonaut/v0/pkg/argo"
R "reflect"
)
//┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓//
//┃ ┃//
//┃ Internal API: Helpers ┃//
//┃ ┃//
//┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛//
func (p *Parser) setup(args []string, com A.Command) {
TraceStart("Parser.setup", args, com)
defer TraceEnd(func() []interface{} { return nil })
p.makeMaps(com)
p.args = com.Arguments()
p.input = args
p.com = com
p.argI = 0
p.charI = 0
p.waiting = nil
}
func (p *Parser) popArg() (arg A.Argument) {
TraceStart("Parser.popArg")
defer TraceEnd(func() []interface{} { return []interface{}{arg} })
if p.waiting != nil {
arg = p.waiting.Argument()
p.waiting = nil
} else {
if len(p.args) > 0 {
arg = p.args[0]
p.args = p.args[1:]
}
}
return
}
func (p *Parser) eatString() (out string) {
TraceStart("Parser.eatString")
defer TraceEnd(func() []interface{} { return []interface{}{out} })
out = p.argument()[p.charI:]
return
}
// Returns the length of the current argument string
func (p *Parser) strLen() int {
return len(p.argument())
}
// returns the character at the current arg and char index
func (p *Parser) char() byte {
return p.argument()[p.charI]
}
// returns the argument at the current arg index
func (p *Parser) argument() string {
return p.input[p.argI]
}
// Create and populate parser maps
func (p *Parser) makeMaps(command A.Command) {
TraceStart("Parser.makeMaps", command)
defer TraceEnd(func() []interface{} { return nil })
p.shorts = make(map[byte]A.Flag)
p.longs = make(map[string]A.Flag)
p.reqs = make(map[uintptr]interface{})
for _, group := range command.FlagGroups() {
flags := group.Flags()
for i := range flags { | if flags[i].HasShort() {
p.shorts[flags[i].Short()] = flags[i]
}
if flags[i].HasLong() {
p.longs[flags[i].Long()] = flags[i]
}
if flags[i].Required() {
p.reqs[pointerFor(flags[i])] = flags[i]
}
}
}
args := command.Arguments()
for i := range args {
if args[i].Required() {
p.reqs[pointerFor(args[i])] = args[i]
}
}
}
func (p *Parser) unmarshal(arg A.Argument) {
TraceStart("Parser.unmarshal", arg)
defer TraceEnd(func() []interface{} { return nil })
bind := arg.Binding()
kind := util.GetRootValue(R.ValueOf(bind))
Trace(arg.Parent())
// If binding is specialized
if cst, ok := kind.Interface().(A.SpecializedUnmarshaler); ok {
util.Must(p.com.Unmarshaler().Unmarshal(p.argument(), bind))
arg.SetRawValue(p.argument())
if !cst.ConsumesArguments() {
p.argI--
}
return
}
// if binding is bool, only consume the arg if it's
// actually a valid bool value
if p.isBoolArg(arg) {
if util.IsBool(p.argument()) {
util.Must(p.com.Unmarshaler().Unmarshal(p.argument(), bind))
arg.SetRawValue(p.argument())
} else {
util.Must(p.com.Unmarshaler().Unmarshal("true", bind))
arg.SetRawValue("true")
p.argI--
}
return
}
util.Must(p.com.Unmarshaler().Unmarshal(p.argument(), bind))
arg.SetRawValue(p.argument())
} | |
tests.rs |
#![cfg(test)]
use super::*;
use crate::mock::*;
use runtime_io::with_externalities;
use srml_support::{assert_ok};
/*
* NB!: No test checks for event emission!!!!
*/
/*
* set_forum_sudo
* ==============================================================================
*
* Missing cases
*
* set_forum_bad_origin
*
*/
#[test]
fn set_forum_sudo_unset() {
let config = default_genesis_config();
with_externalities(&mut build_test_externalities(config), || {
// Ensure that forum sudo is default
assert_eq!(TestForumModule::forum_sudo(), Some(33));
// Unset forum sudo
assert_ok!(TestForumModule::set_forum_sudo(None));
// Sudo no longer set
assert!(TestForumModule::forum_sudo().is_none());
// event emitted?!
});
}
#[test]
fn set_forum_sudo_update() {
let config = default_genesis_config();
with_externalities(&mut build_test_externalities(config), || {
// Ensure that forum sudo is default
assert_eq!(TestForumModule::forum_sudo(), Some(default_genesis_config().forum_sudo));
let new_forum_sudo_account_id = 780;
// Update forum sudo
assert_ok!(TestForumModule::set_forum_sudo(Some(new_forum_sudo_account_id)));
// Sudo is now set to the new account
assert_eq!(TestForumModule::forum_sudo(), Some(new_forum_sudo_account_id));
});
}
/*
* create_category
* ==============================================================================
*
* Missing cases
*
* create_category_bad_origin
* create_category_forum_sudo_not_set
* create_category_origin_not_forum_sudo
* create_category_title_too_short
* create_category_title_too_long
* create_category_description_too_short
* create_category_description_too_long
*/
#[test]
fn create_category_successfully() {
let config = default_genesis_config();
with_externalities(&mut build_test_externalities(config), || {
CreateCategoryFixture {
origin: OriginType::Signed(default_genesis_config().forum_sudo),
parent: None,
title: "My new category".as_bytes().to_vec(),
description: "This is a great new category for the forum".as_bytes().to_vec(),
result: Ok(())
}
.call_and_assert();
});
}
#[test]
fn create_category_title_too_long() {
let config = default_genesis_config();
with_externalities(&mut build_test_externalities(config), || {
let genesis_config = default_genesis_config();
CreateCategoryFixture {
origin: OriginType::Signed(genesis_config.forum_sudo),
parent: None,
title: vec![b'X'; (genesis_config.category_title_constraint.max() as usize) + 1],
description: "This is a great new category for the forum".as_bytes().to_vec(),
result: Err(ERROR_CATEGORY_TITLE_TOO_LONG)
}
.call_and_assert();
});
}
/*
* update_category
* ==============================================================================
*
* Missing cases
*
* create_category_bad_origin
* create_category_forum_sudo_not_set
* create_category_origin_not_forum_sudo
* create_category_immutable_ancestor_category
*/
#[test]
fn update_category_undelete_and_unarchive() {
/*
* Create an initial state with two levels of categories, where
* leaf category is deleted, and then try to undelete.
*/
let forum_sudo = 32;
let created_at = RuntimeBlockchainTimestamp {
block : 0,
time: 0
};
let category_by_id = vec![
// A root category
(0, Category{
id: 0,
title: "New root".as_bytes().to_vec(),
description: "This is a new root category".as_bytes().to_vec(),
created_at : created_at.clone(),
deleted: false,
archived: false,
num_direct_subcategories: 1,
num_direct_unmoderated_threads: 0,
num_direct_moderated_threads: 0,
position_in_parent_category: None,
moderator_id: forum_sudo
}),
// A subcategory of the one above
(1, Category{
id: 1,
title: "New subcategory".as_bytes().to_vec(),
description: "This is a new subcategory to root category".as_bytes().to_vec(),
created_at : created_at.clone(),
deleted: true,
archived: false,
num_direct_subcategories: 0,
num_direct_unmoderated_threads: 0,
num_direct_moderated_threads: 0,
position_in_parent_category: Some(
ChildPositionInParentCategory {
parent_id: 0,
child_nr_in_parent_category: 1
}
),
moderator_id: forum_sudo
}),
];
// Set constraints to be sloppy, we don't care about enforcing them.
let sloppy_constraint = InputValidationLengthConstraint{
min: 0,
max_min_diff: 1000
};
let config = genesis_config(
&category_by_id, // category_by_id
category_by_id.len() as u64, // next_category_id
&vec![], // thread_by_id
0, // next_thread_id
&vec![], // post_by_id
0, // next_post_id
forum_sudo,
&sloppy_constraint,
&sloppy_constraint,
&sloppy_constraint,
&sloppy_constraint,
&sloppy_constraint,
&sloppy_constraint
);
with_externalities(&mut build_test_externalities(config), || {
UpdateCategoryFixture {
origin: OriginType::Signed(forum_sudo),
category_id: category_by_id[1].1.id,
new_archival_status: None, // same as before
new_deletion_status: Some(false), // undelete
result: Ok(())
}
.call_and_assert();
});
}
/*
* create_thread
* ==============================================================================
*
* Missing cases
*
* create_thread_bad_origin
* create_thread_forum_sudo_not_set
* ...
*/
#[test]
fn create_thread_not_forum_member() | {
let config = default_genesis_config();
with_externalities(&mut build_test_externalities(config), || {
let new_member = registry::Member {
id : 113
};
// User not there
assert!(registry::TestMembershipRegistryModule::get_member(&new_member.id).is_none());
// Add new member
registry::TestMembershipRegistryModule::add_member(&new_member);
// Make sure its now there
assert!(registry::TestMembershipRegistryModule::get_member(&new_member.id).is_some());
});
} |
|
14.01.2022.py | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 14 16:03:32 2022
@author: dariu
"""
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 7 12:43:25 2021
@author: dariu
"""
import numpy as np
import pandas as pd
import os
from tqdm import tqdm
import pacmap
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import umap
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
#import sklearn.cluster
from sklearn.decomposition import PCA
from sklearn import metrics
from sklearn.cluster import OPTICS, cluster_optics_dbscan
import matplotlib.gridspec as gridspec
from sklearn.cluster import SpectralCoclustering
from sklearn.metrics import consensus_score
from sklearn.cluster import SpectralBiclustering
from sklearn import svm
from sklearn.model_selection import train_test_split
from imblearn.under_sampling import NearMiss
from imblearn.pipeline import make_pipeline
from imblearn.metrics import classification_report_imbalanced
path = "C:\\Users\dariu\\Documents\\Master Wirtschaftsinformatik\\Data Challenges\Data\\"
directorys = [
['training_setA/training/', 'p0'],
['training_setB/training_setB/', 'p1']
]
#%%
dfs = []
for z, (directory, file_head) in enumerate(directorys):
for i, filename in enumerate(tqdm(os.listdir(path + directory))):
df_temp = pd.read_csv(path + directory + filename, skiprows=0, sep='|')
# patient_gender = df_temp["Gender"][1]
# if df_temp["Age"][1] >= 40:
dfs.append(df_temp)
df = pd.concat(dfs)
labels_true = df["SepsisLabel"].tolist()
#%%
'''
#df = df[["HR", "O2Sat", "Temp", "SBP", "MAP", "DBP", "Resp", "EtCO2"]]
df = df[["Age", "Gender", "Unit1", "Unit2", "HospAdmTime", "ICULOS"]]
labels_gender = df["Gender"].tolist()
labels_unit1 = df["Unit1"].tolist()
labels_unit2 = df["Unit2"].tolist()
#############################################
'''
#%%
'''
df = df[[
"BaseExcess",
"HCO3",
"FiO2",
"pH",
"PaCO2",
"SaO2",
"AST",
"BUN",
"Alkalinephos",
"Calcium",
"Chloride",
"Creatinine",
"Bilirubin_direct",
"Glucose",
"Lactate",
"Magnesium",
"Phosphate",
"Potassium",
"Bilirubin_total",
"TroponinI",
"Hct",
"Hgb",
"PTT",
"WBC",
"Fibrinogen",
"Platelets"
]]
#%%
'''
#############################################
imputation_dims = [
'DBP',
'HR',
'O2Sat',
'Temp',
'SBP',
'MAP',
'Resp',
]
for d in imputation_dims:
mean = round(df[d].sum()/df.shape[0], 2)
df.loc[df[d].isna(), d] = mean
####################################################
df = df.drop(columns=["SepsisLabel"])
df_current = df.fillna(df.mean())
#df_current = df.fillna(2)
###########################################################
#df_current = df
##############################
#85 labels_pred?
#%%
'''
def calc_scores(X, labels_true, labels_pred):
rand_score = metrics.rand_score(labels_true, labels_pred)
adjusted_rand_score = metrics.adjusted_rand_score(labels_true, labels_pred)
adjusted_mutual_info_score = metrics.cluster.adjusted_mutual_info_score(labels_true, labels_pred)
silhouette_score = metrics.silhouette_score(X, labels_pred, metric='euclidean', sample_size=None, random_state=None)
print("Rand Score: " , str(rand_score) + "\n" +
"Adjusted Rand Score: " , str(adjusted_rand_score) + "\n"
"Adjusted Mutual Information Score: " + str(adjusted_mutual_info_score) + "\n"
"Silhouette Score: " , str(silhouette_score) + "\n"
)
'''
#%%
'''
############################################################
# initializing the pacmap instance
# Setting n_neighbors to "None" leads to a default choice shown below in "parameter" section
embedding = pacmap.PaCMAP(n_dims=5, n_neighbors=None, MN_ratio=0.5, FP_ratio=2.0)
# fit the data (The index of transformed data corresponds to the index of the original data)
X_transformed = embedding.fit_transform(df_current.values, init="pca")
####################################################################
'''
|
'''
model = SpectralCoclustering(n_clusters=9, random_state=0)
#model.fit(df_current.values)
model.fit(X_transformed)
#score = consensus_score(model.biclusters_, (rows[:, row_idx], columns[:, col_idx]))
#print("consensus score: {:.3f}".format(score))
#fit_data = df_current.values[np.argsort(model.row_labels_)]
fit_data = X_transformed[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
fit_data = fit_data[0:len(labels_true), 0:41]
#plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.matshow(fit_data, cmap='Spectral')
#plt.matshow(fit_data, cmap=plt.cm.RdYlGn)
#plt.matshow(fit_data, cmap=plt.cm.YlOrRd)
#plt.matshow(fit_data)
#plt.matshow(fit_data, cmap='rainbow')
#plt.matshow(fit_data, cmap='Set1')
#plt.matshow(fit_data, cmap='tab20')
#plt.matshow(fit_data, cmap='gist_rainbow')
plt.gca().set_aspect('auto')
#plt.gca().set_aspect('equal', adjustable='box')
#plt.axis('scaled')
#plt.title("After biclustering; rearranged to show biclusters")
plt.show()
#%%
'''
#
#%%
'''
model = SpectralBiclustering(n_clusters=(10, 5), method="log", random_state=0)
#model = SpectralBiclustering(n_clusters=(10, 5), method="bistochastic", random_state=0)
model.fit(df_current.values)
#model.fit(X_transformed)
#fit_data = df_current.values[np.argsort(model.row_labels_)]
fit_data = df_current.values[np.argsort(model.row_labels_)]
#fit_data = X_transformed[:, np.argsort(model.column_labels_)]
#plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.matshow(fit_data, cmap='Spectral')
plt.gca().set_aspect('auto')
#plt.title("After biclustering; rearranged to show biclusters")
#plt.matshow(
# np.outer(np.sort(model.row_labels_) + 1, np.sort(model.column_labels_) + 1),
# cmap=plt.cm.Blues,
#)
plt.matshow(
np.outer(np.sort(model.row_labels_) + 1, np.sort(model.column_labels_) + 1),
cmap='Spectral',
)
plt.gca().set_aspect('auto')
#plt.title("Checkerboard structure of rearranged data")
plt.show()
'''
#%%
X_train, X_test, y_train, y_test = train_test_split(df_current, labels_true, test_size=0.2)
#%%
X_train_ss = X_train[0:int(0.1*len(X_train))]
y_train_ss = y_train[0:int(0.1*len(y_train))]
# Create a pipeline
pipeline = make_pipeline(
NearMiss(version=2), svm.SVC())
pipeline.fit(X_train_ss, y_train_ss)
# Classify and report the results
print(classification_report_imbalanced(y_test, pipeline.predict(X_test))) |
#%% |
helpers.py | # Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may | # not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def _connection_to_manager_uri(conn_uri):
proto, addr = conn_uri.split(':', 1)
if ':' in addr:
ip, port = addr.split(':', 1)
return 'p%s:%s:%s' % (proto, port, ip)
else:
return 'p%s:%s' % (proto, addr)
def enable_connection_uri(conn_uri, execute_func):
manager_uri = _connection_to_manager_uri(conn_uri)
execute_func(['ovs-vsctl', 'set-manager', manager_uri], run_as_root=True) | |
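# Illustrative sketch (not in the original module): the inversion performed by
# _connection_to_manager_uri on the two address forms it handles.
def _example_manager_uri():
    assert _connection_to_manager_uri('tcp:127.0.0.1:6640') == 'ptcp:6640:127.0.0.1'
    assert _connection_to_manager_uri('unix:/run/ovs.sock') == 'punix:/run/ovs.sock'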
phonemeconversion.py | # -*- coding: utf-8 -*-
import logging
XSAMPA_TO_ARPABET_MAPPING = {
# stop
'p': 'P',
'b': 'B',
't': 'T',
'd': 'D',
'k': 'K',
'g': 'G',
'?': 'Q',
# 2 consonants
'pf': 'PF',
'ts': 'TS',
'tS': 'CH',
'dZ': 'JH',
# fricative
'f': 'F',
'v': 'V',
'T': 'TH',
'D': 'DH',
's': 'S',
'z': 'Z',
'S': 'SH',
'Z': 'ZH',
'C': 'CC',
'j': 'Y',
'x': 'X',
'R': 'RR',
'h': 'HH',
'H': 'HHH',
# nasal
'm': 'M',
'n': 'N',
'N': 'NG',
# liquid
'l': 'L',
'r': 'R',
# glide
'w': 'W',
# front vowels
'i': 'IY',
'i:': 'IIH',
'I': 'IH',
'y': 'UE',
'y:': 'YYH',
'Y': 'YY',
'e': 'EE',
'e:': 'EEH',
'2': 'OH',
'2:': 'OHH',
'9': 'OE',
'E': 'EH',
'E:': 'EHH',
'{': 'AE',
'{:': 'AEH',
'a': 'AH',
'a:': 'AAH',
'3': 'ER',
'3:': 'ERH',
# central vowels
'V': 'VV',
'@': 'AX',
'6': 'EX',
# back vowels
'u': 'UH',
'u:': 'UUH',
'U': 'UU',
'o': 'AO',
'o:': 'OOH',
'O': 'OO',
'O:': 'OOOH',
'A': 'AA',
'A:': 'AAAH',
'Q': 'QQ',
# diphtongs vowels
'aI': 'AY',
'OI': 'OI',
'aU': 'AW',
'OY': 'OY',
# Fuzzy stuff
'c': 'K',
'q': 'K'
}
MAX_PHONE_LENGTH = max([len(x) for x in XSAMPA_TO_ARPABET_MAPPING.keys()])
def xsampa_to_arpabet(xsampa_string, sep=' '):
logger = logging.getLogger(__name__)
s = xsampa_string.replace('-', '').replace('\'', '').replace(' ', '')
result = []
i = 0
while i < len(s):
num_remaining_chars = len(s) - i
phone_length = (MAX_PHONE_LENGTH
if MAX_PHONE_LENGTH < num_remaining_chars
else num_remaining_chars)
for j in range(phone_length, 0, -1):
phone = s[i:i + j]
if phone in XSAMPA_TO_ARPABET_MAPPING:
|
else:
logger.warning("Phone not found: '%s'", s[i])
i += 1
return sep.join(result)
| result.append(XSAMPA_TO_ARPABET_MAPPING[phone])
i += j
break |
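# Illustrative sketch (not in the original module): the loop above greedily
# prefers the longest matching phone, so 'tS' maps to 'CH' before 't'/'S' would.
def _example_xsampa_to_arpabet():
    return xsampa_to_arpabet("tSa:")  # -> "CH AAH"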
index.json.ts | import { api } from './_api';
import type { RequestHandler } from '@sveltejs/kit';
// GET /todos.json
export const get: RequestHandler = async (request) => {
if (!request.context.userid) {
// the user has never visited the site before
// and so doesn't yet have a userid, which is
// set in `handle`, in src/hooks.js
return { body: [] };
}
const response = await api(request, `todos/${request.context.userid}`);
| return { body: [] };
}
return response;
};
// POST /todos.json
export const post: RequestHandler = async (request) => {
const response = await api(request, `todos/${request.context.userid}`, {
// because index.svelte posts a FormData object,
// request.body is _also_ a (readonly) FormData
// object, which allows us to get form data
// with the `body.get(key)` method
text: request.body.get('text')
});
return response;
}; | if (response.status === 404) {
// the user has visited before, but hasn't yet
// created a todo list. start with an empty array |
throughput_async.rs | use criterion::{black_box, criterion_group, criterion_main, Criterion};
use spsc::channel;
use tokio::runtime::Runtime;
async fn run_channel(capacity: usize, counts: u32) {
let (mut tx, mut rx) = channel::<u32>(capacity);
tokio::spawn(async move {
for i in 0..counts {
tx.send_async(black_box(i)).await;
}
});
for _ in 0..counts {
black_box(rx.recv_async().await);
}
}
pub fn throughput_bench(c: &mut Criterion) {
let runtime = Runtime::new().unwrap();
const COUNTS: u32 = 1_000_000;
c.bench_function(&format!("async capacity 16 messages {}", COUNTS), |b| {
b.to_async(&runtime).iter(|| run_channel(16, COUNTS))
});
| b.to_async(&runtime).iter(|| run_channel(32, COUNTS))
});
c.bench_function(&format!("async capacity 64 messages {}", COUNTS), |b| {
b.to_async(&runtime).iter(|| run_channel(64, COUNTS))
});
c.bench_function(&format!("async capacity 128 messages {}", COUNTS), |b| {
b.to_async(&runtime).iter(|| run_channel(128, COUNTS))
});
c.bench_function(&format!("async capacity 256 messages {}", COUNTS), |b| {
b.to_async(&runtime).iter(|| run_channel(256, COUNTS))
});
c.bench_function(&format!("async capacity 512 messages {}", COUNTS), |b| {
b.to_async(&runtime).iter(|| run_channel(512, COUNTS))
});
c.bench_function(&format!("async capacity 1024 messages {}", COUNTS), |b| {
b.to_async(&runtime).iter(|| run_channel(1024, COUNTS))
});
c.bench_function(&format!("async capacity 2048 messages {}", COUNTS), |b| {
b.to_async(&runtime).iter(|| run_channel(2048, COUNTS))
});
c.bench_function(&format!("async capacity 4096 messages {}", COUNTS), |b| {
b.to_async(&runtime).iter(|| run_channel(4096, COUNTS))
});
}
criterion_group!(benches, throughput_bench);
criterion_main!(benches); | c.bench_function(&format!("async capacity 32 messages {}", COUNTS), |b| { |
mod.rs | // Copyright 2020 Arm Limited (or its affiliates). All rights reserved.
// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/// Module for the flattened device tree.
pub mod fdt;
/// Module for the global interrupt controller configuration.
pub mod gic;
/// Layout for this aarch64 system.
pub mod layout;
/// Logic for configuring aarch64 registers.
pub mod regs;
pub use self::fdt::DeviceInfoForFDT;
use crate::DeviceType;
use crate::RegionType;
use aarch64::gic::GICDevice;
use std::collections::HashMap;
use std::ffi::CStr;
use std::fmt::Debug;
use std::sync::Arc;
use vm_memory::{
Address, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic, GuestMemoryMmap,
GuestUsize,
};
/// Errors thrown while configuring aarch64 system.
#[derive(Debug)]
pub enum Error {
/// Failed to create a FDT.
SetupFDT(fdt::Error),
/// Failed to create a GIC.
SetupGIC(gic::Error),
/// Failed to compute the initramfs address.
InitramfsAddress,
/// Error configuring the general purpose registers
REGSConfiguration(regs::Error),
/// Error configuring the MPIDR register
VcpuRegMPIDR(hypervisor::HypervisorCpuError),
}
impl From<Error> for super::Error {
fn from(e: Error) -> super::Error {
super::Error::AArch64Setup(e)
}
}
#[derive(Debug, Copy, Clone)]
/// Specifies the entry point address where the guest must start
/// executing code.
pub struct EntryPoint {
/// Address in guest memory where the guest must start execution
pub entry_addr: GuestAddress,
}
/// Configure the specified VCPU, and return its MPIDR.
pub fn configure_vcpu(
fd: &Arc<dyn hypervisor::Vcpu>,
id: u8,
kernel_entry_point: Option<EntryPoint>,
vm_memory: &GuestMemoryAtomic<GuestMemoryMmap>,
_phys_bits: u8,
) -> super::Result<u64> {
if let Some(kernel_entry_point) = kernel_entry_point {
regs::setup_regs(
fd,
id,
kernel_entry_point.entry_addr.raw_value(),
&vm_memory.memory(),
)
.map_err(Error::REGSConfiguration)?;
}
let mpidr = fd.read_mpidr().map_err(Error::VcpuRegMPIDR)?;
Ok(mpidr)
}
pub fn arch_memory_regions(size: GuestUsize) -> Vec<(GuestAddress, usize, RegionType)> {
let mut regions = Vec::new();
// 0 ~ 256 MiB: Reserved
regions.push((
GuestAddress(0),
layout::MEM_32BIT_DEVICES_START.0 as usize,
RegionType::Reserved,
));
// 256 MiB ~ 1 G: MMIO space
regions.push((
layout::MEM_32BIT_DEVICES_START,
layout::MEM_32BIT_DEVICES_SIZE as usize,
RegionType::SubRegion,
));
// 1G ~ 2G: reserved. The leading 256M for PCIe MMCONFIG space
regions.push((
layout::PCI_MMCONFIG_START,
(layout::RAM_64BIT_START - layout::PCI_MMCONFIG_START.0) as usize,
RegionType::Reserved,
));
regions.push((
GuestAddress(layout::RAM_64BIT_START),
size as usize,
RegionType::Ram,
));
| regions
}
/// Configures the system and should be called once per vm before starting vcpu threads.
///
/// # Arguments
///
/// * `guest_mem` - The memory to be used by the guest.
/// * `num_cpus` - Number of virtual CPUs the guest will have.
#[allow(clippy::too_many_arguments)]
pub fn configure_system<T: DeviceInfoForFDT + Clone + Debug, S: ::std::hash::BuildHasher>(
vm: &Arc<dyn hypervisor::Vm>,
guest_mem: &GuestMemoryMmap,
cmdline_cstring: &CStr,
vcpu_count: u64,
vcpu_mpidr: Vec<u64>,
device_info: &HashMap<(DeviceType, String), T, S>,
initrd: &Option<super::InitramfsConfig>,
pci_space_address: &(u64, u64),
) -> super::Result<Box<dyn GICDevice>> {
let gic_device = gic::kvm::create_gic(vm, vcpu_count).map_err(Error::SetupGIC)?;
fdt::create_fdt(
guest_mem,
cmdline_cstring,
vcpu_mpidr,
device_info,
&*gic_device,
initrd,
pci_space_address,
)
.map_err(Error::SetupFDT)?;
Ok(gic_device)
}
/// Returns the memory address where the initramfs could be loaded.
pub fn initramfs_load_addr(
guest_mem: &GuestMemoryMmap,
initramfs_size: usize,
) -> super::Result<u64> {
let round_to_pagesize = |size| (size + (super::PAGE_SIZE - 1)) & !(super::PAGE_SIZE - 1);
match GuestAddress(get_fdt_addr(&guest_mem))
.checked_sub(round_to_pagesize(initramfs_size) as u64)
{
Some(offset) => {
if guest_mem.address_in_range(offset) {
Ok(offset.raw_value())
} else {
Err(super::Error::AArch64Setup(Error::InitramfsAddress))
}
}
None => Err(super::Error::AArch64Setup(Error::InitramfsAddress)),
}
}
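// Illustrative note (not in the original file): `round_to_pagesize` above
// rounds up to the next multiple of the page size via bit masking. Assuming a
// 4 KiB `PAGE_SIZE`, sizes 1..=4096 all map to 4096.
#[allow(dead_code)]
fn _round_to_pagesize_sketch(size: usize) -> usize {
    const PAGE_SIZE: usize = 4096; // assumed value of super::PAGE_SIZE
    (size + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1)
}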
/// Returns the memory address where the kernel could be loaded.
pub fn get_kernel_start() -> u64 {
layout::RAM_64BIT_START
}
// Auxiliary function to get the address where the device tree blob is loaded.
fn get_fdt_addr(mem: &GuestMemoryMmap) -> u64 {
// If the memory allocated is smaller than the size allocated for the FDT,
// we return the start of the DRAM so that
// we allow the code to try and load the FDT.
if let Some(addr) = mem.last_addr().checked_sub(layout::FDT_MAX_SIZE as u64 - 1) {
if mem.address_in_range(addr) {
return addr.raw_value();
}
}
layout::RAM_64BIT_START
}
pub fn get_host_cpu_phys_bits() -> u8 {
// The value returned here is used to determine the physical address space size
// for a VM (IPA size).
// In recent kernel versions, the maximum IPA size supported by the host can be
// known by querying cap KVM_CAP_ARM_VM_IPA_SIZE. And the IPA size for a
// guest can be configured smaller.
// But in Cloud-Hypervisor we simply use the maximum value for the VM.
// Reference https://lwn.net/Articles/766767/.
//
// The correct way to query KVM_CAP_ARM_VM_IPA_SIZE is via rust-vmm/kvm-ioctls,
// which wraps all IOCTL's and provides easy interface to user hypervisors.
// For now the cap hasn't been supported. A separate patch will be submitted to
// rust-vmm to add it.
// So a hardcoded value is used here as a temporary solution.
// It will be replace once rust-vmm/kvm-ioctls is ready.
//
40
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_arch_memory_regions_dram() {
let regions = arch_memory_regions((1usize << 32) as u64); //4GB
assert_eq!(4, regions.len());
assert_eq!(GuestAddress(layout::RAM_64BIT_START), regions[3].0);
assert_eq!(1usize << 32, regions[3].1);
assert_eq!(RegionType::Ram, regions[3].2);
}
#[test]
fn test_get_fdt_addr() {
let mut regions = Vec::new();
regions.push((
GuestAddress(layout::RAM_64BIT_START),
(layout::FDT_MAX_SIZE - 0x1000) as usize,
));
let mem = GuestMemoryMmap::from_ranges(®ions).expect("Cannot initialize memory");
assert_eq!(get_fdt_addr(&mem), layout::RAM_64BIT_START);
regions.clear();
regions.push((
GuestAddress(layout::RAM_64BIT_START),
(layout::FDT_MAX_SIZE) as usize,
));
let mem = GuestMemoryMmap::from_ranges(®ions).expect("Cannot initialize memory");
assert_eq!(get_fdt_addr(&mem), layout::RAM_64BIT_START);
regions.clear();
regions.push((
GuestAddress(layout::RAM_64BIT_START),
(layout::FDT_MAX_SIZE + 0x1000) as usize,
));
let mem = GuestMemoryMmap::from_ranges(®ions).expect("Cannot initialize memory");
assert_eq!(get_fdt_addr(&mem), 0x1000 + layout::RAM_64BIT_START);
regions.clear();
}
} | |
eister_emmc_mode.rs | #[doc = "Reader of register EISTER_EMMC_MODE"]
pub type R = crate::R<u16, super::EISTER_EMMC_MODE>;
#[doc = "Writer for register EISTER_EMMC_MODE"]
pub type W = crate::W<u16, super::EISTER_EMMC_MODE>;
#[doc = "Register EISTER_EMMC_MODE `reset()`'s with value 0"]
impl crate::ResetValue for super::EISTER_EMMC_MODE {
type Type = u16;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Command Timeout Error Status Enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CMDTEO_A {
#[doc = "0: Masked"]
MASKED = 0,
#[doc = "1: Enabled"]
ENABLED = 1,
}
impl From<CMDTEO_A> for bool {
#[inline(always)]
fn from(variant: CMDTEO_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `CMDTEO`"]
pub type CMDTEO_R = crate::R<bool, CMDTEO_A>;
impl CMDTEO_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> CMDTEO_A {
match self.bits {
false => CMDTEO_A::MASKED,
true => CMDTEO_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `MASKED`"]
#[inline(always)]
pub fn is_masked(&self) -> bool {
*self == CMDTEO_A::MASKED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
*self == CMDTEO_A::ENABLED
}
}
#[doc = "Write proxy for field `CMDTEO`"]
pub struct CMDTEO_W<'a> {
w: &'a mut W,
}
impl<'a> CMDTEO_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: CMDTEO_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Masked"]
#[inline(always)]
pub fn masked(self) -> &'a mut W {
self.variant(CMDTEO_A::MASKED)
}
#[doc = "Enabled"]
#[inline(always)]
pub fn enabled(self) -> &'a mut W {
self.variant(CMDTEO_A::ENABLED)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u16) & 0x01);
self.w
}
}
#[doc = "Command CRC Error Status Enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CMDCRC_A {
#[doc = "0: Masked"]
MASKED = 0,
#[doc = "1: Enabled"]
ENABLED = 1,
}
impl From<CMDCRC_A> for bool {
#[inline(always)]
fn from(variant: CMDCRC_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `CMDCRC`"]
pub type CMDCRC_R = crate::R<bool, CMDCRC_A>;
impl CMDCRC_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> CMDCRC_A {
match self.bits {
false => CMDCRC_A::MASKED,
true => CMDCRC_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `MASKED`"]
#[inline(always)]
pub fn is_masked(&self) -> bool {
*self == CMDCRC_A::MASKED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
*self == CMDCRC_A::ENABLED
}
}
#[doc = "Write proxy for field `CMDCRC`"]
pub struct CMDCRC_W<'a> {
w: &'a mut W,
}
impl<'a> CMDCRC_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: CMDCRC_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Masked"]
#[inline(always)]
pub fn masked(self) -> &'a mut W {
self.variant(CMDCRC_A::MASKED)
}
#[doc = "Enabled"]
#[inline(always)]
pub fn enabled(self) -> &'a mut W {
self.variant(CMDCRC_A::ENABLED)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u16) & 0x01) << 1);
self.w
}
}
#[doc = "Command End Bit Error Status Enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CMDEND_A {
#[doc = "0: Masked"]
MASKED = 0,
#[doc = "1: Enabled"]
ENABLED = 1,
}
impl From<CMDEND_A> for bool {
#[inline(always)]
fn from(variant: CMDEND_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `CMDEND`"]
pub type CMDEND_R = crate::R<bool, CMDEND_A>;
impl CMDEND_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> CMDEND_A {
match self.bits {
false => CMDEND_A::MASKED,
true => CMDEND_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `MASKED`"]
#[inline(always)]
pub fn is_masked(&self) -> bool {
*self == CMDEND_A::MASKED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
*self == CMDEND_A::ENABLED
}
}
#[doc = "Write proxy for field `CMDEND`"]
pub struct CMDEND_W<'a> {
w: &'a mut W,
}
impl<'a> CMDEND_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: CMDEND_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Masked"]
#[inline(always)]
pub fn masked(self) -> &'a mut W {
self.variant(CMDEND_A::MASKED)
}
#[doc = "Enabled"]
#[inline(always)]
pub fn enabled(self) -> &'a mut W {
self.variant(CMDEND_A::ENABLED)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u16) & 0x01) << 2);
self.w
}
}
#[doc = "Command Index Error Status Enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CMDIDX_A {
#[doc = "0: Masked"]
MASKED = 0,
#[doc = "1: Enabled"]
ENABLED = 1,
}
impl From<CMDIDX_A> for bool {
#[inline(always)]
fn from(variant: CMDIDX_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `CMDIDX`"]
pub type CMDIDX_R = crate::R<bool, CMDIDX_A>;
impl CMDIDX_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> CMDIDX_A {
match self.bits {
false => CMDIDX_A::MASKED,
true => CMDIDX_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `MASKED`"]
#[inline(always)]
pub fn is_masked(&self) -> bool {
*self == CMDIDX_A::MASKED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
*self == CMDIDX_A::ENABLED
}
}
#[doc = "Write proxy for field `CMDIDX`"]
pub struct CMDIDX_W<'a> {
w: &'a mut W,
}
impl<'a> CMDIDX_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: CMDIDX_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Masked"]
#[inline(always)]
pub fn masked(self) -> &'a mut W {
self.variant(CMDIDX_A::MASKED)
}
#[doc = "Enabled"]
#[inline(always)]
pub fn enabled(self) -> &'a mut W {
self.variant(CMDIDX_A::ENABLED)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u16) & 0x01) << 3);
self.w
}
}
#[doc = "Data Timeout Error Status Enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DATTEO_A {
#[doc = "0: Masked"]
MASKED = 0,
#[doc = "1: Enabled"]
ENABLED = 1,
}
impl From<DATTEO_A> for bool {
#[inline(always)]
fn from(variant: DATTEO_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `DATTEO`"]
pub type DATTEO_R = crate::R<bool, DATTEO_A>;
impl DATTEO_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> DATTEO_A {
match self.bits {
false => DATTEO_A::MASKED,
true => DATTEO_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `MASKED`"]
#[inline(always)]
pub fn is_masked(&self) -> bool {
*self == DATTEO_A::MASKED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
*self == DATTEO_A::ENABLED
}
}
#[doc = "Write proxy for field `DATTEO`"]
pub struct DATTEO_W<'a> {
w: &'a mut W,
}
impl<'a> DATTEO_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: DATTEO_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Masked"]
#[inline(always)]
pub fn masked(self) -> &'a mut W {
self.variant(DATTEO_A::MASKED)
}
#[doc = "Enabled"]
#[inline(always)]
pub fn enabled(self) -> &'a mut W {
self.variant(DATTEO_A::ENABLED)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u16) & 0x01) << 4);
self.w
}
}
#[doc = "Data CRC Error Status Enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DATCRC_A {
#[doc = "0: Masked"]
MASKED = 0,
#[doc = "1: Enabled"]
ENABLED = 1,
}
impl From<DATCRC_A> for bool {
#[inline(always)]
fn from(variant: DATCRC_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `DATCRC`"]
pub type DATCRC_R = crate::R<bool, DATCRC_A>;
impl DATCRC_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> DATCRC_A {
match self.bits {
false => DATCRC_A::MASKED,
true => DATCRC_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `MASKED`"]
#[inline(always)]
pub fn is_masked(&self) -> bool {
*self == DATCRC_A::MASKED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
*self == DATCRC_A::ENABLED
}
}
#[doc = "Write proxy for field `DATCRC`"]
pub struct DATCRC_W<'a> {
w: &'a mut W,
}
impl<'a> DATCRC_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: DATCRC_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Masked"]
#[inline(always)]
pub fn masked(self) -> &'a mut W {
self.variant(DATCRC_A::MASKED)
}
#[doc = "Enabled"]
#[inline(always)]
pub fn enabled(self) -> &'a mut W {
self.variant(DATCRC_A::ENABLED)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u16) & 0x01) << 5);
self.w
}
}
#[doc = "Data End Bit Error Status Enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DATEND_A {
#[doc = "0: Masked"]
MASKED = 0,
#[doc = "1: Enabled"]
ENABLED = 1,
}
impl From<DATEND_A> for bool {
#[inline(always)]
fn from(variant: DATEND_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `DATEND`"]
pub type DATEND_R = crate::R<bool, DATEND_A>;
impl DATEND_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> DATEND_A {
match self.bits {
false => DATEND_A::MASKED,
true => DATEND_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `MASKED`"]
#[inline(always)]
pub fn is_masked(&self) -> bool {
*self == DATEND_A::MASKED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
*self == DATEND_A::ENABLED
}
}
#[doc = "Write proxy for field `DATEND`"]
pub struct DATEND_W<'a> {
w: &'a mut W,
}
impl<'a> DATEND_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: DATEND_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Masked"]
#[inline(always)]
pub fn masked(self) -> &'a mut W {
self.variant(DATEND_A::MASKED)
}
#[doc = "Enabled"]
#[inline(always)]
pub fn enabled(self) -> &'a mut W {
self.variant(DATEND_A::ENABLED)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 6)) | (((value as u16) & 0x01) << 6);
self.w
}
}
#[doc = "Current Limit Error Status Enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CURLIM_A {
#[doc = "0: Masked"]
MASKED = 0,
#[doc = "1: Enabled"]
ENABLED = 1,
}
impl From<CURLIM_A> for bool {
#[inline(always)]
fn from(variant: CURLIM_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `CURLIM`"]
pub type CURLIM_R = crate::R<bool, CURLIM_A>;
impl CURLIM_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> CURLIM_A {
match self.bits {
false => CURLIM_A::MASKED,
true => CURLIM_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `MASKED`"]
#[inline(always)]
pub fn is_masked(&self) -> bool {
*self == CURLIM_A::MASKED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
*self == CURLIM_A::ENABLED
}
}
#[doc = "Write proxy for field `CURLIM`"]
pub struct CURLIM_W<'a> {
w: &'a mut W,
}
impl<'a> CURLIM_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: CURLIM_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Masked"]
#[inline(always)]
pub fn masked(self) -> &'a mut W {
self.variant(CURLIM_A::MASKED)
}
#[doc = "Enabled"]
#[inline(always)]
pub fn enabled(self) -> &'a mut W {
self.variant(CURLIM_A::ENABLED)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u16) & 0x01) << 7);
self.w
}
}
#[doc = "Auto CMD Error Status Enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ACMD_A {
#[doc = "0: Masked"]
MASKED = 0,
#[doc = "1: Enabled"]
ENABLED = 1,
}
impl From<ACMD_A> for bool {
#[inline(always)]
fn from(variant: ACMD_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `ACMD`"]
pub type ACMD_R = crate::R<bool, ACMD_A>;
impl ACMD_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> ACMD_A {
match self.bits {
false => ACMD_A::MASKED,
true => ACMD_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `MASKED`"]
#[inline(always)]
pub fn is_masked(&self) -> bool {
*self == ACMD_A::MASKED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
*self == ACMD_A::ENABLED
}
}
#[doc = "Write proxy for field `ACMD`"]
pub struct ACMD_W<'a> {
w: &'a mut W,
}
impl<'a> ACMD_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: ACMD_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Masked"]
#[inline(always)]
pub fn masked(self) -> &'a mut W {
self.variant(ACMD_A::MASKED)
}
#[doc = "Enabled"]
#[inline(always)]
pub fn enabled(self) -> &'a mut W {
self.variant(ACMD_A::ENABLED)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u16) & 0x01) << 8);
self.w
}
}
#[doc = "ADMA Error Status Enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ADMA_A {
#[doc = "0: Masked"]
MASKED = 0,
#[doc = "1: Enabled"]
ENABLED = 1,
}
impl From<ADMA_A> for bool {
#[inline(always)]
fn from(variant: ADMA_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `ADMA`"]
pub type ADMA_R = crate::R<bool, ADMA_A>;
impl ADMA_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> ADMA_A {
match self.bits {
false => ADMA_A::MASKED,
true => ADMA_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `MASKED`"]
#[inline(always)]
pub fn is_masked(&self) -> bool {
*self == ADMA_A::MASKED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
*self == ADMA_A::ENABLED
}
}
#[doc = "Write proxy for field `ADMA`"]
pub struct ADMA_W<'a> {
w: &'a mut W,
}
impl<'a> ADMA_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: ADMA_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Masked"]
#[inline(always)]
pub fn masked(self) -> &'a mut W {
self.variant(ADMA_A::MASKED)
}
#[doc = "Enabled"]
#[inline(always)]
pub fn enabled(self) -> &'a mut W {
self.variant(ADMA_A::ENABLED)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u16) & 0x01) << 9);
self.w
}
}
#[doc = "Reader of field `BOOTAE`"]
pub type BOOTAE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `BOOTAE`"]
pub struct BOOTAE_W<'a> {
w: &'a mut W,
}
impl<'a> BOOTAE_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 12)) | (((value as u16) & 0x01) << 12);
self.w
}
}
impl R {
#[doc = "Bit 0 - Command Timeout Error Status Enable"]
#[inline(always)]
pub fn cmdteo(&self) -> CMDTEO_R {
CMDTEO_R::new((self.bits & 0x01) != 0)
}
#[doc = "Bit 1 - Command CRC Error Status Enable"]
#[inline(always)]
pub fn cmdcrc(&self) -> CMDCRC_R {
CMDCRC_R::new(((self.bits >> 1) & 0x01) != 0)
}
#[doc = "Bit 2 - Command End Bit Error Status Enable"]
#[inline(always)]
pub fn cmdend(&self) -> CMDEND_R {
CMDEND_R::new(((self.bits >> 2) & 0x01) != 0)
}
#[doc = "Bit 3 - Command Index Error Status Enable"]
#[inline(always)]
pub fn cmdidx(&self) -> CMDIDX_R {
CMDIDX_R::new(((self.bits >> 3) & 0x01) != 0)
}
#[doc = "Bit 4 - Data Timeout Error Status Enable"]
#[inline(always)]
pub fn datteo(&self) -> DATTEO_R {
DATTEO_R::new(((self.bits >> 4) & 0x01) != 0)
}
#[doc = "Bit 5 - Data CRC Error Status Enable"]
#[inline(always)]
pub fn datcrc(&self) -> DATCRC_R {
DATCRC_R::new(((self.bits >> 5) & 0x01) != 0)
}
#[doc = "Bit 6 - Data End Bit Error Status Enable"]
#[inline(always)]
pub fn datend(&self) -> DATEND_R {
DATEND_R::new(((self.bits >> 6) & 0x01) != 0)
}
#[doc = "Bit 7 - Current Limit Error Status Enable"]
#[inline(always)]
pub fn curlim(&self) -> CURLIM_R {
CURLIM_R::new(((self.bits >> 7) & 0x01) != 0)
}
#[doc = "Bit 8 - Auto CMD Error Status Enable"]
#[inline(always)]
pub fn acmd(&self) -> ACMD_R {
ACMD_R::new(((self.bits >> 8) & 0x01) != 0)
}
#[doc = "Bit 9 - ADMA Error Status Enable"]
#[inline(always)]
pub fn adma(&self) -> ADMA_R {
ADMA_R::new(((self.bits >> 9) & 0x01) != 0)
}
#[doc = "Bit 12 - Boot Acknowledge Error Status Enable"]
#[inline(always)]
pub fn bootae(&self) -> BOOTAE_R {
BOOTAE_R::new(((self.bits >> 12) & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 0 - Command Timeout Error Status Enable"]
#[inline(always)]
pub fn cmdteo(&mut self) -> CMDTEO_W {
CMDTEO_W { w: self }
}
#[doc = "Bit 1 - Command CRC Error Status Enable"]
#[inline(always)]
pub fn cmdcrc(&mut self) -> CMDCRC_W {
CMDCRC_W { w: self }
}
#[doc = "Bit 2 - Command End Bit Error Status Enable"]
#[inline(always)]
pub fn cmdend(&mut self) -> CMDEND_W {
CMDEND_W { w: self }
}
#[doc = "Bit 3 - Command Index Error Status Enable"]
#[inline(always)]
pub fn | (&mut self) -> CMDIDX_W {
CMDIDX_W { w: self }
}
#[doc = "Bit 4 - Data Timeout Error Status Enable"]
#[inline(always)]
pub fn datteo(&mut self) -> DATTEO_W {
DATTEO_W { w: self }
}
#[doc = "Bit 5 - Data CRC Error Status Enable"]
#[inline(always)]
pub fn datcrc(&mut self) -> DATCRC_W {
DATCRC_W { w: self }
}
#[doc = "Bit 6 - Data End Bit Error Status Enable"]
#[inline(always)]
pub fn datend(&mut self) -> DATEND_W {
DATEND_W { w: self }
}
#[doc = "Bit 7 - Current Limit Error Status Enable"]
#[inline(always)]
pub fn curlim(&mut self) -> CURLIM_W {
CURLIM_W { w: self }
}
#[doc = "Bit 8 - Auto CMD Error Status Enable"]
#[inline(always)]
pub fn acmd(&mut self) -> ACMD_W {
ACMD_W { w: self }
}
#[doc = "Bit 9 - ADMA Error Status Enable"]
#[inline(always)]
pub fn adma(&mut self) -> ADMA_W {
ADMA_W { w: self }
}
#[doc = "Bit 12 - Boot Acknowledge Error Status Enable"]
#[inline(always)]
pub fn bootae(&mut self) -> BOOTAE_W {
BOOTAE_W { w: self }
}
}
| cmdidx |
mod.rs | pub mod metric_name;
pub mod metric_name_template; | pub mod header;
pub mod metrics;
pub mod record;
|
|
dev_test_cex_full_non_stop.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: dev_test_cex_full_non_stop.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api
# Documentation: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: Oliver Zehentleitner
# https://about.me/oliver-zehentleitner
#
# Copyright (c) 2019-2020, Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager
import logging
import math
import os
import requests
import sys
import time
import threading
try:
from binance.client import Client
except ImportError:
print("Please install `python-binance`! https://pypi.org/project/python-binance/#description")
sys.exit(1)
binance_api_key = ""
binance_api_secret = ""
channels = {'aggTrade', 'trade', 'kline_1m', 'kline_5m', 'kline_15m', 'kline_30m', 'kline_1h', 'kline_2h', 'kline_4h',
'kline_6h', 'kline_8h', 'kline_12h', 'kline_1d', 'kline_3d', 'kline_1w', 'kline_1M', 'miniTicker',
'ticker', 'bookTicker', 'depth5', 'depth10', 'depth20', 'depth', 'depth@100ms'}
arr_channels = {'!miniTicker', '!ticker', '!bookTicker'}
# https://docs.python.org/3/library/logging.html#logging-levels
logging.basicConfig(level=logging.INFO,
filename=os.path.basename(__file__) + '.log',
format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
style="{")
def prin | ance_websocket_api_manager):
time.sleep(30)
while True:
if binance_websocket_api_manager.is_manager_stopping():
exit(0)
oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
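        # NOTE (assumption about intent): records popped from the stream_buffer
        # are deliberately discarded in this non-stop stress test; plug real
        # processing in here if the payloads are needed.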
if oldest_stream_data_from_stream_buffer is False:
time.sleep(0.01)
# create instance of BinanceWebSocketApiManager
#binance_websocket_api_manager = BinanceWebSocketApiManager(throw_exception_if_unrepairable=True)
binance_websocket_api_manager = BinanceWebSocketApiManager(throw_exception_if_unrepairable=False)
print("starting monitoring api!")
binance_websocket_api_manager.start_monitoring_api()
try:
    binance_rest_client = Client(binance_api_key, binance_api_secret)
except requests.exceptions.ConnectionError:
print("No internet connection?")
sys.exit(1)
# start a worker process to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,))
worker_thread.start()
markets = []
data = binance_rest_client.get_all_tickers()
for item in data:
markets.append(item['symbol'])
private_stream_id = binance_websocket_api_manager.create_stream(["!userData"],
["arr"],
api_key=binance_api_key,
api_secret=binance_api_secret,
stream_label="userData stream!")
binance_websocket_api_manager.create_stream(arr_channels, "arr", stream_label="`arr` channels")
divisor = math.ceil(len(markets) / binance_websocket_api_manager.get_limit_of_subscriptions_per_stream())
max_subscriptions = math.ceil(len(markets) / divisor)
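# Chunking sketch: `divisor` is the number of streams needed per channel so
# that each stream stays under the per-stream subscription limit, and
# `max_subscriptions` splits the market list into `divisor` roughly equal
# chunks. E.g. with 1200 markets and a limit of 1024: divisor = 2,
# max_subscriptions = 600.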
for channel in channels:
if len(markets) <= max_subscriptions:
binance_websocket_api_manager.create_stream(channel, markets, stream_label=channel)
else:
loops = 1
i = 1
markets_sub = []
for market in markets:
markets_sub.append(market)
if i == max_subscriptions or loops * max_subscriptions + i == len(markets):
binance_websocket_api_manager.create_stream(channel, markets_sub,
stream_label=str(channel + "_" + str(i)))
markets_sub = []
i = 1
loops += 1
i += 1
while True:
binance_websocket_api_manager.print_summary()
time.sleep(1)
| t_stream_data_from_stream_buffer(bin |
def.rs | use self::Namespace::*;
use crate::hir::def_id::{DefId, CRATE_DEF_INDEX, LOCAL_CRATE};
use crate::hir;
use crate::ty;
use crate::util::nodemap::DefIdMap;
use syntax::ast;
use syntax::ext::base::MacroKind;
use syntax::ast::NodeId;
use syntax_pos::Span;
use rustc_macros::HashStable;
use std::fmt::Debug;
/// Encodes if a `DefKind::Ctor` is the constructor of an enum variant or a struct.
#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, HashStable)]
pub enum CtorOf {
/// This `DefKind::Ctor` is a synthesized constructor of a tuple or unit struct.
Struct,
/// This `DefKind::Ctor` is a synthesized constructor of a tuple or unit variant.
Variant,
}
#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, HashStable)]
pub enum CtorKind {
/// Constructor function automatically created by a tuple struct/variant.
Fn,
/// Constructor constant automatically created by a unit struct/variant.
Const,
/// Unusable name in value namespace created by a struct variant.
Fictive,
}
#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, HashStable)]
pub enum NonMacroAttrKind {
/// Single-segment attribute defined by the language (`#[inline]`)
Builtin,
/// Multi-segment custom attribute living in a "tool module" (`#[rustfmt::skip]`).
Tool,
/// Single-segment custom attribute registered by a derive macro (`#[serde(default)]`).
DeriveHelper,
/// Single-segment custom attribute registered by a legacy plugin (`register_attribute`).
LegacyPluginHelper,
/// Single-segment custom attribute not registered in any way (`#[my_attr]`).
Custom,
}
#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, HashStable)]
pub enum DefKind {
// Type namespace
Mod,
/// Refers to the struct itself, `DefKind::Ctor` refers to its constructor if it exists.
Struct,
Union,
Enum,
/// Refers to the variant itself, `DefKind::Ctor` refers to its constructor if it exists.
Variant,
Trait,
/// `type Foo = impl Bar;`
OpaqueTy,
/// `type Foo = Bar;`
TyAlias,
ForeignTy,
TraitAlias,
AssocTy,
/// `type Foo = impl Bar;`
AssocOpaqueTy,
TyParam,
// Value namespace
Fn,
Const,
ConstParam,
Static,
/// Refers to the struct or enum variant's constructor.
Ctor(CtorOf, CtorKind),
Method,
AssocConst,
// Macro namespace
Macro(MacroKind),
}
impl DefKind {
pub fn descr(self, def_id: DefId) -> &'static str {
match self {
DefKind::Fn => "function",
DefKind::Mod if def_id.index == CRATE_DEF_INDEX && def_id.krate != LOCAL_CRATE =>
"crate",
DefKind::Mod => "module",
DefKind::Static => "static",
DefKind::Enum => "enum",
DefKind::Variant => "variant",
DefKind::Ctor(CtorOf::Variant, CtorKind::Fn) => "tuple variant",
DefKind::Ctor(CtorOf::Variant, CtorKind::Const) => "unit variant",
DefKind::Ctor(CtorOf::Variant, CtorKind::Fictive) => "struct variant",
DefKind::Struct => "struct",
DefKind::Ctor(CtorOf::Struct, CtorKind::Fn) => "tuple struct",
DefKind::Ctor(CtorOf::Struct, CtorKind::Const) => "unit struct",
DefKind::Ctor(CtorOf::Struct, CtorKind::Fictive) =>
bug!("impossible struct constructor"),
DefKind::OpaqueTy => "opaque type",
DefKind::TyAlias => "type alias",
DefKind::TraitAlias => "trait alias",
DefKind::AssocTy => "associated type",
DefKind::AssocOpaqueTy => "associated opaque type",
DefKind::Union => "union",
DefKind::Trait => "trait",
DefKind::ForeignTy => "foreign type",
DefKind::Method => "method",
DefKind::Const => "constant",
DefKind::AssocConst => "associated constant",
DefKind::TyParam => "type parameter",
DefKind::ConstParam => "const parameter",
DefKind::Macro(macro_kind) => macro_kind.descr(),
}
}
/// Gets an English article for the definition.
pub fn article(&self) -> &'static str {
match *self {
DefKind::AssocTy
| DefKind::AssocConst
| DefKind::AssocOpaqueTy
| DefKind::Enum
| DefKind::OpaqueTy => "an",
DefKind::Macro(macro_kind) => macro_kind.article(),
_ => "a",
}
}
}
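// Sketch: `article` and `descr` are meant to compose in diagnostics, e.g.
// format!("expected a type, found {} `{}`", kind.article(), kind.descr(def_id))
// produces text like "found a tuple struct `Foo`".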
#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, HashStable)]
pub enum Res<Id = hir::HirId> {
Def(DefKind, DefId),
// Type namespace
PrimTy(hir::PrimTy),
SelfTy(Option<DefId> /* trait */, Option<DefId> /* impl */),
ToolMod, // e.g., `rustfmt` in `#[rustfmt::skip]`
// Value namespace
SelfCtor(DefId /* impl */), // `DefId` refers to the impl
Local(Id),
// Macro namespace
NonMacroAttr(NonMacroAttrKind), // e.g., `#[inline]` or `#[rustfmt::skip]`
// All namespaces
Err,
}
/// The result of resolving a path before lowering to HIR,
/// with "module" segments resolved and associated item
/// segments deferred to type checking.
/// `base_res` is the resolution of the resolved part of the
/// path, `unresolved_segments` is the number of unresolved
/// segments.
///
/// ```text
/// module::Type::AssocX::AssocY::MethodOrAssocType
/// ^~~~~~~~~~~~ ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// base_res unresolved_segments = 3
///
/// <T as Trait>::AssocX::AssocY::MethodOrAssocType
/// ^~~~~~~~~~~~~~ ^~~~~~~~~~~~~~~~~~~~~~~~~
/// base_res unresolved_segments = 2
/// ```
#[derive(Copy, Clone, Debug)]
pub struct PartialRes {
base_res: Res<NodeId>,
unresolved_segments: usize,
}
impl PartialRes {
#[inline]
pub fn new(base_res: Res<NodeId>) -> Self {
PartialRes { base_res, unresolved_segments: 0 }
}
#[inline]
pub fn with_unresolved_segments(base_res: Res<NodeId>, mut unresolved_segments: usize) -> Self {
if base_res == Res::Err { unresolved_segments = 0 }
PartialRes { base_res, unresolved_segments }
}
#[inline]
pub fn base_res(&self) -> Res<NodeId> {
self.base_res
}
#[inline]
pub fn unresolved_segments(&self) -> usize {
self.unresolved_segments
}
}
/// Different kinds of symbols don't influence each other.
///
/// Therefore, they have a separate universe (namespace).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub enum Namespace {
TypeNS,
ValueNS,
MacroNS,
}
impl Namespace {
pub fn descr(self) -> &'static str {
match self {
TypeNS => "type",
ValueNS => "value",
MacroNS => "macro",
}
}
}
/// Just a helper ‒ separate structure for each namespace.
#[derive(Copy, Clone, Default, Debug)]
pub struct PerNS<T> {
pub value_ns: T,
pub type_ns: T,
pub macro_ns: T,
}
impl<T> PerNS<T> {
pub fn map<U, F: FnMut(T) -> U>(self, mut f: F) -> PerNS<U> {
PerNS {
value_ns: f(self.value_ns),
type_ns: f(self.type_ns),
macro_ns: f(self.macro_ns),
}
}
}
impl<T> ::std::ops::Index<Namespace> for PerNS<T> {
type Output = T;
fn index(&self, ns: Namespace) -> &T {
match ns {
ValueNS => &self.value_ns,
TypeNS => &self.type_ns,
MacroNS => &self.macro_ns,
}
}
}
impl<T> ::std::ops::IndexMut<Namespace> for PerNS<T> {
fn index_mut(&mut self, ns: Namespace) -> &mut T {
match ns {
ValueNS => &mut self.value_ns,
TypeNS => &mut self.type_ns,
MacroNS => &mut self.macro_ns,
}
}
}
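// Sketch: the Index/IndexMut impls above let callers write `per_ns[TypeNS]`
// instead of matching on the namespace by hand:
//
//     let mut per_ns: PerNS<Vec<Res>> = PerNS::default();
//     per_ns[TypeNS].push(res);
//     assert!(per_ns[ValueNS].is_empty());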
impl<T> PerNS<Option<T>> {
/// Returns `true` if all the items in this collection are `None`.
pub fn is_empty(&self) -> bool {
self.type_ns.is_none() && self.value_ns.is_none() && self.macro_ns.is_none()
}
/// Returns an iterator over the items which are `Some`.
pub fn present_items(self) -> impl Iterator<Item=T> {
use std::iter::once;
once(self.type_ns)
.chain(once(self.value_ns))
.chain(once(self.macro_ns))
.filter_map(|it| it)
}
}
/// This is the replacement export map. It maps a module to all of the exports
/// within.
pub type ExportMap<Id> = DefIdMap<Vec<Export<Id>>>;
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable, HashStable)]
pub struct Export<Id> {
/// The name of the target.
pub ident: ast::Ident,
/// The resolution of the target.
pub res: Res<Id>,
/// The span of the target.
pub span: Span,
/// The visibility of the export.
/// We include non-`pub` exports for hygienic macros that get used from extern crates.
pub vis: ty::Visibility,
}
impl<Id> Export<Id> {
pub fn ma | >(self, map: impl FnMut(Id) -> R) -> Export<R> {
Export {
ident: self.ident,
res: self.res.map_id(map),
span: self.span,
vis: self.vis,
}
}
}
impl CtorKind {
pub fn from_ast(vdata: &ast::VariantData) -> CtorKind {
match *vdata {
ast::VariantData::Tuple(..) => CtorKind::Fn,
ast::VariantData::Unit(..) => CtorKind::Const,
ast::VariantData::Struct(..) => CtorKind::Fictive,
}
}
pub fn from_hir(vdata: &hir::VariantData) -> CtorKind {
match *vdata {
hir::VariantData::Tuple(..) => CtorKind::Fn,
hir::VariantData::Unit(..) => CtorKind::Const,
hir::VariantData::Struct(..) => CtorKind::Fictive,
}
}
}
impl NonMacroAttrKind {
pub fn descr(self) -> &'static str {
match self {
NonMacroAttrKind::Builtin => "built-in attribute",
NonMacroAttrKind::Tool => "tool attribute",
NonMacroAttrKind::DeriveHelper => "derive helper attribute",
NonMacroAttrKind::LegacyPluginHelper => "legacy plugin helper attribute",
NonMacroAttrKind::Custom => "custom attribute",
}
}
}
impl<Id> Res<Id> {
    /// Return the `DefId` of this `Res` if it has an ID, else panic.
pub fn def_id(&self) -> DefId
where
Id: Debug,
{
self.opt_def_id().unwrap_or_else(|| {
bug!("attempted .def_id() on invalid res: {:?}", self)
})
}
    /// Return `Some(..)` with the `DefId` of this `Res` if it has an ID, else `None`.
pub fn opt_def_id(&self) -> Option<DefId> {
match *self {
Res::Def(_, id) => Some(id),
Res::Local(..) |
Res::PrimTy(..) |
Res::SelfTy(..) |
Res::SelfCtor(..) |
Res::ToolMod |
Res::NonMacroAttr(..) |
Res::Err => {
None
}
}
}
/// Return the `DefId` of this `Res` if it represents a module.
pub fn mod_def_id(&self) -> Option<DefId> {
match *self {
Res::Def(DefKind::Mod, id) => Some(id),
_ => None,
}
}
/// A human readable name for the res kind ("function", "module", etc.).
pub fn descr(&self) -> &'static str {
match *self {
Res::Def(kind, def_id) => kind.descr(def_id),
Res::SelfCtor(..) => "self constructor",
Res::PrimTy(..) => "builtin type",
Res::Local(..) => "local variable",
Res::SelfTy(..) => "self type",
Res::ToolMod => "tool module",
Res::NonMacroAttr(attr_kind) => attr_kind.descr(),
Res::Err => "unresolved item",
}
}
/// Gets an English article for the `Res`.
pub fn article(&self) -> &'static str {
match *self {
Res::Def(kind, _) => kind.article(),
Res::Err => "an",
_ => "a",
}
}
pub fn map_id<R>(self, mut map: impl FnMut(Id) -> R) -> Res<R> {
match self {
Res::Def(kind, id) => Res::Def(kind, id),
Res::SelfCtor(id) => Res::SelfCtor(id),
Res::PrimTy(id) => Res::PrimTy(id),
Res::Local(id) => Res::Local(map(id)),
Res::SelfTy(a, b) => Res::SelfTy(a, b),
Res::ToolMod => Res::ToolMod,
Res::NonMacroAttr(attr_kind) => Res::NonMacroAttr(attr_kind),
Res::Err => Res::Err,
}
}
pub fn macro_kind(self) -> Option<MacroKind> {
match self {
Res::Def(DefKind::Macro(kind), _) => Some(kind),
Res::NonMacroAttr(..) => Some(MacroKind::Attr),
_ => None,
}
}
}
| p_id<R |
index.js | import React, { Component } from 'react';
import PropTypes from 'prop-types';
import { Image, ImageBackground, Platform, StyleSheet, TouchableOpacity, View, ViewPropTypes } from 'react-native';
import Icon from 'react-native-vector-icons/MaterialIcons';
import Video from 'react-native-video'; // eslint-disable-line
const BackgroundImage = ImageBackground || Image; // fall back to Image if RN < 0.46
const styles = StyleSheet.create({
preloadingPlaceholder: {
backgroundColor: 'black',
justifyContent: 'center',
alignItems: 'center',
},
thumbnail: {
backgroundColor: 'black',
justifyContent: 'center',
alignItems: 'center',
},
playButton: {
backgroundColor: 'rgba(0, 0, 0, 0.6)',
width: 64,
height: 64,
borderRadius: 32,
justifyContent: 'center',
alignItems: 'center',
},
playArrow: {
color: 'white',
},
video: Platform.Version >= 24 ? {} : {
backgroundColor: 'black',
},
controls: {
backgroundColor: 'rgba(0, 0, 0, 0.6)',
height: 48,
marginTop: -48,
flexDirection: 'row',
alignItems: 'center',
},
playControl: {
color: 'white',
padding: 8,
},
extraControl: {
color: 'white',
padding: 8,
},
seekBar: {
alignItems: 'center',
height: 30,
flexGrow: 1,
flexDirection: 'row',
paddingHorizontal: 10,
marginLeft: -10,
marginRight: -5,
},
seekBarFullWidth: {
marginLeft: 0,
marginRight: 0,
paddingHorizontal: 0,
marginTop: -3,
height: 3,
},
seekBarProgress: {
height: 3,
backgroundColor: '#F00',
},
seekBarKnob: {
width: 20,
height: 20,
marginHorizontal: -8,
marginVertical: -10,
borderRadius: 10,
backgroundColor: '#F00',
transform: [{ scale: 0.8 }],
zIndex: 1,
},
seekBarBackground: {
backgroundColor: 'rgba(255, 255, 255, 0.5)',
height: 3,
},
overlayButton: {
flex: 1,
},
});
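// Usage sketch — prop names match the propTypes/defaultProps declared at the
// bottom of this file; the URLs are placeholders:
//
//   <VideoPlayer
//     video={{ uri: 'https://example.com/video.mp4' }}
//     thumbnail={{ uri: 'https://example.com/thumbnail.jpg' }}
//     videoWidth={1600}
//     videoHeight={900}
//   />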
export default class | extends Component {
constructor(props) {
super(props);
this.state = {
isStarted: props.autoplay,
isPlaying: props.autoplay,
width: 200,
progress: 0,
isMuted: props.defaultMuted,
isControlsVisible: !props.hideControlsOnStart,
duration: 0,
isSeeking: false,
};
this.seekBarWidth = 200;
this.wasPlayingBeforeSeek = props.autoplay;
this.seekTouchStart = 0;
this.seekProgressStart = 0;
this.onLayout = this.onLayout.bind(this);
this.onStartPress = this.onStartPress.bind(this);
this.onProgress = this.onProgress.bind(this);
this.onEnd = this.onEnd.bind(this);
this.onLoad = this.onLoad.bind(this);
this.onPlayPress = this.onPlayPress.bind(this);
this.onMutePress = this.onMutePress.bind(this);
this.showControls = this.showControls.bind(this);
this.onToggleFullScreen = this.onToggleFullScreen.bind(this);
this.onSeekBarLayout = this.onSeekBarLayout.bind(this);
this.onSeekGrant = this.onSeekGrant.bind(this);
this.onSeekRelease = this.onSeekRelease.bind(this);
this.onSeek = this.onSeek.bind(this);
this.onClose = this.onClose.bind(this);
}
componentDidMount() {
if (this.props.autoplay) {
this.hideControls();
}
}
componentWillUnmount() {
if (this.controlsTimeout) {
clearTimeout(this.controlsTimeout);
this.controlsTimeout = null;
}
}
onLayout(event) {
const { width } = event.nativeEvent.layout;
this.setState({
width,
});
}
onStartPress() {
if (this.props.onStart) {
this.props.onStart();
}
this.setState({
isPlaying: true,
isStarted: true,
});
this.hideControls();
}
onProgress(event) {
if (this.state.isSeeking) {
return;
}
if (this.props.onProgress) {
this.props.onProgress(event);
}
this.setState({
progress: event.currentTime / (this.props.duration || this.state.duration),
});
}
onEnd(event) {
if (this.props.onEnd) {
this.props.onEnd(event);
}
if (this.props.endWithThumbnail) {
this.setState({ isStarted: false });
this.player.dismissFullscreenPlayer();
}
this.setState({ progress: 1 });
this.player.seek(0);
if (!this.props.loop) {
this.setState({
isPlaying: false,
});
}
}
onLoad(event) {
if (this.props.onLoad) {
this.props.onLoad(event);
}
const { duration } = event;
this.setState({ duration });
}
onPlayPress() {
if (this.props.onPlayPress) {
this.props.onPlayPress();
}
this.setState({
isPlaying: !this.state.isPlaying,
});
this.showControls();
}
onMutePress() {
this.setState({
isMuted: !this.state.isMuted,
});
this.showControls();
}
onToggleFullScreen() {
this.player.presentFullscreenPlayer();
}
onClose() {
    if (this.props.onClose) {
this.props.onClose();
}
this.showControls();
}
onSeekBarLayout({ nativeEvent }) {
const customStyle = this.props.customStyles.seekBar;
let padding = 0;
if (customStyle && customStyle.paddingHorizontal) {
padding = customStyle.paddingHorizontal * 2;
} else if (customStyle) {
padding = customStyle.paddingLeft || 0;
padding += customStyle.paddingRight ? customStyle.paddingRight : 0;
} else {
padding = 20;
}
this.seekBarWidth = nativeEvent.layout.width - padding;
}
onSeekStartResponder() {
return true;
}
onSeekMoveResponder() {
return true;
}
onSeekGrant(e) {
this.seekTouchStart = e.nativeEvent.pageX;
this.seekProgressStart = this.state.progress;
this.wasPlayingBeforeSeek = this.state.isPlaying;
this.setState({
isSeeking: true,
isPlaying: false,
});
}
onSeekRelease() {
this.setState({
isSeeking: false,
isPlaying: this.wasPlayingBeforeSeek,
});
this.showControls();
}
onSeek(e) {
const diff = e.nativeEvent.pageX - this.seekTouchStart;
const ratio = 100 / this.seekBarWidth;
const progress = this.seekProgressStart + ((ratio * diff) / 100);
this.setState({
progress,
});
this.player.seek(progress * this.state.duration);
}
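  // Seek math used in onSeek above: a horizontal drag of `diff` px across a
  // bar of `seekBarWidth` px maps linearly to a progress delta of
  // diff / seekBarWidth (ratio = 100 / width, then divided back by 100),
  // which is scaled by the duration before calling player.seek().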
getSizeStyles() {
const { videoWidth, videoHeight } = this.props;
const { width } = this.state;
const ratio = videoHeight / videoWidth;
return {
height: width * ratio,
width,
};
}
hideControls() {
if (this.props.onHideControls) {
this.props.onHideControls();
}
if (this.props.disableControlsAutoHide) {
return;
}
if (this.controlsTimeout) {
clearTimeout(this.controlsTimeout);
this.controlsTimeout = null;
}
this.controlsTimeout = setTimeout(() => {
this.setState({ isControlsVisible: false });
}, this.props.controlsTimeout);
}
showControls() {
if (this.props.onShowControls) {
this.props.onShowControls();
}
this.setState({
isControlsVisible: true,
});
this.hideControls();
}
renderCloseButton () {
return (
<TouchableOpacity
        style={{
          backgroundColor: 'rgba(0, 0, 0, 0.2)',
          position: 'absolute',
          top: 10,
          left: 10,
        }}
        onPress={this.onClose}
      >
        <Icon name="close" size={30} color="#fefefe" />
</TouchableOpacity>
);
}
renderStartButton() {
const { customStyles } = this.props;
return (
<TouchableOpacity
style={[styles.playButton, customStyles.playButton]}
onPress={this.onStartPress}
>
<Icon style={[styles.playArrow, customStyles.playArrow]} name="play-arrow" size={42} />
</TouchableOpacity>
);
}
renderThumbnail() {
const { thumbnail, style, customStyles, ...props } = this.props;
return (
<BackgroundImage
{...props}
style={[
styles.thumbnail,
this.getSizeStyles(),
style,
customStyles.thumbnail,
]}
source={thumbnail}
>
{this.renderCloseButton()}
{this.renderStartButton()}
</BackgroundImage>
);
}
renderSeekBar(fullWidth) {
const { customStyles, disableSeek } = this.props;
return (
<View
style={[
styles.seekBar,
fullWidth ? styles.seekBarFullWidth : {},
customStyles.seekBar,
fullWidth ? customStyles.seekBarFullWidth : {},
]}
onLayout={this.onSeekBarLayout}
>
<View
style={[
{ flexGrow: this.state.progress },
styles.seekBarProgress,
customStyles.seekBarProgress,
]}
/>
{ !fullWidth && !disableSeek ? (
<View
style={[
styles.seekBarKnob,
customStyles.seekBarKnob,
this.state.isSeeking ? { transform: [{ scale: 1 }] } : {},
this.state.isSeeking ? customStyles.seekBarKnobSeeking : {},
]}
hitSlop={{ top: 20, bottom: 20, left: 10, right: 20 }}
onStartShouldSetResponder={this.onSeekStartResponder}
onMoveShouldSetPanResponder={this.onSeekMoveResponder}
onResponderGrant={this.onSeekGrant}
onResponderMove={this.onSeek}
onResponderRelease={this.onSeekRelease}
onResponderTerminate={this.onSeekRelease}
/>
) : null }
<View style={[
styles.seekBarBackground,
{ flexGrow: 1 - this.state.progress },
customStyles.seekBarBackground,
]} />
</View>
);
}
renderControls() {
const { customStyles } = this.props;
return (
<View style={[styles.controls, customStyles.controls]}>
<TouchableOpacity
onPress={this.onPlayPress}
style={[customStyles.controlButton, customStyles.playControl]}
>
<Icon
style={[styles.playControl, customStyles.controlIcon, customStyles.playIcon]}
name={this.state.isPlaying ? 'pause' : 'play-arrow'}
size={32}
/>
</TouchableOpacity>
{this.renderSeekBar()}
{this.props.muted ? null : (
<TouchableOpacity onPress={this.onMutePress} style={customStyles.controlButton}>
<Icon
style={[styles.extraControl, customStyles.controlIcon]}
name={this.state.isMuted ? 'volume-off' : 'volume-up'}
size={24}
/>
</TouchableOpacity>
)}
{(Platform.OS === 'android' || this.props.disableFullscreen) ? null : (
<TouchableOpacity onPress={this.onToggleFullScreen} style={customStyles.controlButton}>
<Icon
style={[styles.extraControl, customStyles.controlIcon]}
name="fullscreen"
size={32}
/>
</TouchableOpacity>
)}
</View>
);
}
renderVideo() {
const {
video,
style,
resizeMode,
pauseOnPress,
fullScreenOnLongPress,
customStyles,
...props
} = this.props;
return (
<View style={customStyles.videoWrapper}>
<Video
{...props}
style={[
styles.video,
this.getSizeStyles(),
style,
customStyles.video,
]}
ref={p => { this.player = p; }}
muted={this.props.muted || this.state.isMuted}
paused={!this.state.isPlaying}
onProgress={this.onProgress}
onEnd={this.onEnd}
onLoad={this.onLoad}
source={video}
resizeMode={resizeMode}
/>
<View
style={[
this.getSizeStyles(),
{ marginTop: -this.getSizeStyles().height },
]}
>
<TouchableOpacity
style={styles.overlayButton}
onPress={() => {
this.showControls();
if (pauseOnPress)
this.onPlayPress();
}}
onLongPress={() => {
if (fullScreenOnLongPress && Platform.OS !== 'android')
this.onToggleFullScreen();
}}
/>
</View>
{((!this.state.isPlaying) || this.state.isControlsVisible)
? this.renderCloseButton() : null}
{((!this.state.isPlaying) || this.state.isControlsVisible)
? this.renderControls() : this.renderSeekBar(true)}
</View>
);
}
renderContent() {
const { thumbnail, style } = this.props;
const { isStarted } = this.state;
if (!isStarted && thumbnail) {
return this.renderThumbnail();
} else if (!isStarted) {
return (
<View style={[styles.preloadingPlaceholder, this.getSizeStyles(), style]}>
{this.renderStartButton()}
</View>
);
}
return this.renderVideo();
}
render() {
return (
<View onLayout={this.onLayout} style={this.props.customStyles.wrapper}>
{this.renderContent()}
</View>
);
}
}
VideoPlayer.propTypes = {
video: Video.propTypes.source,
thumbnail: Image.propTypes.source,
videoWidth: PropTypes.number,
videoHeight: PropTypes.number,
duration: PropTypes.number,
autoplay: PropTypes.bool,
defaultMuted: PropTypes.bool,
muted: PropTypes.bool,
style: ViewPropTypes.style,
controlsTimeout: PropTypes.number,
disableControlsAutoHide: PropTypes.bool,
disableFullscreen: PropTypes.bool,
loop: PropTypes.bool,
resizeMode: Video.propTypes.resizeMode,
hideControlsOnStart: PropTypes.bool,
endWithThumbnail: PropTypes.bool,
disableSeek: PropTypes.bool,
pauseOnPress: PropTypes.bool,
fullScreenOnLongPress: PropTypes.bool,
customStyles: PropTypes.shape({
wrapper: ViewPropTypes.style,
video: Video.propTypes.style,
videoWrapper: ViewPropTypes.style,
controls: ViewPropTypes.style,
playControl: TouchableOpacity.propTypes.style,
controlButton: TouchableOpacity.propTypes.style,
controlIcon: Icon.propTypes.style,
playIcon: Icon.propTypes.style,
seekBar: ViewPropTypes.style,
seekBarFullWidth: ViewPropTypes.style,
seekBarProgress: ViewPropTypes.style,
seekBarKnob: ViewPropTypes.style,
seekBarKnobSeeking: ViewPropTypes.style,
seekBarBackground: ViewPropTypes.style,
thumbnail: Image.propTypes.style,
playButton: TouchableOpacity.propTypes.style,
playArrow: Icon.propTypes.style,
}),
onEnd: PropTypes.func,
onProgress: PropTypes.func,
onLoad: PropTypes.func,
onStart: PropTypes.func,
onPlayPress: PropTypes.func,
onHideControls: PropTypes.func,
onShowControls: PropTypes.func,
onClose: PropTypes.func,
};
VideoPlayer.defaultProps = {
videoWidth: 1280,
videoHeight: 720,
autoplay: false,
controlsTimeout: 2000,
loop: false,
resizeMode: 'contain',
disableSeek: false,
pauseOnPress: false,
fullScreenOnLongPress: false,
customStyles: {},
};
| VideoPlayer |
app.js | import Vue from 'vue' | import SocialSharing from 'vue-social-sharing'
import axios from 'axios'
Vue.prototype.$http = axios;
Vue.use(SocialSharing);
new Vue({
    router: router, // set this to the router exported from router.js
}).$mount('#app') // mount the app onto the element the router applies to | import router from './router.js'
main.go | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"net/http"
_ "net/http/pprof"
"os"
"time"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/klog"
"k8s.io/klog/klogr"
infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1alpha3"
"sigs.k8s.io/cluster-api-provider-gcp/controllers"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
"sigs.k8s.io/cluster-api/util/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/controller"
// +kubebuilder:scaffold:imports
)
var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
)
func init() {
_ = clientgoscheme.AddToScheme(scheme)
_ = infrav1.AddToScheme(scheme)
_ = clusterv1.AddToScheme(scheme)
// +kubebuilder:scaffold:scheme
}
func main() {
klog.InitFlags(nil)
var (
metricsAddr string
enableLeaderElection bool
watchNamespace string
profilerAddress string
gcpClusterConcurrency int
gcpMachineConcurrency int
syncPeriod time.Duration
)
flag.StringVar(
&metricsAddr,
"metrics-addr",
":8080",
"The address the metric endpoint binds to.",
)
flag.BoolVar(
&enableLeaderElection,
"enable-leader-election",
false,
"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.",
)
flag.StringVar(
&watchNamespace,
"namespace",
"",
"Namespace that the controller watches to reconcile cluster-api objects. If unspecified, the controller watches for cluster-api objects across all namespaces.",
)
flag.StringVar(
&profilerAddress,
"profiler-address",
"",
"Bind address to expose the pprof profiler (e.g. localhost:6060)",
)
flag.IntVar(&gcpClusterConcurrency,
"gcpcluster-concurrency",
10,
"Number of GCPClusters to process simultaneously",
)
flag.IntVar(&gcpMachineConcurrency,
"gcpmachine-concurrency",
10,
"Number of GCPMachines to process simultaneously",
)
flag.DurationVar(&syncPeriod,
"sync-period",
10*time.Minute,
"The minimum interval at which watched resources are reconciled (e.g. 15m)",
)
flag.Parse()
if watchNamespace != "" {
setupLog.Info("Watching cluster-api objects only in namespace for reconciliation", "namespace", watchNamespace)
}
if profilerAddress != "" {
setupLog.Info("Profiler listening for requests", "profiler-address", profilerAddress)
go func() {
setupLog.Error(http.ListenAndServe(profilerAddress, nil), "listen and serve error")
}()
}
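	// With --profiler-address set, the blank import of net/http/pprof above
	// registers the standard debug endpoints on that server, so profiles can
	// be pulled with e.g.:
	//
	//	go tool pprof http://localhost:6060/debug/pprof/heap
	//
	// (localhost:6060 standing in for whatever address was passed).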
ctrl.SetLogger(klogr.New())
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "controller-leader-election-capg",
SyncPeriod: &syncPeriod,
Namespace: watchNamespace,
})
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
// Initialize event recorder.
record.InitFromRecorder(mgr.GetEventRecorderFor("gcp-controller"))
if err = (&controllers.GCPMachineReconciler{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("GCPMachine"),
}).SetupWithManager(mgr, controller.Options{MaxConcurrentReconciles: gcpMachineConcurrency}); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "GCPMachine")
os.Exit(1) | Client: mgr.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("GCPCluster"),
}).SetupWithManager(mgr, controller.Options{MaxConcurrentReconciles: gcpClusterConcurrency}); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "GCPCluster")
os.Exit(1)
}
// +kubebuilder:scaffold:builder
setupLog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
} | }
if err = (&controllers.GCPClusterReconciler{ |
container_exec.go | package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/logger"
"github.com/lxc/lxd/shared/version"
log "github.com/lxc/lxd/shared/log15"
)
type execWs struct {
command []string
container container
env map[string]string
rootUid int64
rootGid int64
conns map[int]*websocket.Conn
connsLock sync.Mutex
allConnected chan bool
controlConnected chan bool
interactive bool
fds map[int]string
width int
height int
}
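// Convention for the fds/conns maps above: key -1 is the control connection;
// in interactive mode key 0 is the single pty, otherwise keys 0, 1 and 2
// carry stdin, stdout and stderr respectively (Metadata below reports key -1
// under the name "control").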
func (s *execWs) Metadata() interface{} {
fds := shared.Jmap{}
for fd, secret := range s.fds {
if fd == -1 {
fds["control"] = secret
} else {
fds[strconv.Itoa(fd)] = secret
}
}
return shared.Jmap{"fds": fds}
}
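// The metadata produced above looks like (sketch):
//
//	{"fds": {"0": "<secret>", "control": "<secret>"}}
//
// Clients hand the matching secret back via the ?secret= form value checked
// in Connect below.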
func (s *execWs) Connect(op *operation, r *http.Request, w http.ResponseWriter) error {
secret := r.FormValue("secret")
if secret == "" {
return fmt.Errorf("missing secret")
}
for fd, fdSecret := range s.fds {
if secret == fdSecret {
conn, err := shared.WebsocketUpgrader.Upgrade(w, r, nil)
if err != nil {
return err
}
s.connsLock.Lock()
s.conns[fd] = conn
s.connsLock.Unlock()
if fd == -1 {
s.controlConnected <- true
return nil
}
s.connsLock.Lock()
for i, c := range s.conns {
if i != -1 && c == nil {
s.connsLock.Unlock()
return nil
}
}
s.connsLock.Unlock()
s.allConnected <- true
return nil
}
}
	/* If we didn't find the right secret, the user provided a bad one,
	 * which is a 403, not a 404, since this operation actually exists */
return os.ErrPermission
}
func (s *execWs) Do(op *operation) error {
<-s.allConnected
var err error
var ttys []*os.File
var ptys []*os.File
var stdin *os.File
var stdout *os.File
var stderr *os.File
if s.interactive {
ttys = make([]*os.File, 1)
ptys = make([]*os.File, 1)
ptys[0], ttys[0], err = shared.OpenPty(s.rootUid, s.rootGid)
if err != nil {
return err
}
stdin = ttys[0]
stdout = ttys[0]
stderr = ttys[0]
if s.width > 0 && s.height > 0 {
shared.SetSize(int(ptys[0].Fd()), s.width, s.height)
}
} else {
ttys = make([]*os.File, 3)
ptys = make([]*os.File, 3)
for i := 0; i < len(ttys); i++ {
ptys[i], ttys[i], err = shared.Pipe()
if err != nil {
return err
}
}
stdin = ptys[0]
stdout = ttys[1]
stderr = ttys[2]
}
controlExit := make(chan bool)
attachedChildIsBorn := make(chan int)
attachedChildIsDead := make(chan bool, 1)
var wgEOF sync.WaitGroup
if s.interactive {
wgEOF.Add(1)
go func() {
attachedChildPid := <-attachedChildIsBorn
select {
case <-s.controlConnected:
break
case <-controlExit:
return
}
for {
s.connsLock.Lock()
conn := s.conns[-1]
s.connsLock.Unlock()
mt, r, err := conn.NextReader()
if mt == websocket.CloseMessage {
break | }
if err != nil {
logger.Debugf("Got error getting next reader %s", err)
er, ok := err.(*websocket.CloseError)
if !ok {
break
}
if er.Code != websocket.CloseAbnormalClosure {
break
}
// If an abnormal closure occurred, kill the attached process.
err := syscall.Kill(attachedChildPid, syscall.SIGKILL)
if err != nil {
logger.Debugf("Failed to send SIGKILL to pid %d.", attachedChildPid)
} else {
logger.Debugf("Sent SIGKILL to pid %d.", attachedChildPid)
}
return
}
buf, err := ioutil.ReadAll(r)
if err != nil {
logger.Debugf("Failed to read message %s", err)
break
}
command := api.ContainerExecControl{}
if err := json.Unmarshal(buf, &command); err != nil {
logger.Debugf("Failed to unmarshal control socket command: %s", err)
continue
}
if command.Command == "window-resize" {
winchWidth, err := strconv.Atoi(command.Args["width"])
if err != nil {
logger.Debugf("Unable to extract window width: %s", err)
continue
}
winchHeight, err := strconv.Atoi(command.Args["height"])
if err != nil {
logger.Debugf("Unable to extract window height: %s", err)
continue
}
err = shared.SetSize(int(ptys[0].Fd()), winchWidth, winchHeight)
if err != nil {
logger.Debugf("Failed to set window size to: %dx%d", winchWidth, winchHeight)
continue
}
} else if command.Command == "signal" {
if err := syscall.Kill(attachedChildPid, syscall.Signal(command.Signal)); err != nil {
logger.Debugf("Failed forwarding signal '%s' to PID %d.", command.Signal, attachedChildPid)
continue
}
logger.Debugf("Forwarded signal '%d' to PID %d.", command.Signal, attachedChildPid)
}
}
}()
go func() {
s.connsLock.Lock()
conn := s.conns[0]
s.connsLock.Unlock()
logger.Debugf("Starting to mirror websocket")
readDone, writeDone := shared.WebsocketExecMirror(conn, ptys[0], ptys[0], attachedChildIsDead, int(ptys[0].Fd()))
<-readDone
<-writeDone
logger.Debugf("Finished to mirror websocket")
conn.Close()
wgEOF.Done()
}()
} else {
wgEOF.Add(len(ttys) - 1)
for i := 0; i < len(ttys); i++ {
go func(i int) {
if i == 0 {
s.connsLock.Lock()
conn := s.conns[i]
s.connsLock.Unlock()
<-shared.WebsocketRecvStream(ttys[i], conn)
ttys[i].Close()
} else {
s.connsLock.Lock()
conn := s.conns[i]
s.connsLock.Unlock()
<-shared.WebsocketSendStream(conn, ptys[i], -1)
ptys[i].Close()
wgEOF.Done()
}
}(i)
}
}
finisher := func(cmdResult int, cmdErr error) error {
for _, tty := range ttys {
tty.Close()
}
s.connsLock.Lock()
conn := s.conns[-1]
s.connsLock.Unlock()
if conn == nil {
if s.interactive {
controlExit <- true
}
} else {
conn.Close()
}
attachedChildIsDead <- true
wgEOF.Wait()
for _, pty := range ptys {
pty.Close()
}
metadata := shared.Jmap{"return": cmdResult}
err = op.UpdateMetadata(metadata)
if err != nil {
return err
}
return cmdErr
}
cmd, _, attachedPid, err := s.container.Exec(s.command, s.env, stdin, stdout, stderr, false)
if err != nil {
return err
}
if s.interactive {
attachedChildIsBorn <- attachedPid
}
err = cmd.Wait()
if err == nil {
return finisher(0, nil)
}
exitErr, ok := err.(*exec.ExitError)
if ok {
status, ok := exitErr.Sys().(syscall.WaitStatus)
		if ok {
			if status.Signaled() {
				// 128 + n == Fatal error signal "n"
				return finisher(128+int(status.Signal()), nil)
			}
			return finisher(status.ExitStatus(), nil)
		}
}
return finisher(-1, nil)
}
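// containerExecPost handles POST /1.0/containers/{name}/exec. When the client
// asks to wait for websockets, it returns a websocket operation wired to the
// execWs handler above; otherwise the command runs as a background task,
// optionally recording stdout/stderr to log files.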
func containerExecPost(d *Daemon, r *http.Request) Response {
name := mux.Vars(r)["name"]
c, err := containerLoadByName(d.State(), name)
if err != nil {
return SmartError(err)
}
if !c.IsRunning() {
return BadRequest(fmt.Errorf("Container is not running."))
}
if c.IsFrozen() {
return BadRequest(fmt.Errorf("Container is frozen."))
}
post := api.ContainerExecPost{}
buf, err := ioutil.ReadAll(r.Body)
if err != nil {
return BadRequest(err)
}
if err := json.Unmarshal(buf, &post); err != nil {
return BadRequest(err)
}
env := map[string]string{}
for k, v := range c.ExpandedConfig() {
if strings.HasPrefix(k, "environment.") {
env[strings.TrimPrefix(k, "environment.")] = v
}
}
if post.Environment != nil {
for k, v := range post.Environment {
env[k] = v
}
}
// Set default value for PATH
_, ok := env["PATH"]
if !ok {
env["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
if c.FileExists("/snap") == nil {
env["PATH"] = fmt.Sprintf("%s:/snap/bin", env["PATH"])
}
}
// Set default value for HOME
_, ok = env["HOME"]
if !ok {
env["HOME"] = "/root"
}
// Set default value for USER
_, ok = env["USER"]
if !ok {
env["USER"] = "root"
}
	// Set default value for LANG
_, ok = env["LANG"]
if !ok {
env["LANG"] = "C.UTF-8"
}
if post.WaitForWS {
ws := &execWs{}
ws.fds = map[int]string{}
idmapset, err := c.IdmapSet()
if err != nil {
return InternalError(err)
}
if idmapset != nil {
ws.rootUid, ws.rootGid = idmapset.ShiftIntoNs(0, 0)
}
ws.conns = map[int]*websocket.Conn{}
ws.conns[-1] = nil
ws.conns[0] = nil
if !post.Interactive {
ws.conns[1] = nil
ws.conns[2] = nil
}
ws.allConnected = make(chan bool, 1)
ws.controlConnected = make(chan bool, 1)
ws.interactive = post.Interactive
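		// Generate one secret per expected websocket: key -1 is the control
		// socket, 0 is the bidirectional PTY socket for interactive sessions,
		// and 0..2 map to stdin/stdout/stderr otherwise.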
for i := -1; i < len(ws.conns)-1; i++ {
ws.fds[i], err = shared.RandomCryptoString()
if err != nil {
return InternalError(err)
}
}
ws.command = post.Command
ws.container = c
ws.env = env
ws.width = post.Width
ws.height = post.Height
resources := map[string][]string{}
resources["containers"] = []string{ws.container.Name()}
op, err := operationCreate(operationClassWebsocket, resources, ws.Metadata(), ws.Do, nil, ws.Connect)
if err != nil {
return InternalError(err)
}
return OperationResponse(op)
}
run := func(op *operation) error {
var cmdErr error
var cmdResult int
metadata := shared.Jmap{}
if post.RecordOutput {
// Prepare stdout and stderr recording
stdout, err := os.OpenFile(filepath.Join(c.LogPath(), fmt.Sprintf("exec_%s.stdout", op.id)), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return err
}
defer stdout.Close()
stderr, err := os.OpenFile(filepath.Join(c.LogPath(), fmt.Sprintf("exec_%s.stderr", op.id)), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return err
}
defer stderr.Close()
// Run the command
_, cmdResult, _, cmdErr = c.Exec(post.Command, env, nil, stdout, stderr, true)
// Update metadata with the right URLs
metadata["return"] = cmdResult
metadata["output"] = shared.Jmap{
"1": fmt.Sprintf("/%s/containers/%s/logs/%s", version.APIVersion, c.Name(), filepath.Base(stdout.Name())),
"2": fmt.Sprintf("/%s/containers/%s/logs/%s", version.APIVersion, c.Name(), filepath.Base(stderr.Name())),
}
} else {
_, cmdResult, _, cmdErr = c.Exec(post.Command, env, nil, nil, nil, true)
metadata["return"] = cmdResult
}
err = op.UpdateMetadata(metadata)
if err != nil {
logger.Error("error updating metadata for cmd", log.Ctx{"err": err, "cmd": post.Command})
}
return cmdErr
}
resources := map[string][]string{}
resources["containers"] = []string{name}
op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
if err != nil {
return InternalError(err)
}
return OperationResponse(op)
} | |
ingress_handler_test.go | /*
* Copyright 2019 The Knative Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ingress
import (
"context"
nethttp "net/http"
"reflect"
"sync"
"testing"
"time"
cloudevents "github.com/cloudevents/sdk-go"
"github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http"
"go.uber.org/zap" |
const (
validURI = "/testNamespace/testBroker"
validHTTPMethod = nethttp.MethodPost
)
type mockReporter struct {
eventCountReported bool
eventDispatchTimeReported bool
}
func (r *mockReporter) ReportEventCount(args *ReportArgs, responseCode int) error {
r.eventCountReported = true
return nil
}
func (r *mockReporter) ReportEventDispatchTime(args *ReportArgs, responseCode int, d time.Duration) error {
r.eventDispatchTimeReported = true
return nil
}
type fakeClient struct {
sent bool
fn interface{}
mux sync.Mutex
}
func (f *fakeClient) Send(ctx context.Context, event cloudevents.Event) (context.Context, *cloudevents.Event, error) {
f.sent = true
return ctx, &event, nil
}
func (f *fakeClient) StartReceiver(ctx context.Context, fn interface{}) error {
f.mux.Lock()
f.fn = fn
f.mux.Unlock()
<-ctx.Done()
return nil
}
func (f *fakeClient) ready() bool {
f.mux.Lock()
ready := f.fn != nil
f.mux.Unlock()
return ready
}
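// fakeReceive looks up the receiver function captured by StartReceiver and
// invokes it via reflection, simulating an event arriving over HTTP.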
func (f *fakeClient) fakeReceive(t *testing.T, event cloudevents.Event) {
// receive(ctx context.Context, event cloudevents.Event, resp *cloudevents.EventResponse) error
resp := new(cloudevents.EventResponse)
tctx := http.TransportContext{Header: nethttp.Header{}, Method: validHTTPMethod, URI: validURI}
ctx := http.WithTransportContext(context.Background(), tctx)
fnType := reflect.TypeOf(f.fn)
if fnType.Kind() != reflect.Func {
t.Fatal("wrong method type.", fnType.Kind())
}
fn := reflect.ValueOf(f.fn)
_ = fn.Call([]reflect.Value{reflect.ValueOf(ctx), reflect.ValueOf(event), reflect.ValueOf(resp)})
}
func TestIngressHandler_Receive_FAIL(t *testing.T) {
testCases := map[string]struct {
httpmethod string
URI string
expectedStatus int
expectedEventCount bool
expectedEventDispatchTime bool
}{
"method not allowed": {
httpmethod: nethttp.MethodGet,
URI: validURI,
expectedStatus: nethttp.StatusMethodNotAllowed,
},
"invalid url": {
httpmethod: validHTTPMethod,
URI: "invalidURI",
expectedStatus: nethttp.StatusNotFound,
},
}
for n, tc := range testCases {
t.Run(n, func(t *testing.T) {
client, _ := cloudevents.NewDefaultClient()
reporter := &mockReporter{}
handler := Handler{
Logger: zaptest.NewLogger(t, zaptest.WrapOptions(zap.AddCaller())),
CeClient: client,
Reporter: reporter,
Defaulter: broker.TTLDefaulter(zap.NewNop(), 5),
}
event := cloudevents.NewEvent(cloudevents.VersionV1)
resp := new(cloudevents.EventResponse)
tctx := http.TransportContext{Header: nethttp.Header{}, Method: tc.httpmethod, URI: tc.URI}
ctx := http.WithTransportContext(context.Background(), tctx)
_ = handler.receive(ctx, event, resp)
if resp.Status != tc.expectedStatus {
t.Errorf("Unexpected status code. Expected %v, Actual %v", tc.expectedStatus, resp.Status)
}
if reporter.eventCountReported != tc.expectedEventCount {
t.Errorf("Unexpected event count reported. Expected %v, Actual %v", tc.expectedEventCount, reporter.eventCountReported)
}
if reporter.eventDispatchTimeReported != tc.expectedEventDispatchTime {
t.Errorf("Unexpected event dispatch time reported. Expected %v, Actual %v", tc.expectedEventDispatchTime, reporter.eventDispatchTimeReported)
}
})
}
}
func TestIngressHandler_Receive_Succeed(t *testing.T) {
client := &fakeClient{}
reporter := &mockReporter{}
handler := Handler{
Logger: zaptest.NewLogger(t, zaptest.WrapOptions(zap.AddCaller())),
CeClient: client,
Reporter: reporter,
Defaulter: broker.TTLDefaulter(zap.NewNop(), 5),
}
event := cloudevents.NewEvent()
resp := new(cloudevents.EventResponse)
tctx := http.TransportContext{Header: nethttp.Header{}, Method: validHTTPMethod, URI: validURI}
ctx := http.WithTransportContext(context.Background(), tctx)
_ = handler.receive(ctx, event, resp)
if !client.sent {
t.Errorf("client should invoke send function")
}
if !reporter.eventCountReported {
t.Errorf("event count should have been reported")
}
if !reporter.eventDispatchTimeReported {
t.Errorf("event dispatch time should have been reported")
}
}
func TestIngressHandler_Receive_NoTTL(t *testing.T) {
client := &fakeClient{}
reporter := &mockReporter{}
handler := Handler{
Logger: zaptest.NewLogger(t, zaptest.WrapOptions(zap.AddCaller())),
CeClient: client,
Reporter: reporter,
}
event := cloudevents.NewEvent(cloudevents.VersionV1)
resp := new(cloudevents.EventResponse)
tctx := http.TransportContext{Header: nethttp.Header{}, Method: validHTTPMethod, URI: validURI}
ctx := http.WithTransportContext(context.Background(), tctx)
_ = handler.receive(ctx, event, resp)
if client.sent {
t.Errorf("client should NOT invoke send function")
}
if !reporter.eventCountReported {
t.Errorf("event count should have been reported")
}
}
func TestIngressHandler_Start(t *testing.T) {
client := &fakeClient{}
reporter := &mockReporter{}
handler := Handler{
Logger: zaptest.NewLogger(t, zaptest.WrapOptions(zap.AddCaller())),
CeClient: client,
Reporter: reporter,
Defaulter: broker.TTLDefaulter(zap.NewNop(), 5),
}
ctx, cancel := context.WithCancel(context.Background())
go func() {
if err := handler.Start(ctx); err != nil {
t.Error(err)
}
}()
// Need time for the handler to start up. Wait.
for !client.ready() {
time.Sleep(1 * time.Millisecond)
}
event := cloudevents.NewEvent()
client.fakeReceive(t, event)
cancel()
if !client.sent {
t.Errorf("client should invoke send function")
}
if !reporter.eventCountReported {
t.Errorf("event count should have been reported")
}
if !reporter.eventDispatchTimeReported {
t.Errorf("event dispatch time should have been reported")
}
} | "go.uber.org/zap/zaptest"
broker "knative.dev/eventing/pkg/mtbroker"
) |
SignalCellularNull.esm.js | import { __assign } from "tslib";
import * as React from 'react';
import { StyledIconBase } from '../../StyledIconBase';
export var SignalCellularNull = React.forwardRef(function (props, ref) {
var attrs = {
"fill": "currentColor",
"xmlns": "http://www.w3.org/2000/svg",
};
return (React.createElement(StyledIconBase, __assign({ iconAttrs: attrs, iconVerticalAlign: "middle", iconViewBox: "0 0 24 24" }, props, { ref: ref }),
React.createElement("path", { d: "M20 6.83V20H6.83L20 6.83M22 2L2 22h20V2z", key: "k0" }), | React.createElement("path", { fill: "none", d: "M0 0h24v24H0z", key: "k1" })));
});
SignalCellularNull.displayName = 'SignalCellularNull';
export var SignalCellularNullDimensions = { height: 24, width: 24 }; | |
threaded_batched.rs | //! batched, prepared statements and also threaded
//!
//! This builds upon basic_batched version and very similar to the python counterpart `threaded_batched.py`
//!
//! We have a channel, spawn a single writer thread which consumes from queue and writes to SQLite.
//! Then we spawn few more producer threads which generate the data, push to channel.
//!
//! previous: basic_batched.rs
//! next: threaded_str_batched.rs
use rusqlite::{Connection, ToSql};
use std::sync::mpsc;
use std::sync::mpsc::{Receiver, Sender};
use std::thread;
use crate::common::AreaCode;
use fast_sqlite3_inserts as common;
static MIN_BATCH_SIZE: i64 = 50;
enum ParamValues {
WithArea(Vec<(AreaCode, i8, i8)>),
WithoutArea(Vec<(i8, i8)>),
}
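// One channel message is one batch of MIN_BATCH_SIZE rows; the variant tells
// the consumer which prepared statement (with or without an area code) to
// bind the batch against.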
fn consumer(rx: Receiver<ParamValues>) |
fn producer(tx: Sender<ParamValues>, count: i64) {
if count < MIN_BATCH_SIZE {
panic!("count cant be less than min batch size");
}
for _ in 0..(count / MIN_BATCH_SIZE) {
let with_area = common::get_random_bool();
let age = common::get_random_age();
let is_active = common::get_random_active();
if with_area {
            // let's prepare the batch
let mut vector = Vec::<(AreaCode, i8, i8)>::new();
for _ in 0..MIN_BATCH_SIZE {
let area_code = common::get_random_area_code();
vector.push((area_code, age, is_active));
}
// send the values
tx.send(ParamValues::WithArea(vector)).unwrap();
} else {
            // let's prepare the batch
let mut vector = Vec::<(i8, i8)>::new();
for _ in 0..MIN_BATCH_SIZE {
vector.push((age, is_active));
}
// send the values
tx.send(ParamValues::WithoutArea(vector)).unwrap();
}
}
}
fn main() {
// setup the DB and tables
let (tx, rx): (Sender<ParamValues>, Receiver<ParamValues>) = mpsc::channel();
// lets launch the consumer
let consumer_handle = thread::spawn(|| consumer(rx));
let cpu_count = num_cpus::get();
let total_rows = 100_000_000;
let each_producer_count = (total_rows / cpu_count) as i64;
let mut handles = Vec::with_capacity(cpu_count);
for _ in 0..cpu_count {
let thread_tx = tx.clone();
handles.push(thread::spawn(move || {
            producer(thread_tx, each_producer_count)
}))
}
for t in handles {
t.join().unwrap();
}
drop(tx);
// wait till consumer is exited
consumer_handle.join().unwrap();
}
| {
let mut conn = Connection::open("threaded_batched.db").unwrap();
conn.execute_batch(
"PRAGMA journal_mode = OFF;
PRAGMA synchronous = 0;
PRAGMA cache_size = 1000000;
PRAGMA locking_mode = EXCLUSIVE;
PRAGMA temp_store = MEMORY;",
)
.expect("PRAGMA");
conn.execute(
"CREATE TABLE IF NOT EXISTS user (
id INTEGER not null primary key,
area CHAR(6),
age INTEGER not null,
active INTEGER not null)",
[],
)
.unwrap();
let tx = conn.transaction().unwrap();
{
// TODO: refactor and DRY from basic_batched
// jeez, refactor this!
// this is very similar to the code from basic_batched, check that file to understand
        // what's happening here.
let mut with_area_params = " (NULL, ?, ?, ?),".repeat(MIN_BATCH_SIZE as usize);
with_area_params.pop();
let with_area_params = with_area_params.as_str();
let mut without_area_params = " (NULL, NULL, ?, ?),".repeat(MIN_BATCH_SIZE as usize);
without_area_params.pop();
let without_area_params = without_area_params.as_str();
let st1 = format!("INSERT INTO user VALUES {}", with_area_params);
let st2 = format!("INSERT INTO user VALUES {}", without_area_params);
let mut stmt_with_area = tx.prepare_cached(st1.as_str()).unwrap();
let mut stmt_without_area = tx.prepare_cached(st2.as_str()).unwrap();
for param_values in rx {
let mut row_values: Vec<&dyn ToSql> = Vec::new();
match param_values {
ParamValues::WithArea(values) => {
for batch in values.iter() {
row_values.push(&batch.0 as &dyn ToSql);
row_values.push(&batch.1 as &dyn ToSql);
row_values.push(&batch.2 as &dyn ToSql);
}
stmt_with_area.execute(&*row_values).unwrap();
}
ParamValues::WithoutArea(values) => {
for batch in values.iter() {
row_values.push(&batch.0 as &dyn ToSql);
row_values.push(&batch.1 as &dyn ToSql);
}
stmt_without_area.execute(&*row_values).unwrap();
}
}
}
}
tx.commit().unwrap();
} |
place.rs | //!
//! The place expression translator.
//!
use std::convert::TryFrom;
use crate::generator::expression::operand::Operand as GeneratorExpressionOperand;
use crate::semantic::analyzer::expression::hint::Hint as TranslationHint;
use crate::semantic::element::error::Error as ElementError;
use crate::semantic::element::place::Place;
use crate::semantic::element::value::Value;
use crate::semantic::element::Element;
use crate::semantic::error::Error;
pub struct Translator {}
impl Translator {
///
/// Translates the place expression to a semantic expression type specified in `hint`.
///
pub fn translate(
place: Place,
hint: TranslationHint,
) -> Result<(Element, Option<GeneratorExpressionOperand>), Error> |
}
| {
match hint {
TranslationHint::Value => {
let element = Value::try_from(&place.r#type)
.map(Element::Value)
.map_err(ElementError::Value)
.map_err(|error| Error::Element(place.location, error))?;
Ok((
element,
Some(GeneratorExpressionOperand::Place(place.into())),
))
}
_ => Ok((Element::Place(place), None)),
}
} |
main.rs | #![deny(clippy::all)]
#![forbid(unsafe_code)]
use log::error;
use pixels::{Error, Pixels, SurfaceTexture};
use winit::dpi::LogicalSize;
use winit::event::{Event, VirtualKeyCode};
use winit::event_loop::{ControlFlow, EventLoop};
use winit::window::WindowBuilder;
use winit_input_helper::WinitInputHelper;
const WIDTH: u32 = 320;
const HEIGHT: u32 = 240;
const BOX_SIZE: i16 = 64;
/// Representation of the application state. In this example, a box will bounce around the screen.
struct World {
box_x: i16,
box_y: i16,
velocity_x: i16,
velocity_y: i16,
}
fn main() -> Result<(), Error> {
env_logger::init();
let event_loop = EventLoop::new(); | .with_title("Hello Pixels")
.with_inner_size(size)
.with_min_inner_size(size)
.build(&event_loop)
.unwrap()
};
let mut pixels = {
let window_size = window.inner_size();
let surface_texture = SurfaceTexture::new(window_size.width, window_size.height, &window);
Pixels::new(WIDTH, HEIGHT, surface_texture)?
};
let mut world = World::new();
event_loop.run(move |event, _, control_flow| {
// Draw the current frame
if let Event::RedrawRequested(_) = event {
world.draw(pixels.get_frame());
if pixels
.render()
.map_err(|e| error!("pixels.render() failed: {}", e))
.is_err()
{
*control_flow = ControlFlow::Exit;
return;
}
}
// Handle input events
if input.update(&event) {
// Close events
if input.key_pressed(VirtualKeyCode::Escape) || input.quit() {
*control_flow = ControlFlow::Exit;
return;
}
// Resize the window
if let Some(size) = input.window_resized() {
pixels.resize(size.width, size.height);
}
// Update internal state and request a redraw
world.update();
window.request_redraw();
}
});
}
impl World {
/// Create a new `World` instance that can draw a moving box.
fn new() -> Self {
Self {
box_x: 24,
box_y: 16,
velocity_x: 1,
velocity_y: 1,
}
}
/// Update the `World` internal state; bounce the box around the screen.
fn update(&mut self) {
if self.box_x <= 0 || self.box_x + BOX_SIZE > WIDTH as i16 {
self.velocity_x *= -1;
}
if self.box_y <= 0 || self.box_y + BOX_SIZE > HEIGHT as i16 {
self.velocity_y *= -1;
}
self.box_x += self.velocity_x;
self.box_y += self.velocity_y;
}
/// Draw the `World` state to the frame buffer.
///
/// Assumes the default texture format: `wgpu::TextureFormat::Rgba8UnormSrgb`
fn draw(&self, frame: &mut [u8]) {
for (i, pixel) in frame.chunks_exact_mut(4).enumerate() {
let x = (i % WIDTH as usize) as i16;
let y = (i / WIDTH as usize) as i16;
let inside_the_box = x >= self.box_x
&& x < self.box_x + BOX_SIZE
&& y >= self.box_y
&& y < self.box_y + BOX_SIZE;
let rgba = if inside_the_box {
[0x5e, 0x48, 0xe8, 0xff]
} else {
[0x48, 0xb2, 0xe8, 0xff]
};
pixel.copy_from_slice(&rgba);
}
}
} | let mut input = WinitInputHelper::new();
let window = {
let size = LogicalSize::new(WIDTH as f64, HEIGHT as f64);
WindowBuilder::new() |
closure.rs | #![allow(unused_assignments, unused_variables)]
// compile-flags: -C opt-level=2 # fix described in rustc_middle/mir/mono.rs
fn main() | {
// Initialize test constants in a way that cannot be determined at compile time, to ensure
// rustc and LLVM cannot optimize out statements (or coverage counters) downstream from
// dependent conditions.
let is_true = std::env::args().len() == 1;
let is_false = ! is_true;
let mut some_string = Some(String::from("the string content"));
println!(
"The string or alt: {}"
,
some_string
.
unwrap_or_else
(
||
{
let mut countdown = 0;
if is_false {
countdown = 10;
}
"alt string 1".to_owned()
}
)
);
some_string = Some(String::from("the string content"));
let
a
=
||
{
let mut countdown = 0;
if is_false {
countdown = 10;
}
"alt string 2".to_owned()
};
println!(
"The string or alt: {}"
,
some_string
.
unwrap_or_else
(
a
)
);
some_string = None;
println!(
"The string or alt: {}"
,
some_string
.
unwrap_or_else
(
||
{
let mut countdown = 0;
if is_false {
countdown = 10;
}
"alt string 3".to_owned()
}
)
);
some_string = None;
let
a
=
||
{
let mut countdown = 0;
if is_false {
countdown = 10;
}
"alt string 4".to_owned()
};
println!(
"The string or alt: {}"
,
some_string
.
unwrap_or_else
(
a
)
);
let
quote_closure
=
|val|
{
let mut countdown = 0;
if is_false {
countdown = 10;
}
format!("'{}'", val)
};
println!(
"Repeated, quoted string: {:?}"
,
std::iter::repeat("repeat me")
.take(5)
.map
(
quote_closure
)
.collect::<Vec<_>>()
);
let
_unused_closure
=
|
mut countdown
|
{
if is_false {
countdown = 10;
}
"closure should be unused".to_owned()
};
let mut countdown = 10;
let _short_unused_closure = | _unused_arg: u8 | countdown += 1;
// Macros can sometimes confuse the coverage results. Compare this next assignment, with an
// unused closure that invokes the `println!()` macro, with the closure assignment above, that
// does not use a macro. The closure above correctly shows `0` executions.
let _short_unused_closure = | _unused_arg: u8 | println!("not called");
// The closure assignment above is executed, with a line count of `1`, but the `println!()`
// could not have been called, and yet, there is no indication that it wasn't...
// ...but adding block braces gives the expected result, showing the block was not executed.
let _short_unused_closure_block = | _unused_arg: u8 | { println!("not called") };
let _shortish_unused_closure = | _unused_arg: u8 | {
println!("not called")
};
let _as_short_unused_closure = |
_unused_arg: u8
| { println!("not called") };
let _almost_as_short_unused_closure = |
_unused_arg: u8
| { println!("not called") }
;
} |
|
server.go | package main
import (
"fmt"
"github.com/avast/retry-go"
"github.com/ganlvtech/go-kahla-notify/cryptojs"
"github.com/ganlvtech/go-kahla-notify/kahla"
"github.com/gin-gonic/gin"
"log"
"net/http"
"sync"
)
const (
ResponseCodeOK = iota
ResponseCodeNoAccessToken
ResponseCodeNoContent
ResponseCodeInvalidAccessToken
ResponseCodeSendMessageFailed
)
type Conversation struct {
Token string
ConversationID int
UserID string
AesKey string
}
type Conversations []*Conversation
type ConversationNotFound struct{}
func (*ConversationNotFound) Error() string {
return "conversation not found"
}
func (c *Conversations) KeyByConversationID() map[int]*Conversation {
result := make(map[int]*Conversation)
for _, v := range *c {
result[v.ConversationID] = v
}
return result
}
func (c *Conversations) GetByConversationID(conversationId int) (*Conversation, error) {
for _, v := range *c {
if v.ConversationID == conversationId {
return v, nil
}
}
return nil, &ConversationNotFound{}
}
func (c *Conversations) GetByToken(token string) (*Conversation, error) {
for _, v := range *c {
if v.Token == token {
return v, nil
}
}
return nil, &ConversationNotFound{}
}
type TokenNotExists struct{}
func (t *TokenNotExists) Error() string {
return "token not exists"
}
type NotifyServer struct {
email string
password string
port int
serverPath string
client *kahla.Client
webSocket *kahla.WebSocket
httpServer *http.Server
friendRequestChan chan struct{}
updateConversationsChan chan struct{}
sendNewTokensChan chan struct{}
conversations *Conversations
}
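// The three buffered channels of capacity 1 act as non-blocking task guards:
// a new accept/update/send task is scheduled only if the corresponding slot
// is free, so at most one instance of each task runs at a time.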
func NewNotifyServer(email string, password string, port int) *NotifyServer |
func (s *NotifyServer) login() error {
log.Println("Login as user:", s.email)
err := retry.Do(func() error {
_, err := s.client.Auth.Login(s.email, s.password)
if err != nil {
log.Println("Login failed:", err, "Retry.")
return err
}
return nil
})
if err != nil {
log.Println("Login failed too many times:", err)
return err
}
log.Println("Login OK.")
s.UpdateConversations()
return nil
}
func (s *NotifyServer) initPusher() error {
log.Println("Initializing pusher.")
err := retry.Do(func() error {
response, err := s.client.Auth.InitPusher()
if err != nil {
log.Println("Initialize pusher failed:", err, "Retry.")
return err
}
s.serverPath = response.ServerPath
return nil
})
if err != nil {
log.Println("Initialize pusher failed too many times:", err)
return err
}
log.Println("Initialize pusher OK.")
return nil
}
// Synchronous call. Returns when the connection is closed or disconnected.
func (s *NotifyServer) connectToPusher(interrupt <-chan struct{}) error {
log.Println("Connecting to pusher.")
err := retry.Do(func() error {
go func() {
state := <-s.webSocket.StateChanged
if state == kahla.WebSocketStateConnected {
log.Println("Connected to pusher OK.")
}
}()
err := s.webSocket.Connect(s.serverPath, interrupt)
if err != nil {
if s.webSocket.State == kahla.WebSocketStateClosed {
log.Println("Interrupt:", err)
return nil
} else if s.webSocket.State == kahla.WebSocketStateDisconnected {
log.Println("Disconnected:", err, "Retry.")
return err
}
log.Println("State:", s.webSocket.State, "Error:", err, "Retry.")
return err
}
log.Println("Interrupt.")
return nil
})
if err != nil {
log.Println("Connected to pusher failed too many times:", err)
return err
}
return nil
}
func (s *NotifyServer) runWebSocket(interrupt <-chan struct{}, done chan<- struct{}) {
defer close(done)
for {
err := s.login()
if err != nil {
continue
}
err = s.initPusher()
if err != nil {
continue
}
err = s.connectToPusher(interrupt)
if err != nil {
continue
}
// Interrupt
break
}
}
func (s *NotifyServer) runEventListener(interrupt <-chan struct{}, done chan<- struct{}) {
defer close(done)
for {
select {
case <-interrupt:
log.Println("Event listener stopped.")
return
case i := <-s.webSocket.Event:
switch v := i.(type) {
case *kahla.NewMessageEvent:
content, err := cryptojs.AesDecrypt(v.Content, v.AesKey)
if err != nil {
log.Println(err)
} else {
title := "New message"
message := v.Sender.NickName + ": " + content
log.Println(title, ":", message)
				if content == "refresh token" {
					conversation, err := s.conversations.GetByConversationID(v.ConversationID)
					if err != nil {
						log.Println(err)
					} else {
						conversation.Token = ""
						s.SendNewTokens()
					}
				}
}
case *kahla.NewFriendRequestEvent:
title := "Friend request"
message := "You have got a new friend request!"
log.Println(title, ":", message, "nick name:", v.Requester.NickName, "id:", v.Requester.ID)
s.AcceptFriendRequest()
case *kahla.WereDeletedEvent:
title := "Were deleted"
message := "You were deleted by one of your friends from his friend list."
log.Println(title, ":", message, "nick name:", v.Trigger.NickName, "id:", v.Trigger.ID)
s.UpdateConversations()
case *kahla.FriendAcceptedEvent:
title := "Friend request"
message := "Your friend request was accepted!"
log.Println(title, ":", message, "nick name:", v.Target.NickName, "id:", v.Target.ID)
case *kahla.TimerUpdatedEvent:
title := "Self-destruct timer updated!"
message := fmt.Sprintf("Your current message life time is: %d", v.NewTimer)
log.Println(title, ":", message, "conversation id:", v.ConversationID)
default:
panic("invalid event type")
}
}
}
}
func (s *NotifyServer) newHttpServer() {
r := gin.Default()
r.GET("/", func(c *gin.Context) {
c.Redirect(302, "https://github.com/ganlvtech/go-kahla-notify-server")
})
r.GET("/send", func(c *gin.Context) {
token := c.Query("token")
if token == "" {
c.JSON(401, gin.H{
"code": ResponseCodeNoAccessToken,
"message": "No access token provided.",
})
return
}
content := c.Query("content")
if content == "" {
c.JSON(400, gin.H{
"code": ResponseCodeNoContent,
"message": "Content is required.",
})
return
}
err := s.SendMessageByToken(token, content)
if err != nil {
_, ok := err.(*TokenNotExists)
if ok {
c.JSON(401, gin.H{
"code": ResponseCodeInvalidAccessToken,
"message": "Invalid access token.",
})
return
}
c.JSON(500, gin.H{
"code": ResponseCodeSendMessageFailed,
"msg": "Send message failed. " + err.Error(),
})
return
}
c.JSON(200, gin.H{
"code": ResponseCodeOK,
"msg": "OK",
})
})
s.httpServer = &http.Server{
Addr: fmt.Sprintf(":%d", s.port),
Handler: r,
}
}
func (s *NotifyServer) runHttpServer(interrupt <-chan struct{}, done chan<- struct{}) {
defer close(done)
go func() {
<-interrupt
err := s.httpServer.Close()
if err != nil {
log.Println("Server close error.", err)
}
}()
err := s.httpServer.ListenAndServe()
if err != nil {
if err == http.ErrServerClosed {
log.Println("Server closed under request.")
} else {
log.Println("Server closed unexpect.", err)
}
}
}
func (s *NotifyServer) acceptFriendRequest() error {
response, err := s.client.Friendship.MyRequests()
if err != nil {
log.Println("Get my friend request failed:", err)
return err
}
var err1 error
for _, v := range response.Items {
if !v.Completed {
_, err := s.client.Friendship.CompleteRequest(v.ID, true)
if err != nil {
log.Println("Complete friend request failed:", err)
if err1 == nil {
err1 = err
}
continue
}
log.Println("Complete friend request:", v.Creator.NickName)
s.UpdateConversations()
}
}
return err1
}
func (s *NotifyServer) AcceptFriendRequest() {
select {
case s.friendRequestChan <- struct{}{}:
log.Println("New friend request task added.")
go func() {
err := s.acceptFriendRequest()
if err != nil {
log.Println(err)
}
<-s.friendRequestChan
}()
default:
log.Println("Friend request task exists. Ignore.")
}
}
func (s *NotifyServer) updateConversations() error {
response, err := s.client.Friendship.MyFriends(false)
if err != nil {
log.Println("Update conversations failed.", err)
return err
}
conversationsMap := s.conversations.KeyByConversationID()
conversations := make(Conversations, 0, len(*s.conversations)+len(response.Items))
for _, v := range response.Items {
v1, ok := conversationsMap[v.ConversationID]
if ok {
conversations = append(conversations, v1)
} else {
conversations = append(conversations, &Conversation{
ConversationID: v.ConversationID,
Token: "",
AesKey: v.AesKey,
UserID: v.UserID,
})
s.SendNewTokens()
}
}
s.conversations = &conversations
return nil
}
func (s *NotifyServer) UpdateConversations() {
select {
case s.updateConversationsChan <- struct{}{}:
log.Println("Update conversation task added.")
go func() {
err := s.updateConversations()
if err != nil {
log.Println(err)
}
<-s.updateConversationsChan
}()
default:
log.Println("Update conversation task exists. Ignore.")
}
}
func (s *NotifyServer) sendNewTokens() error {
var err1 error
for _, v := range *s.conversations {
if v.Token == "" {
v.Token = randomString(32)
err := s.SendMessage(v.ConversationID, v.Token, v.AesKey)
if err != nil {
log.Println("Send new token failed.", err, "UserID:", v.UserID)
if err1 == nil {
err1 = err
}
continue
}
log.Println("Send new token OK")
}
}
return err1
}
func (s *NotifyServer) SendNewTokens() {
select {
case s.sendNewTokensChan <- struct{}{}:
log.Println("Send new tokens task added.")
go func() {
err := s.sendNewTokens()
if err != nil {
log.Println(err)
}
<-s.sendNewTokensChan
}()
default:
log.Println("Send new tokens task exists. Ignore.")
}
}
func (s *NotifyServer) SendRawMessage(conversationId int, content string) error {
err := retry.Do(func() error {
_, err := s.client.Conversation.SendMessage(conversationId, content)
if err != nil {
return err
}
return nil
})
if err != nil {
log.Println("Send message failed.")
return err
}
return nil
}
func (s *NotifyServer) SendMessage(conversationId int, content string, aesKey string) error {
content, err := cryptojs.AesEncrypt(content, aesKey)
if err != nil {
return err
}
return s.SendRawMessage(conversationId, content)
}
func (s *NotifyServer) SendMessageByToken(token string, content string) error {
if s.conversations == nil {
return &TokenNotExists{}
}
conversation, err := s.conversations.GetByToken(token)
if err != nil {
return &TokenNotExists{}
}
return s.SendMessage(conversation.ConversationID, content, conversation.AesKey)
}
func (s *NotifyServer) Run(interrupt <-chan struct{}) error {
interrupt1 := make(chan struct{})
interrupt2 := make(chan struct{})
interrupt3 := make(chan struct{})
go func() {
<-interrupt
close(interrupt1)
close(interrupt2)
close(interrupt3)
}()
done1 := make(chan struct{})
done2 := make(chan struct{})
done3 := make(chan struct{})
var wg sync.WaitGroup
wg.Add(3)
go func() {
s.runWebSocket(interrupt1, done1)
wg.Done()
}()
go func() {
s.runEventListener(interrupt2, done2)
wg.Done()
}()
go func() {
s.runHttpServer(interrupt3, done3)
wg.Done()
}()
wg.Wait()
log.Println("Kahla client stopped.")
return nil
}
| {
s := &NotifyServer{}
s.email = email
s.password = password
s.port = port
s.client = kahla.NewClient()
s.webSocket = kahla.NewWebSocket()
s.newHttpServer()
s.friendRequestChan = make(chan struct{}, 1)
s.updateConversationsChan = make(chan struct{}, 1)
s.sendNewTokensChan = make(chan struct{}, 1)
conversations := make(Conversations, 0)
s.conversations = &conversations
return s
} |
issue-17718-static-sync.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT. | //
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(optin_builtin_traits)]
use std::marker::Sync;
struct Foo;
impl !Sync for Foo {}
static FOO: usize = 3;
static BAR: Foo = Foo;
//~^ ERROR: `Foo: std::marker::Sync` is not satisfied
fn main() {} | |
operations.go | package notificationhubs
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// OperationsClient is the azure NotificationHub client
type OperationsClient struct {
BaseClient
}
// NewOperationsClient creates an instance of the OperationsClient client.
func NewOperationsClient(subscriptionID string) OperationsClient {
return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client.
func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient {
return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// List lists all of the available NotificationHubs REST API operations.
func (client OperationsClient) List(ctx context.Context) (result OperationListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
defer func() {
sc := -1
if result.olr.Response.Response != nil {
sc = result.olr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx)
if err != nil {
err = autorest.NewErrorWithError(err, "notificationhubs.OperationsClient", "List", nil, "Failure preparing request")
return
}
resp, err := client.ListSender(req)
if err != nil {
result.olr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "notificationhubs.OperationsClient", "List", resp, "Failure sending request")
return
}
result.olr, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "notificationhubs.OperationsClient", "List", resp, "Failure responding to request")
}
| func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
const APIVersion = "2017-04-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPath("/providers/Microsoft.NotificationHubs/operations"),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listNextResults retrieves the next set of results, if any.
func (client OperationsClient) listNextResults(ctx context.Context, lastResults OperationListResult) (result OperationListResult, err error) {
req, err := lastResults.operationListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "notificationhubs.OperationsClient", "listNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "notificationhubs.OperationsClient", "listNextResults", resp, "Failure sending next results request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "notificationhubs.OperationsClient", "listNextResults", resp, "Failure responding to next results request")
}
return
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client OperationsClient) ListComplete(ctx context.Context) (result OperationListResultIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
defer func() {
sc := -1
if result.Response().Response.Response != nil {
sc = result.page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.page, err = client.List(ctx)
return
} | return
}
// ListPreparer prepares the List request. |
index.d.ts | // Type definitions for zen-observable 0.5
// Project: https://github.com/zenparsing/zen-observable
// Definitions by: Kombu <https://github.com/aicest>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
declare global {
interface SymbolConstructor {
observable: symbol;
}
namespace ZenObservable {
interface SubscriptionObserver<T> {
closed: boolean;
next(value: T): void;
error(errorValue: any): void;
complete(): void;
}
interface Subscription {
closed: boolean;
unsubscribe(): void;
}
interface Observer<T> {
start?(subscription: Subscription): any;
next?(value: T): void;
error?(errorValue: any): void;
complete?(): void;
}
type Subscriber<T> = (observer: SubscriptionObserver<T>) => void | (() => void) | Subscription;
interface ObservableLike<T> {
subscribe?: Subscriber<T>;
[Symbol.observable](): Observable<T> | ObservableLike<T>;
}
}
}
declare class | <T> {
constructor(subscriber: ZenObservable.Subscriber<T>)
subscribe(observer: ZenObservable.Observer<T>): ZenObservable.Subscription;
subscribe(onNext: (value: T) => void, onError?: (error: any) => void, onComplete?: () => void): ZenObservable.Subscription;
[Symbol.observable](): Observable<T>;
forEach(callback: (value: T) => void): Promise<void>;
map<R>(callback: (value: T) => R): Observable<R>;
filter(callback: (value: T) => boolean): Observable<T>;
reduce(callback: (previousValue: T, currentValue: T) => T, initialValue?: T): Observable<T>;
reduce<R>(callback: (previousValue: R, currentValue: T) => R, initialValue?: R): Observable<R>;
flatMap<R>(callback: (value: T) => ZenObservable.ObservableLike<R>): Observable<R>;
static from<R>(observable: Observable<R> | ZenObservable.ObservableLike<R> | ArrayLike<R>): Observable<R>;
static of<R>(...items: R[]): Observable<R>;
}
declare namespace Observable {
}
export = Observable;
| Observable |
group.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sqlalchemy as sa
from h.models import Group, User
from h.models.group import ReadableBy
from h.util import group as group_util
class GroupService(object):
def __init__(self, session, user_fetcher):
"""
Create a new groups service.
:param session: the SQLAlchemy session object
:param user_fetcher: a callable for fetching users by userid
"""
self.session = session
self.user_fetcher = user_fetcher
def fetch(self, pubid_or_groupid): | """
Fetch a group using either a groupid or a pubid.
:arg pubid_or_groupid: a string in either :mod:`~h.pubid` format
or as :attr:`h.models.Group.groupid`
:rtype: :class:`~h.models.Group` or ``None``
"""
if group_util.is_groupid(pubid_or_groupid):
return self.fetch_by_groupid(pubid_or_groupid)
return self.fetch_by_pubid(pubid_or_groupid)
def fetch_by_pubid(self, pubid):
"""Return a group with the given ``pubid`` or ``None``."""
return self.session.query(Group).filter_by(pubid=pubid).one_or_none()
def fetch_by_groupid(self, groupid):
"""
Return a group with the given ``groupid`` or ``None``.
:arg groupid: String in groupid format, e.g. ``group:[email protected]``.
See :class:`~h.models.Group`
:raises ValueError: if ``groupid`` is not a valid groupid.
See :func:`h.util.group.split_groupid`
:rtype: :class:`~h.models.Group` or ``None``
"""
parts = group_util.split_groupid(groupid)
authority = parts["authority"]
authority_provided_id = parts["authority_provided_id"]
return (
self.session.query(Group)
.filter_by(authority=authority)
.filter_by(authority_provided_id=authority_provided_id)
.one_or_none()
)
def filter_by_name(self, name=None):
"""
Return a Query of all Groups, optionally filtered by name.
If ``name`` is present, groups will be filtered by name. Filtering
is case-insensitive and wildcarded. Otherwise, all groups will be
retrieved.
:rtype: sqlalchemy.orm.query.Query
"""
filter_terms = []
if name:
filter_terms.append(
sa.func.lower(Group.name).like("%{}%".format(name.lower()))
)
return (
self.session.query(Group)
.filter(*filter_terms)
.order_by(Group.created.desc())
)
def groupids_readable_by(self, user):
"""
Return a list of pubids for which the user has read access.
If the passed-in user is ``None``, this returns the list of
world-readable groups.
:type user: `h.models.user.User`
"""
readable = Group.readable_by == ReadableBy.world
if user is not None:
readable_member = sa.and_(
Group.readable_by == ReadableBy.members,
Group.members.any(User.id == user.id),
)
readable = sa.or_(readable, readable_member)
return [
record.pubid for record in self.session.query(Group.pubid).filter(readable)
]
def groupids_created_by(self, user):
"""
Return a list of pubids which the user created.
If the passed-in user is ``None``, this returns an empty list.
:type user: `h.models.user.User` or None
"""
if user is None:
return []
return [
g.pubid for g in self.session.query(Group.pubid).filter_by(creator=user)
]
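# Illustrative usage, assuming groups_factory is registered as the "group"
# service (the ids below are made up):
#     svc = request.find_service(name="group")
#     svc.fetch("abc123")                     # pubid form
#     svc.fetch("group:devteam@example.com")  # groupid form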
def groups_factory(context, request):
"""Return a GroupService instance for the passed context and request."""
user_service = request.find_service(name="user")
return GroupService(session=request.db, user_fetcher=user_service.fetch) | |
consumer.go | package kongstate
import (
"fmt"
"github.com/kong/go-kong/kong"
configurationv1 "github.com/kong/kubernetes-ingress-controller/pkg/apis/configuration/v1"
"github.com/mitchellh/mapstructure"
"github.com/sirupsen/logrus"
)
// Consumer holds a Kong consumer and its plugins and credentials.
type Consumer struct {
kong.Consumer
Plugins []kong.Plugin
KeyAuths []*kong.KeyAuth
HMACAuths []*kong.HMACAuth
JWTAuths []*kong.JWTAuth
BasicAuths []*kong.BasicAuth
ACLGroups []*kong.ACLGroup
Oauth2Creds []*kong.Oauth2Credential
K8sKongConsumer configurationv1.KongConsumer
}
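// SetCredential decodes a raw credential config into the typed go-kong
// struct matching credType, validates the fields that would otherwise make
// decK panic, and appends the credential to the consumer.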
func (c *Consumer) SetCredential(log logrus.FieldLogger, credType string, credConfig interface{}) error {
switch credType {
case "key-auth", "keyauth_credential":
var cred kong.KeyAuth
err := decodeCredential(credConfig, &cred)
if err != nil {
return fmt.Errorf("failed to decode key-auth credential: %w", err)
}
// TODO we perform these validity checks here because passing credentials without these fields will panic deck
// later on. Ideally this should not be handled in the controller, but we cannot currently handle it elsewhere
// (i.e. in deck or go-kong) without entering a sync failure loop that cannot actually report the problem
// piece of configuration. if we can address those limitations, we should remove these checks.
// See https://github.com/Kong/deck/pull/223 and https://github.com/Kong/kubernetes-ingress-controller/issues/532
// for more discussion.
if cred.Key == nil {
return fmt.Errorf("key-auth for consumer %s is invalid: no key", *c.Username)
}
c.KeyAuths = append(c.KeyAuths, &cred)
case "basic-auth", "basicauth_credential":
var cred kong.BasicAuth
err := decodeCredential(credConfig, &cred)
if err != nil {
return fmt.Errorf("failed to decode basic-auth credential: %w", err)
}
if cred.Username == nil {
return fmt.Errorf("basic-auth for consumer %s is invalid: no username", *c.Username)
}
c.BasicAuths = append(c.BasicAuths, &cred)
case "hmac-auth", "hmacauth_credential":
var cred kong.HMACAuth
err := decodeCredential(credConfig, &cred)
if err != nil {
return fmt.Errorf("failed to decode hmac-auth credential: %w", err)
}
if cred.Username == nil {
return fmt.Errorf("hmac-auth for consumer %s is invalid: no username", *c.Username)
}
c.HMACAuths = append(c.HMACAuths, &cred)
case "oauth2":
var cred kong.Oauth2Credential
err := decodeCredential(credConfig, &cred)
if err != nil {
return fmt.Errorf("failed to decode oauth2 credential: %w", err)
}
if cred.ClientID == nil {
return fmt.Errorf("oauth2 for consumer %s is invalid: no client_id", *c.Username)
}
c.Oauth2Creds = append(c.Oauth2Creds, &cred)
case "jwt", "jwt_secret":
var cred kong.JWTAuth
err := decodeCredential(credConfig, &cred)
if err != nil {
log.Errorf("failed to process JWT credential: %v", err)
}
		// The algorithm field is treated specially because it is the only
		// field the user might omit, expecting Kong to insert the default.
		// If we don't set it, decK will detect a diff and PUT this
		// credential every time it performs a sync operation, which
		// leads to unnecessary cache invalidations in Kong.
if cred.Algorithm == nil || *cred.Algorithm == "" {
cred.Algorithm = kong.String("HS256")
}
if cred.Key == nil {
return fmt.Errorf("jwt-auth for consumer %s is invalid: no key", *c.Username)
}
c.JWTAuths = append(c.JWTAuths, &cred)
case "acl":
var cred kong.ACLGroup
err := decodeCredential(credConfig, &cred)
if err != nil {
log.Errorf("failed to process ACL group: %v", err)
}
if cred.Group == nil {
return fmt.Errorf("acl for consumer %s is invalid: no group", *c.Username)
}
c.ACLGroups = append(c.ACLGroups, &cred)
default:
return fmt.Errorf("invalid credential type: '%v'", credType)
}
return nil
}
func | (credConfig interface{},
credStructPointer interface{}) error {
decoder, err := mapstructure.NewDecoder(
&mapstructure.DecoderConfig{TagName: "json",
Result: credStructPointer,
})
if err != nil {
return fmt.Errorf("failed to create a decoder: %w", err)
}
err = decoder.Decode(credConfig)
if err != nil {
return fmt.Errorf("failed to decode credential: %w", err)
}
return nil
}
| decodeCredential |
input.go | package utils
import (
"bufio"
"io" |
//ReadPipe reads data from a pipe
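//Note that bufio.Scanner gives up on lines longer than 64KiB and this
//function does not check scanner.Err(), so oversized lines are dropped.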
func ReadPipe(pipe io.ReadCloser) []byte {
output := []byte{}
scanner := bufio.NewScanner(pipe)
for scanner.Scan() {
		output = append(output, scanner.Bytes()...)
output = append(output, byte('\n'))
}
return output
} | ) |
logging_presets.py | import logging
from . import common_functions as c_f
import os
import torch
from collections import defaultdict
| # You can write your own hooks for logging.
# But if you'd like something that just works, then use this HookContainer.
# You'll need to install record-keeper and tensorboard.
# pip install record-keeper tensorboard
class HookContainer:
def __init__(self, record_keeper,
record_group_name_prefix=None,
primary_metric="mean_average_precision_at_r",
validation_split_name="val"):
self.record_keeper = record_keeper
self.record_group_name_prefix = record_group_name_prefix
self.saveable_trainer_objects = ["models", "optimizers", "lr_schedulers", "loss_funcs", "mining_funcs"]
self.primary_metric = primary_metric
self.validation_split_name = validation_split_name
############################################
############################################
################## HOOKS #################
############################################
############################################
### Define the end_of_iteration hook. This will be executed at the end of every iteration. ###
def end_of_iteration_hook(self, trainer):
record_these = [[trainer.loss_tracker.losses, {"input_group_name_for_non_objects": "loss_histories"}],
[trainer.loss_tracker.loss_weights, {"input_group_name_for_non_objects": "loss_weights"}],
[trainer.loss_funcs, {"recursive_types": [torch.nn.Module]}],
[trainer.mining_funcs, {}],
[trainer.models, {}],
[trainer.optimizers, {"custom_attr_func": self.optimizer_custom_attr_func}]]
for record, kwargs in record_these:
self.record_keeper.update_records(record, trainer.get_global_iteration(), **kwargs)
# This hook will be passed into the trainer and will be executed at the end of every epoch.
def end_of_epoch_hook(self, tester, dataset_dict, model_folder, test_interval=1, patience=None, test_collate_fn=None):
        if self.primary_metric not in tester.accuracy_calculator.get_curr_metrics():
raise ValueError("HookContainer `primary_metric` must be one of: {}".format(tester.accuracy_calculator.get_curr_metrics()))
if not os.path.exists(model_folder): os.makedirs(model_folder)
def actual_hook(trainer):
continue_training = True
if trainer.epoch % test_interval == 0:
best_epoch = self.save_models_and_eval(trainer, dataset_dict, model_folder, test_interval, tester, test_collate_fn)
continue_training = self.patience_remaining(trainer.epoch, best_epoch, patience)
return continue_training
return actual_hook
def end_of_testing_hook(self, tester):
for split_name, accuracies in tester.all_accuracies.items():
epoch = accuracies["epoch"]
self.record_keeper.update_records(accuracies, epoch, input_group_name_for_non_objects=self.record_group_name(tester, split_name))
_, _, best_epoch, best_accuracy = self.is_new_best_accuracy(tester, split_name, epoch)
best = {"best_epoch":best_epoch, "best_accuracy": best_accuracy}
self.record_keeper.update_records(best, epoch, input_group_name_for_non_objects=self.record_group_name(tester, split_name))
for split_name, u in tester.dim_reduced_embeddings.items():
for k, (dim_reduced, labels) in u.items():
tag = '%s/%s'%(self.record_group_name(tester, split_name), k)
self.record_keeper.add_embedding_plot(dim_reduced, labels, tag, epoch)
############################################
############################################
######### MODEL LOADING AND SAVING #########
############################################
############################################
def load_latest_saved_models(self, trainer, model_folder, device=None, best=False):
if device is None: device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
resume_epoch, model_suffix = c_f.latest_version(model_folder, "trunk_*.pth", best=best)
if resume_epoch > 0:
for obj_dict in [getattr(trainer, x, {}) for x in self.saveable_trainer_objects]:
c_f.load_dict_of_models(obj_dict, model_suffix, model_folder, device, log_if_successful=True)
return resume_epoch + 1
def save_models(self, trainer, model_folder, curr_suffix, prev_suffix=None):
for obj_dict in [getattr(trainer, x, {}) for x in self.saveable_trainer_objects]:
c_f.save_dict_of_models(obj_dict, curr_suffix, model_folder)
if prev_suffix is not None:
c_f.delete_dict_of_models(obj_dict, prev_suffix, model_folder)
def save_models_and_eval(self, trainer, dataset_dict, model_folder, test_interval, tester, collate_fn):
epoch = trainer.epoch
tester.test(dataset_dict, epoch, trainer.models["trunk"], trainer.models["embedder"], list(dataset_dict.keys()), collate_fn)
prev_best_epoch, _ = self.get_best_epoch_and_accuracy(tester, self.validation_split_name)
is_new_best, curr_accuracy, best_epoch, best_accuracy = self.is_new_best_accuracy(tester, self.validation_split_name, epoch)
self.record_keeper.save_records()
trainer.step_lr_plateau_schedulers(curr_accuracy)
self.save_models(trainer, model_folder, epoch, epoch-test_interval) # save latest model
if is_new_best:
logging.info("New best accuracy! {}".format(curr_accuracy))
curr_suffix = "best%d"%best_epoch
prev_suffix = "best%d"%prev_best_epoch if prev_best_epoch is not None else None
self.save_models(trainer, model_folder, curr_suffix, prev_suffix) # save best model
return best_epoch
def is_new_best_accuracy(self, tester, split_name, epoch):
curr_accuracy = self.get_curr_primary_metric(tester, split_name)
best_epoch, best_accuracy = self.get_best_epoch_and_accuracy(tester, split_name)
is_new_best = False
if (curr_accuracy > best_accuracy) or (best_epoch is None):
best_epoch, best_accuracy = epoch, curr_accuracy
is_new_best = True
return is_new_best, curr_accuracy, best_epoch, best_accuracy
############################################
############################################
##### BEST EPOCH AND ACCURACY TRACKING #####
############################################
############################################
def get_loss_history(self, loss_names=()):
columns = "*" if len(loss_names) == 0 else ", ".join(loss_names)
table_name = "loss_histories"
if not self.record_keeper.table_exists(table_name):
return {}
output = self.record_keeper.query("SELECT {} FROM {}".format(columns, table_name), return_dict=True)
output.pop("id", None)
return output
def get_accuracy_history(self, tester, split_name, return_all_metrics=False, metrics=()):
table_name = self.record_group_name(tester, split_name)
if not self.record_keeper.table_exists(table_name):
return {}
def get_accuracies(keys):
keys = "*" if return_all_metrics else "epoch, %s"%keys
query = "SELECT {} FROM {}".format(keys, table_name)
return self.record_keeper.query(query, return_dict=True)
keys = metrics if len(metrics) > 0 else [self.primary_metric]
output = self.try_keys(keys, tester, get_accuracies)
output.pop("id", None)
return output
def get_curr_primary_metric(self, tester, split_name):
def get_curr(key):
return tester.all_accuracies[split_name][key]
return self.try_primary_metric(tester, get_curr)
def try_keys(self, input_keys, tester, input_func):
for average in [True, False]:
keys = ", ".join([tester.accuracies_keyname(k, average=average, label_hierarchy_level=tester.label_hierarchy_level) for k in input_keys])
try:
return input_func(keys)
except (KeyError, sqlite3.OperationalError):
pass
raise KeyError
def try_primary_metric(self, tester, input_func):
return self.try_keys([self.primary_metric], tester, input_func)
# returns accuracies of a specified epoch
def get_accuracies_of_epoch(self, tester, split_name, epoch, select_all=True):
table_name = self.record_group_name(tester, split_name)
if not self.record_keeper.table_exists(table_name):
return []
def get_accuracies(key):
columns = "*" if select_all else "epoch, %s"%key
query = "SELECT %s FROM %s WHERE epoch=?"%(columns, table_name)
return self.record_keeper.query(query, (epoch, ))
return self.try_primary_metric(tester, get_accuracies)
    # returns accuracies of the best epoch and the metric name used to determine best accuracy
def get_accuracies_of_best_epoch(self, tester, split_name, select_all=True, ignore_epoch=(-1,)):
table_name = self.record_group_name(tester, split_name)
if not self.record_keeper.table_exists(table_name):
return [], None
def get_accuracies(key):
columns = "*" if select_all else "epoch, %s"%key
params = ", ".join(["?"]*len(ignore_epoch))
query = """SELECT {0} FROM {1} WHERE {2}=
(SELECT max({2}) FROM {1} WHERE epoch NOT IN ({3}))
AND epoch NOT IN ({3})""".format(columns, table_name, key, params)
output = self.record_keeper.query(query, ignore_epoch+ignore_epoch)
return output, key
return self.try_primary_metric(tester, get_accuracies)
def get_best_epoch_and_accuracy(self, tester, split_name, ignore_epoch=(-1,)):
accuracies, key = self.get_accuracies_of_best_epoch(tester, split_name, select_all=False, ignore_epoch=ignore_epoch)
if len(accuracies) > 0:
return accuracies[0]["epoch"], accuracies[0][key]
return None, 0
def patience_remaining(self, epoch, best_epoch, patience):
if patience is not None and best_epoch is not None:
if epoch - best_epoch > patience:
logging.info("Validation accuracy has plateaued. Exiting.")
return False
return True
def run_tester_separately(self, tester, dataset_dict, epoch, trunk, embedder, splits_to_eval=None, collate_fn=None, skip_eval_if_already_done=True):
if skip_eval_if_already_done:
splits_to_eval = self.get_splits_to_eval(tester, dataset_dict, epoch, splits_to_eval)
if len(splits_to_eval) == 0:
logging.info("Already evaluated")
return False
tester.test(dataset_dict, epoch, trunk, embedder, splits_to_eval, collate_fn)
return True
def get_splits_to_eval(self, tester, dataset_dict, epoch, input_splits_to_eval):
input_splits_to_eval = list(dataset_dict.keys()) if input_splits_to_eval is None else input_splits_to_eval
splits_to_eval = []
for split in input_splits_to_eval:
if len(self.get_accuracies_of_epoch(tester, split, epoch)) == 0:
splits_to_eval.append(split)
return splits_to_eval
def base_record_group_name(self, tester):
base_record_group_name = "%s_"%self.record_group_name_prefix if self.record_group_name_prefix else ''
base_record_group_name += tester.description_suffixes("accuracies")
return base_record_group_name
def record_group_name(self, tester, split_name):
base_record_group_name = self.base_record_group_name(tester)
return "%s_%s"%(base_record_group_name, split_name.upper())
def optimizer_custom_attr_func(self, optimizer):
return {"lr": optimizer.param_groups[0]["lr"]}
class EmptyContainer:
def end_of_epoch_hook(self, *args):
return None
end_of_iteration_hook = None
end_of_testing_hook = None
def get_record_keeper(csv_folder, tensorboard_folder, global_db_path=None, experiment_name=None, is_new_experiment=True, save_figures=False, save_lists=False):
try:
import record_keeper as record_keeper_package
from torch.utils.tensorboard import SummaryWriter
record_writer = record_keeper_package.RecordWriter(folder = csv_folder,
global_db_path = global_db_path,
experiment_name = experiment_name,
is_new_experiment = is_new_experiment,
save_lists = save_lists)
tensorboard_writer = SummaryWriter(log_dir=tensorboard_folder)
record_keeper = record_keeper_package.RecordKeeper(tensorboard_writer = tensorboard_writer,
record_writer = record_writer,
attributes_to_search_for = c_f.list_of_recordable_attributes_list_names(),
save_figures=save_figures)
return record_keeper, record_writer, tensorboard_writer
except ModuleNotFoundError as e:
logging.warning(e)
logging.warning("There won't be any logging or model saving.")
logging.warning("To fix this, pip install record-keeper tensorboard")
return None, None, None
def get_hook_container(record_keeper, **kwargs):
if record_keeper:
return HookContainer(record_keeper, **kwargs)
else:
logging.warning("No record_keeper, so no preset hooks are being returned.")
return EmptyContainer() | import sqlite3
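For orientation, a minimal sketch of how the two helpers above are typically wired together; the folder names are placeholders, not part of the source:
# Hypothetical wiring of get_record_keeper / get_hook_container; paths are placeholders.
record_keeper, record_writer, tb_writer = get_record_keeper("example_logs/csv", "example_logs/tensorboard")
hooks = get_hook_container(record_keeper)
# `hooks` is a HookContainer when record-keeper and tensorboard are installed,
# otherwise an EmptyContainer whose hooks are no-ops.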
|
scrobble_monitor.rs | use std::{
sync::Arc,
time::{Duration, Instant},
};
use reqwest::Client;
use tokio::sync::RwLock;
#[derive(Debug, Clone)]
struct Scrobble {
data: String,
fetch_time: Instant,
}
#[derive(Debug, Clone)]
pub struct ScrobbleMonitor {
client: Client,
api_key: String,
last_scrobble: Arc<RwLock<Option<Scrobble>>>,
}
impl ScrobbleMonitor {
pub fn new(api_key: String) -> Self {
Self {
client: Client::new(),
api_key,
last_scrobble: Arc::new(RwLock::new(None)),
}
}
pub async fn get_scrobble(&mut self) -> anyhow::Result<String> {
let is_fresh = |fetch_time: &Instant| fetch_time.elapsed() < Duration::from_secs(30);
if let Some(scrobble) = &*self.last_scrobble.read().await {
if is_fresh(&scrobble.fetch_time) |
}
let mut last_scrobble = self.last_scrobble.write().await;
match &*last_scrobble {
// make sure another task hasn't fetched the new data first after we
// both waited for write access
Some(scrobble) if is_fresh(&scrobble.fetch_time) => {
tracing::debug!("returning (very) recently fetched scrobble data");
Ok(scrobble.data.clone())
}
_ => {
tracing::debug!("fetching new scrobble data");
let latest = self.fetch_scrobble().await?;
*last_scrobble = Some(Scrobble {
data: latest.clone(),
fetch_time: Instant::now(),
});
Ok(latest)
}
}
}
async fn fetch_scrobble(&self) -> anyhow::Result<String> {
let response = self
.client
.get("http://ws.audioscrobbler.com/2.0")
.query(&[
("method", "user.getRecentTracks"),
("api_key", &self.api_key),
("user", "Doomboy95"),
("limit", "1"),
("format", "json"),
])
.send()
.await?;
Ok(response.text().await?)
}
}
| {
tracing::debug!("returning recently fetched scrobble data");
return Ok(scrobble.data.clone());
} |
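The read-lock freshness check followed by a re-check under the write lock above is a general double-checked caching pattern. A rough, purely illustrative Python asyncio equivalent (every name here is invented for the sketch):

import asyncio
import time

class CachedFetcher:
    # Illustrative double-checked async cache, mirroring the Rust pattern above.
    def __init__(self, ttl_seconds=30):
        self.ttl = ttl_seconds
        self.lock = asyncio.Lock()
        self.value = None
        self.fetched_at = 0.0

    def _is_fresh(self):
        return self.value is not None and time.monotonic() - self.fetched_at < self.ttl

    async def get(self, fetch):
        if self._is_fresh():  # cheap check without holding the lock
            return self.value
        async with self.lock:
            # re-check: another task may have refreshed the cache while we waited
            if self._is_fresh():
                return self.value
            self.value = await fetch()  # `fetch` is any coroutine returning the data
            self.fetched_at = time.monotonic()
            return self.value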
user-dao.service.ts | import { Injectable } from '@nestjs/common';
import { PrismaService } from '../../prisma/prisma.service';
import {
User,
UserWhereUniqueInput,
UserWhereInput,
UserCreateInput,
UserUpdateInput,
UserOrderByInput
} from '@prisma/client';
import { BaseDaoService } from '../base-dao/base-dao-service';
@Injectable()
export class | extends BaseDaoService<User, UserWhereUniqueInput, UserWhereInput, UserCreateInput, UserUpdateInput, UserOrderByInput>{
constructor(private prisma: PrismaService) {
super(prisma.user)
}
}
| UserDaoService |
rng.rs | // Copyright 2018 Developers of the Rand project.
// Copyright 2013-2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! [`Rng`] trait
use rand_core::{Error, RngCore};
use crate::distributions::uniform::{SampleRange, SampleUniform};
use crate::distributions::{self, Distribution, Standard};
use core::num::Wrapping;
use core::{mem, slice};
/// An automatically-implemented extension trait on [`RngCore`] providing high-level
/// generic methods for sampling values and other convenience methods.
///
/// This is the primary trait to use when generating random values.
///
/// # Generic usage
///
/// The basic pattern is `fn foo<R: Rng + ?Sized>(rng: &mut R)`. Some
/// things are worth noting here:
///
/// - Since `Rng: RngCore` and every `RngCore` implements `Rng`, it makes no
/// difference whether we use `R: Rng` or `R: RngCore`.
/// - The `+ ?Sized` un-bounding allows functions to be called directly on
/// type-erased references; i.e. `foo(r)` where `r: &mut dyn RngCore`. Without
/// this it would be necessary to write `foo(&mut r)`.
///
/// An alternative pattern is possible: `fn foo<R: Rng>(rng: R)`. This has some
/// trade-offs. It allows the argument to be consumed directly without a `&mut`
/// (which is how `from_rng(thread_rng())` works); also it still works directly
/// on references (including type-erased references). Unfortunately within the
/// function `foo` it is not known whether `rng` is a reference type or not,
/// hence many uses of `rng` require an extra reference, either explicitly
/// (`distr.sample(&mut rng)`) or implicitly (`rng.gen()`); one may hope the
/// optimiser can remove redundant references later.
///
/// Example:
///
/// ```
/// # use rand::thread_rng;
/// use rand::Rng;
///
/// fn foo<R: Rng + ?Sized>(rng: &mut R) -> f32 {
/// rng.gen() | /// # let v = foo(&mut thread_rng());
/// ```
pub trait Rng: RngCore {
/// Return a random value supporting the [`Standard`] distribution.
///
/// # Example
///
/// ```
/// use rand::{thread_rng, Rng};
///
/// let mut rng = thread_rng();
/// let x: u32 = rng.gen();
/// println!("{}", x);
/// println!("{:?}", rng.gen::<(f64, bool)>());
/// ```
///
/// # Arrays and tuples
///
/// The `rng.gen()` method is able to generate arrays (up to 32 elements)
/// and tuples (up to 12 elements), so long as all element types can be
/// generated.
/// When using `rustc` ≥ 1.51, enable the `min_const_gen` feature to support
/// arrays larger than 32 elements.
///
/// For arrays of integers, especially for those with small element types
/// (< 64 bit), it will likely be faster to instead use [`Rng::fill`].
///
/// ```
/// use rand::{thread_rng, Rng};
///
/// let mut rng = thread_rng();
/// let tuple: (u8, i32, char) = rng.gen(); // arbitrary tuple support
///
/// let arr1: [f32; 32] = rng.gen(); // array construction
/// let mut arr2 = [0u8; 128];
/// rng.fill(&mut arr2); // array fill
/// ```
///
/// [`Standard`]: distributions::Standard
#[inline]
fn gen<T>(&mut self) -> T
where Standard: Distribution<T> {
Standard.sample(self)
}
/// Generate a random value in the given range.
///
/// This function is optimised for the case that only a single sample is
/// made from the given range. See also the [`Uniform`] distribution
/// type which may be faster if sampling from the same range repeatedly.
///
/// Only `gen_range(low..high)` and `gen_range(low..=high)` are supported.
///
/// # Panics
///
/// Panics if the range is empty.
///
/// # Example
///
/// ```
/// use rand::{thread_rng, Rng};
///
/// let mut rng = thread_rng();
///
/// // Exclusive range
/// let n: u32 = rng.gen_range(0..10);
/// println!("{}", n);
/// let m: f64 = rng.gen_range(-40.0..1.3e5);
/// println!("{}", m);
///
/// // Inclusive range
/// let n: u32 = rng.gen_range(0..=10);
/// println!("{}", n);
/// ```
///
/// [`Uniform`]: distributions::uniform::Uniform
fn gen_range<T, R>(&mut self, range: R) -> T
where
T: SampleUniform,
R: SampleRange<T>
{
assert!(!range.is_empty(), "cannot sample empty range");
range.sample_single(self)
}
/// Sample a new value, using the given distribution.
///
/// ### Example
///
/// ```
/// use rand::{thread_rng, Rng};
/// use rand::distributions::Uniform;
///
/// let mut rng = thread_rng();
/// let x = rng.sample(Uniform::new(10u32, 15));
/// // Type annotation requires two types, the type and distribution; the
/// // distribution can be inferred.
/// let y = rng.sample::<u16, _>(Uniform::new(10, 15));
/// ```
fn sample<T, D: Distribution<T>>(&mut self, distr: D) -> T {
distr.sample(self)
}
/// Create an iterator that generates values using the given distribution.
///
/// Note that this function takes its arguments by value. This works since
/// `(&mut R): Rng where R: Rng` and
/// `(&D): Distribution where D: Distribution`,
/// however borrowing is not automatic hence `rng.sample_iter(...)` may
/// need to be replaced with `(&mut rng).sample_iter(...)`.
///
/// # Example
///
/// ```
/// use rand::{thread_rng, Rng};
/// use rand::distributions::{Alphanumeric, Uniform, Standard};
///
/// let mut rng = thread_rng();
///
/// // Vec of 16 x f32:
/// let v: Vec<f32> = (&mut rng).sample_iter(Standard).take(16).collect();
///
/// // String:
/// let s: String = (&mut rng).sample_iter(Alphanumeric)
/// .take(7)
/// .map(char::from)
/// .collect();
///
/// // Combined values
/// println!("{:?}", (&mut rng).sample_iter(Standard).take(5)
/// .collect::<Vec<(f64, bool)>>());
///
/// // Dice-rolling:
/// let die_range = Uniform::new_inclusive(1, 6);
/// let mut roll_die = (&mut rng).sample_iter(die_range);
/// while roll_die.next().unwrap() != 6 {
/// println!("Not a 6; rolling again!");
/// }
/// ```
fn sample_iter<T, D>(self, distr: D) -> distributions::DistIter<D, Self, T>
where
D: Distribution<T>,
Self: Sized,
{
distr.sample_iter(self)
}
/// Fill any type implementing [`Fill`] with random data
///
/// The distribution is expected to be uniform with portable results, but
/// this cannot be guaranteed for third-party implementations.
///
/// This is identical to [`try_fill`] except that it panics on error.
///
/// # Example
///
/// ```
/// use rand::{thread_rng, Rng};
///
/// let mut arr = [0i8; 20];
/// thread_rng().fill(&mut arr[..]);
/// ```
///
/// [`fill_bytes`]: RngCore::fill_bytes
/// [`try_fill`]: Rng::try_fill
fn fill<T: Fill + ?Sized>(&mut self, dest: &mut T) {
dest.try_fill(self).unwrap_or_else(|_| panic!("Rng::fill failed"))
}
/// Fill any type implementing [`Fill`] with random data
///
/// The distribution is expected to be uniform with portable results, but
/// this cannot be guaranteed for third-party implementations.
///
/// This is identical to [`fill`] except that it forwards errors.
///
/// # Example
///
/// ```
/// # use rand::Error;
/// use rand::{thread_rng, Rng};
///
/// # fn try_inner() -> Result<(), Error> {
/// let mut arr = [0u64; 4];
/// thread_rng().try_fill(&mut arr[..])?;
/// # Ok(())
/// # }
///
/// # try_inner().unwrap()
/// ```
///
/// [`try_fill_bytes`]: RngCore::try_fill_bytes
/// [`fill`]: Rng::fill
fn try_fill<T: Fill + ?Sized>(&mut self, dest: &mut T) -> Result<(), Error> {
dest.try_fill(self)
}
/// Return a bool with a probability `p` of being true.
///
/// See also the [`Bernoulli`] distribution, which may be faster if
/// sampling from the same probability repeatedly.
///
/// # Example
///
/// ```
/// use rand::{thread_rng, Rng};
///
/// let mut rng = thread_rng();
/// println!("{}", rng.gen_bool(1.0 / 3.0));
/// ```
///
/// # Panics
///
/// If `p < 0` or `p > 1`.
///
/// [`Bernoulli`]: distributions::Bernoulli
#[inline]
fn gen_bool(&mut self, p: f64) -> bool {
let d = distributions::Bernoulli::new(p).unwrap();
self.sample(d)
}
/// Return a bool with a probability of `numerator/denominator` of being
/// true. I.e. `gen_ratio(2, 3)` has chance of 2 in 3, or about 67%, of
/// returning true. If `numerator == denominator`, then the returned value
/// is guaranteed to be `true`. If `numerator == 0`, then the returned
/// value is guaranteed to be `false`.
///
/// See also the [`Bernoulli`] distribution, which may be faster if
/// sampling from the same `numerator` and `denominator` repeatedly.
///
/// # Panics
///
/// If `denominator == 0` or `numerator > denominator`.
///
/// # Example
///
/// ```
/// use rand::{thread_rng, Rng};
///
/// let mut rng = thread_rng();
/// println!("{}", rng.gen_ratio(2, 3));
/// ```
///
/// [`Bernoulli`]: distributions::Bernoulli
#[inline]
fn gen_ratio(&mut self, numerator: u32, denominator: u32) -> bool {
let d = distributions::Bernoulli::from_ratio(numerator, denominator).unwrap();
self.sample(d)
}
}
impl<R: RngCore + ?Sized> Rng for R {}
/// Types which may be filled with random data
///
/// This trait allows arrays to be efficiently filled with random data.
///
/// Implementations are expected to be portable across machines unless
/// clearly documented otherwise (see the
/// [Chapter on Portability](https://rust-random.github.io/book/portability.html)).
pub trait Fill {
/// Fill self with random data
fn try_fill<R: Rng + ?Sized>(&mut self, rng: &mut R) -> Result<(), Error>;
}
macro_rules! impl_fill_each {
() => {};
($t:ty) => {
impl Fill for [$t] {
fn try_fill<R: Rng + ?Sized>(&mut self, rng: &mut R) -> Result<(), Error> {
for elt in self.iter_mut() {
*elt = rng.gen();
}
Ok(())
}
}
};
($t:ty, $($tt:ty,)*) => {
impl_fill_each!($t);
impl_fill_each!($($tt,)*);
};
}
impl_fill_each!(bool, char, f32, f64,);
impl Fill for [u8] {
fn try_fill<R: Rng + ?Sized>(&mut self, rng: &mut R) -> Result<(), Error> {
rng.try_fill_bytes(self)
}
}
macro_rules! impl_fill {
() => {};
($t:ty) => {
impl Fill for [$t] {
#[inline(never)] // in micro benchmarks, this improves performance
fn try_fill<R: Rng + ?Sized>(&mut self, rng: &mut R) -> Result<(), Error> {
if self.len() > 0 {
rng.try_fill_bytes(unsafe {
slice::from_raw_parts_mut(self.as_mut_ptr()
as *mut u8,
self.len() * mem::size_of::<$t>()
)
})?;
for x in self {
*x = x.to_le();
}
}
Ok(())
}
}
impl Fill for [Wrapping<$t>] {
#[inline(never)]
fn try_fill<R: Rng + ?Sized>(&mut self, rng: &mut R) -> Result<(), Error> {
if self.len() > 0 {
rng.try_fill_bytes(unsafe {
slice::from_raw_parts_mut(self.as_mut_ptr()
as *mut u8,
self.len() * mem::size_of::<$t>()
)
})?;
for x in self {
*x = Wrapping(x.0.to_le());
}
}
Ok(())
}
}
};
($t:ty, $($tt:ty,)*) => {
impl_fill!($t);
// TODO: this could replace above impl once Rust #32463 is fixed
// impl_fill!(Wrapping<$t>);
impl_fill!($($tt,)*);
}
}
impl_fill!(u16, u32, u64, usize, u128,);
impl_fill!(i8, i16, i32, i64, isize, i128,);
#[cfg_attr(doc_cfg, doc(cfg(feature = "min_const_gen")))]
#[cfg(feature = "min_const_gen")]
impl<T, const N: usize> Fill for [T; N]
where [T]: Fill
{
fn try_fill<R: Rng + ?Sized>(&mut self, rng: &mut R) -> Result<(), Error> {
self[..].try_fill(rng)
}
}
#[cfg(not(feature = "min_const_gen"))]
macro_rules! impl_fill_arrays {
($n:expr,) => {};
($n:expr, $N:ident) => {
impl<T> Fill for [T; $n] where [T]: Fill {
fn try_fill<R: Rng + ?Sized>(&mut self, rng: &mut R) -> Result<(), Error> {
self[..].try_fill(rng)
}
}
};
($n:expr, $N:ident, $($NN:ident,)*) => {
impl_fill_arrays!($n, $N);
impl_fill_arrays!($n - 1, $($NN,)*);
};
(!div $n:expr,) => {};
(!div $n:expr, $N:ident, $($NN:ident,)*) => {
impl_fill_arrays!($n, $N);
impl_fill_arrays!(!div $n / 2, $($NN,)*);
};
}
#[cfg(not(feature = "min_const_gen"))]
#[rustfmt::skip]
impl_fill_arrays!(32, N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,);
#[cfg(not(feature = "min_const_gen"))]
impl_fill_arrays!(!div 4096, N,N,N,N,N,N,N,);
#[cfg(test)]
mod test {
use super::*;
use crate::test::rng;
use crate::rngs::mock::StepRng;
#[cfg(feature = "alloc")] use alloc::boxed::Box;
#[test]
fn test_fill_bytes_default() {
let mut r = StepRng::new(0x11_22_33_44_55_66_77_88, 0);
// check every remainder mod 8, both in small and big vectors.
let lengths = [0, 1, 2, 3, 4, 5, 6, 7, 80, 81, 82, 83, 84, 85, 86, 87];
for &n in lengths.iter() {
let mut buffer = [0u8; 87];
let v = &mut buffer[0..n];
r.fill_bytes(v);
// use this to get nicer error messages.
for (i, &byte) in v.iter().enumerate() {
if byte == 0 {
panic!("byte {} of {} is zero", i, n)
}
}
}
}
#[test]
fn test_fill() {
let x = 9041086907909331047; // a random u64
let mut rng = StepRng::new(x, 0);
// Convert to byte sequence and back to u64; byte-swap twice if BE.
let mut array = [0u64; 2];
rng.fill(&mut array[..]);
assert_eq!(array, [x, x]);
assert_eq!(rng.next_u64(), x);
// Convert to bytes then u32 in LE order
let mut array = [0u32; 2];
rng.fill(&mut array[..]);
assert_eq!(array, [x as u32, (x >> 32) as u32]);
assert_eq!(rng.next_u32(), x as u32);
// Check equivalence using wrapped arrays
let mut warray = [Wrapping(0u32); 2];
rng.fill(&mut warray[..]);
assert_eq!(array[0], warray[0].0);
assert_eq!(array[1], warray[1].0);
// Check equivalence for generated floats
let mut array = [0f32; 2];
rng.fill(&mut array);
let gen: [f32; 2] = rng.gen();
assert_eq!(array, gen);
}
#[test]
fn test_fill_empty() {
let mut array = [0u32; 0];
let mut rng = StepRng::new(0, 1);
rng.fill(&mut array);
rng.fill(&mut array[..]);
}
#[test]
fn test_gen_range_int() {
let mut r = rng(101);
for _ in 0..1000 {
let a = r.gen_range(-4711..17);
assert!((-4711..17).contains(&a));
let a: i8 = r.gen_range(-3..42);
assert!((-3..42).contains(&a));
let a: u16 = r.gen_range(10..99);
assert!((10..99).contains(&a));
let a: i32 = r.gen_range(-100..2000);
assert!((-100..2000).contains(&a));
let a: u32 = r.gen_range(12..=24);
assert!((12..=24).contains(&a));
assert_eq!(r.gen_range(0u32..1), 0u32);
assert_eq!(r.gen_range(-12i64..-11), -12i64);
assert_eq!(r.gen_range(3_000_000..3_000_001), 3_000_000);
}
}
#[test]
fn test_gen_range_float() {
let mut r = rng(101);
for _ in 0..1000 {
let a = r.gen_range(-4.5..1.7);
assert!((-4.5..1.7).contains(&a));
let a = r.gen_range(-1.1..=-0.3);
assert!((-1.1..=-0.3).contains(&a));
assert_eq!(r.gen_range(0.0f32..=0.0), 0.);
assert_eq!(r.gen_range(-11.0..=-11.0), -11.);
assert_eq!(r.gen_range(3_000_000.0..=3_000_000.0), 3_000_000.);
}
}
#[test]
#[should_panic]
fn test_gen_range_panic_int() {
#![allow(clippy::reversed_empty_ranges)]
let mut r = rng(102);
r.gen_range(5..-2);
}
#[test]
#[should_panic]
fn test_gen_range_panic_usize() {
#![allow(clippy::reversed_empty_ranges)]
let mut r = rng(103);
r.gen_range(5..2);
}
#[test]
fn test_gen_bool() {
#![allow(clippy::bool_assert_comparison)]
let mut r = rng(105);
for _ in 0..5 {
assert_eq!(r.gen_bool(0.0), false);
assert_eq!(r.gen_bool(1.0), true);
}
}
#[test]
fn test_rng_trait_object() {
use crate::distributions::{Distribution, Standard};
let mut rng = rng(109);
let mut r = &mut rng as &mut dyn RngCore;
r.next_u32();
r.gen::<i32>();
assert_eq!(r.gen_range(0..1), 0);
let _c: u8 = Standard.sample(&mut r);
}
#[test]
#[cfg(feature = "alloc")]
fn test_rng_boxed_trait() {
use crate::distributions::{Distribution, Standard};
let rng = rng(110);
let mut r = Box::new(rng) as Box<dyn RngCore>;
r.next_u32();
r.gen::<i32>();
assert_eq!(r.gen_range(0..1), 0);
let _c: u8 = Standard.sample(&mut r);
}
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_gen_ratio_average() {
const NUM: u32 = 3;
const DENOM: u32 = 10;
const N: u32 = 100_000;
let mut sum: u32 = 0;
let mut rng = rng(111);
for _ in 0..N {
if rng.gen_ratio(NUM, DENOM) {
sum += 1;
}
}
// Have Binomial(N, NUM/DENOM) distribution
let expected = (NUM * N) / DENOM; // exact integer
assert!(((sum - expected) as i32).abs() < 500);
}
} | /// }
/// |
actions.rs | use super::state::*;
use crate::module::_common::edit::history::state::HistoryState;
use shared::{
api::endpoints::{jig::module::*, ApiEndpoint}, | domain::jig::{
module::{
body::{BodyExt, ModeExt, StepExt},
*,
},
*,
},
error::EmptyError,
};
use std::rc::Rc;
use super::base::state::*;
use super::choose::state::*;
use dominator::clone;
use dominator_helpers::futures::AsyncLoader;
use std::future::Future;
use utils::{prelude::*, screenshot::call_screenshot_service};
impl<Mode, Step, RawData, Base, Main, Sidebar, Header, Footer, Overlay>
GenericState<Mode, Step, RawData, Base, Main, Sidebar, Header, Footer, Overlay>
where
Mode: ModeExt + 'static,
Step: StepExt + 'static,
RawData: BodyExt<Mode, Step> + 'static,
Base: BaseExt<Step> + 'static,
Main: MainExt + 'static,
Sidebar: SidebarExt + 'static,
Header: HeaderExt + 'static,
Footer: FooterExt + 'static,
Overlay: OverlayExt + 'static,
{
pub fn change_phase_choose<BaseInitFromRawFn, BaseInitFromRawOutput>(
_self: Rc<Self>,
init_from_raw: BaseInitFromRawFn,
) where
BaseInitFromRawFn:
Fn(BaseInitFromRawArgs<RawData, Mode, Step>) -> BaseInitFromRawOutput + Clone + 'static,
BaseInitFromRawOutput:
Future<Output = BaseInit<Step, Base, Main, Sidebar, Header, Footer, Overlay>>,
{
_self.phase.set(Rc::new(Phase::Choose(Rc::new(Choose::new(
_self.clone(),
init_from_raw,
)))));
}
pub async fn change_phase_base<BaseInitFromRawFn, BaseInitFromRawOutput>(
_self: Rc<Self>,
init_from_raw: BaseInitFromRawFn,
init_args: BaseInitFromRawArgs<RawData, Mode, Step>,
) -> Rc<AppBase<RawData, Mode, Step, Base, Main, Sidebar, Header, Footer, Overlay>>
where
BaseInitFromRawFn:
Fn(BaseInitFromRawArgs<RawData, Mode, Step>) -> BaseInitFromRawOutput + Clone + 'static,
BaseInitFromRawOutput:
Future<Output = BaseInit<Step, Base, Main, Sidebar, Header, Footer, Overlay>>,
{
let app_base = Rc::new(AppBase::new(_self.clone(), init_from_raw, init_args).await);
_self.phase.set(Rc::new(Phase::Base(app_base.clone())));
app_base
}
pub fn reset_from_history<BaseInitFromRawFn, BaseInitFromRawOutput>(
_self: Rc<Self>,
init_from_raw: BaseInitFromRawFn,
) -> Box<dyn Fn(RawData)>
where
BaseInitFromRawFn:
Fn(BaseInitFromRawArgs<RawData, Mode, Step>) -> BaseInitFromRawOutput + Clone + 'static,
BaseInitFromRawOutput:
Future<Output = BaseInit<Step, Base, Main, Sidebar, Header, Footer, Overlay>>,
{
Box::new(move |raw: RawData| {
_self
.reset_from_history_loader
.load(clone!(_self, init_from_raw => async move {
let (jig_id, module_id, jig) = (
_self.opts.jig_id,
_self.opts.module_id,
_self.jig.borrow().clone().unwrap_ji()
);
if raw.requires_choose_mode() {
Self::change_phase_choose(_self.clone(), init_from_raw.clone());
} else {
Self::change_phase_base(
_self.clone(),
init_from_raw.clone(),
BaseInitFromRawArgs::new(
jig_id,
module_id,
jig,
raw,
InitSource::History,
_self.history.borrow().as_ref().unwrap_ji().clone()
)
).await;
}
}));
})
}
}
pub type HistoryStateImpl<RawData> =
HistoryState<RawData, Box<dyn Fn(RawData)>, Box<dyn Fn(RawData)>>;
//pub type HistorySaveFn<RawData> = impl Fn(RawData);
pub fn save_history<RawData, Mode, Step>(
skip_for_debug: bool,
screenshot_loader: Rc<AsyncLoader>,
save_loader: Rc<AsyncLoader>,
jig_id: JigId,
module_id: ModuleId,
) -> Box<dyn Fn(RawData)>
where
RawData: BodyExt<Mode, Step> + 'static,
Mode: ModeExt + 'static,
Step: StepExt + 'static,
{
Box::new(move |raw_data: RawData| {
if !skip_for_debug {
save(
raw_data,
screenshot_loader.clone(),
save_loader.clone(),
jig_id,
module_id,
);
}
})
}
pub fn save<RawData, Mode, Step>(
raw_data: RawData,
screenshot_loader: Rc<AsyncLoader>,
save_loader: Rc<AsyncLoader>,
jig_id: JigId,
module_id: ModuleId,
) where
RawData: BodyExt<Mode, Step> + 'static,
Mode: ModeExt + 'static,
Step: StepExt + 'static,
{
save_loader.load(async move {
let body = raw_data.as_body();
let path = Update::PATH.replace("{id}", &jig_id.0.to_string());
let is_complete = raw_data.is_complete();
let req = Some(ModuleUpdateRequest {
id: StableOrUniqueId::Unique(module_id),
is_complete: Some(is_complete),
index: None,
body: Some(body),
});
let _ = api_with_auth_empty::<EmptyError, _>(&path, Update::METHOD, req).await;
// Update the sidebar with this modules completion status
let _ = IframeAction::new(ModuleToJigEditorMessage::Complete(module_id, is_complete))
.try_post_message_to_editor();
if is_complete {
// Only generate a screenshot if the module has the minimum required content.
screenshot_loader.load(async move {
call_screenshot_service(jig_id, module_id, RawData::kind()).await;
});
}
});
}
//doesn't compile, gotta box for now: https://github.com/rust-lang/rust/issues/65442
//pub type HistoryUndoRedoFn<RawData> = impl Fn(Option<RawData>);
//pub fn history_on_undo_redo<Main, Mode, RawData>(state:Rc<State<Main, Mode, RawData>>) -> HistoryUndoRedoFn<RawData> | |
test_team_api.py | # coding: utf-8
"""
Quay Frontend
This API allows you to perform many of the operations required to work with Quay repositories, users, and organizations. You can find out more at <a href=\"https://quay.io\">Quay</a>. # noqa: E501
OpenAPI spec version: v1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import quay
from api.team_api import TeamApi # noqa: E501
from quay.rest import ApiException
class TestTeamApi(unittest.TestCase):
"""TeamApi unit test stubs"""
def setUp(self):
self.api = TeamApi()  # noqa: E501
def tearDown(self):
pass
def test_delete_organization_team(self):
"""Test case for delete_organization_team
"""
pass
def test_delete_organization_team_member(self):
"""Test case for delete_organization_team_member
"""
pass
def test_delete_team_member_email_invite(self):
"""Test case for delete_team_member_email_invite
"""
pass
def test_get_organization_team_members(self):
"""Test case for get_organization_team_members
"""
pass
def test_get_organization_team_permissions(self):
|
def test_invite_team_member_email(self):
"""Test case for invite_team_member_email
"""
pass
def test_update_organization_team(self):
"""Test case for update_organization_team
"""
pass
def test_update_organization_team_member(self):
"""Test case for update_organization_team_member
"""
pass
if __name__ == '__main__':
unittest.main()
| """Test case for get_organization_team_permissions
"""
pass |
qt_main.py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'qt_main.ui'
#
# Created: Fri Apr 24 13:52:17 2009
# by: PyQt4 UI code generator 4.4.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(691, 635)
MainWindow.setDockNestingEnabled(False)
MainWindow.setDockOptions(QtGui.QMainWindow.AllowTabbedDocks | QtGui.QMainWindow.AnimatedDocks)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setMargin(3)
self.verticalLayout.setObjectName("verticalLayout")
self.splitter_2 = QtGui.QSplitter(self.centralwidget)
self.splitter_2.setOrientation(QtCore.Qt.Vertical)
self.splitter_2.setObjectName("splitter_2")
self.splitter = QtGui.QSplitter(self.splitter_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Ignored)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.splitter.sizePolicy().hasHeightForWidth())
self.splitter.setSizePolicy(sizePolicy)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.leo_outline_frame = QtGui.QFrame(self.splitter)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.leo_outline_frame.sizePolicy().hasHeightForWidth())
self.leo_outline_frame.setSizePolicy(sizePolicy)
self.leo_outline_frame.setFrameShape(QtGui.QFrame.NoFrame)
self.leo_outline_frame.setFrameShadow(QtGui.QFrame.Plain)
self.leo_outline_frame.setLineWidth(1)
self.leo_outline_frame.setObjectName("leo_outline_frame")
self.leo_outline_grid = QtGui.QGridLayout(self.leo_outline_frame)
self.leo_outline_grid.setMargin(0)
self.leo_outline_grid.setObjectName("leo_outline_grid")
self.leo_outline_inner_frame = QtGui.QFrame(self.leo_outline_frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Ignored)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.leo_outline_inner_frame.sizePolicy().hasHeightForWidth())
self.leo_outline_inner_frame.setSizePolicy(sizePolicy)
self.leo_outline_inner_frame.setFrameShape(QtGui.QFrame.NoFrame)
self.leo_outline_inner_frame.setFrameShadow(QtGui.QFrame.Plain)
self.leo_outline_inner_frame.setObjectName("leo_outline_inner_frame")
self.gridLayout_3 = QtGui.QGridLayout(self.leo_outline_inner_frame)
self.gridLayout_3.setMargin(0)
self.gridLayout_3.setObjectName("gridLayout_3")
self.treeWidget = QtGui.QTreeWidget(self.leo_outline_inner_frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Ignored)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.treeWidget.sizePolicy().hasHeightForWidth())
self.treeWidget.setSizePolicy(sizePolicy)
self.treeWidget.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.treeWidget.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.treeWidget.setHeaderHidden(False)
self.treeWidget.setObjectName("treeWidget")
self.gridLayout_3.addWidget(self.treeWidget, 0, 0, 1, 1)
self.leo_outline_grid.addWidget(self.leo_outline_inner_frame, 0, 0, 1, 1)
self.leo_log_frame = QtGui.QFrame(self.splitter)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.leo_log_frame.sizePolicy().hasHeightForWidth())
self.leo_log_frame.setSizePolicy(sizePolicy)
self.leo_log_frame.setFrameShape(QtGui.QFrame.NoFrame)
self.leo_log_frame.setFrameShadow(QtGui.QFrame.Plain)
self.leo_log_frame.setLineWidth(1)
self.leo_log_frame.setObjectName("leo_log_frame")
self.leo_log_grid = QtGui.QGridLayout(self.leo_log_frame)
self.leo_log_grid.setMargin(0)
self.leo_log_grid.setObjectName("leo_log_grid")
self.leo_log_inner_frame = QtGui.QFrame(self.leo_log_frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.leo_log_inner_frame.sizePolicy().hasHeightForWidth())
self.leo_log_inner_frame.setSizePolicy(sizePolicy)
self.leo_log_inner_frame.setFrameShape(QtGui.QFrame.NoFrame)
self.leo_log_inner_frame.setFrameShadow(QtGui.QFrame.Plain)
self.leo_log_inner_frame.setObjectName("leo_log_inner_frame")
self.gridLayout_7 = QtGui.QGridLayout(self.leo_log_inner_frame)
self.gridLayout_7.setMargin(0)
self.gridLayout_7.setObjectName("gridLayout_7")
self.tabWidget = QtGui.QTabWidget(self.leo_log_inner_frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Ignored)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tabWidget.sizePolicy().hasHeightForWidth())
self.tabWidget.setSizePolicy(sizePolicy)
self.tabWidget.setObjectName("tabWidget")
self.tab = QtGui.QWidget()
self.tab.setObjectName("tab")
self.verticalLayout_3 = QtGui.QVBoxLayout(self.tab)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.textBrowser = QtGui.QTextBrowser(self.tab)  # PyQt4 has no QtWidgets module.
self.textBrowser.setObjectName("textBrowser")
self.verticalLayout_3.addWidget(self.textBrowser)
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtGui.QWidget()
self.tab_2.setObjectName("tab_2")
self.gridLayout_5 = QtGui.QGridLayout(self.tab_2)
self.gridLayout_5.setObjectName("gridLayout_5")
self.findPattern = QtGui.QLineEdit(self.tab_2)
self.findPattern.setObjectName("findPattern")
self.gridLayout_5.addWidget(self.findPattern, 0, 1, 1, 1)
self.findChange = QtGui.QLineEdit(self.tab_2)
self.findChange.setObjectName("findChange")
self.gridLayout_5.addWidget(self.findChange, 1, 1, 1, 1)
self.checkBoxWholeWord = QtGui.QCheckBox(self.tab_2)
self.checkBoxWholeWord.setObjectName("checkBoxWholeWord")
self.gridLayout_5.addWidget(self.checkBoxWholeWord, 2, 0, 1, 1)
self.checkBoxEntireOutline = QtGui.QCheckBox(self.tab_2)
self.checkBoxEntireOutline.setObjectName("checkBoxEntireOutline")
self.gridLayout_5.addWidget(self.checkBoxEntireOutline, 2, 1, 1, 1)
self.checkBoxIgnoreCase = QtGui.QCheckBox(self.tab_2)
self.checkBoxIgnoreCase.setObjectName("checkBoxIgnoreCase")
self.gridLayout_5.addWidget(self.checkBoxIgnoreCase, 3, 0, 1, 1)
self.checkBoxSuboutlineOnly = QtGui.QCheckBox(self.tab_2)
self.checkBoxSuboutlineOnly.setObjectName("checkBoxSuboutlineOnly")
self.gridLayout_5.addWidget(self.checkBoxSuboutlineOnly, 3, 1, 1, 1)
self.checkBoxWrapAround = QtGui.QCheckBox(self.tab_2)
self.checkBoxWrapAround.setObjectName("checkBoxWrapAround")
self.gridLayout_5.addWidget(self.checkBoxWrapAround, 4, 0, 1, 1)
self.checkBoxNodeOnly = QtGui.QCheckBox(self.tab_2)
self.checkBoxNodeOnly.setObjectName("checkBoxNodeOnly")
self.gridLayout_5.addWidget(self.checkBoxNodeOnly, 4, 1, 1, 1)
self.checkBoxReverse = QtGui.QCheckBox(self.tab_2)
self.checkBoxReverse.setObjectName("checkBoxReverse")
self.gridLayout_5.addWidget(self.checkBoxReverse, 5, 0, 1, 1)
self.checkBoxSearchHeadline = QtGui.QCheckBox(self.tab_2)
self.checkBoxSearchHeadline.setObjectName("checkBoxSearchHeadline")
self.gridLayout_5.addWidget(self.checkBoxSearchHeadline, 5, 1, 1, 1)
self.checkBoxRexexp = QtGui.QCheckBox(self.tab_2)
self.checkBoxRexexp.setObjectName("checkBoxRexexp")
self.gridLayout_5.addWidget(self.checkBoxRexexp, 6, 0, 1, 1)
self.checkBoxSearchBody = QtGui.QCheckBox(self.tab_2)
self.checkBoxSearchBody.setObjectName("checkBoxSearchBody")
self.gridLayout_5.addWidget(self.checkBoxSearchBody, 6, 1, 1, 1)
self.checkBoxMarkFinds = QtGui.QCheckBox(self.tab_2)
self.checkBoxMarkFinds.setObjectName("checkBoxMarkFinds")
self.gridLayout_5.addWidget(self.checkBoxMarkFinds, 7, 0, 1, 1)
self.checkBoxMarkChanges = QtGui.QCheckBox(self.tab_2)
self.checkBoxMarkChanges.setObjectName("checkBoxMarkChanges")
self.gridLayout_5.addWidget(self.checkBoxMarkChanges, 7, 1, 1, 1)
self.label_2 = QtGui.QLabel(self.tab_2)
self.label_2.setObjectName("label_2")
self.gridLayout_5.addWidget(self.label_2, 0, 0, 1, 1)
self.label_3 = QtGui.QLabel(self.tab_2)
self.label_3.setObjectName("label_3")
self.gridLayout_5.addWidget(self.label_3, 1, 0, 1, 1)
self.tabWidget.addTab(self.tab_2, "")
self.tab_3 = QtGui.QWidget()
self.tab_3.setObjectName("tab_3")
self.tabWidget.addTab(self.tab_3, "")
self.leo_spell_tab = QtGui.QWidget()
self.leo_spell_tab.setObjectName("leo_spell_tab")
self.verticalLayout_5 = QtGui.QVBoxLayout(self.leo_spell_tab)
self.verticalLayout_5.setSpacing(0)
self.verticalLayout_5.setMargin(2)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.leo_spell_panel = QtGui.QFrame(self.leo_spell_tab)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 255, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
self.leo_spell_panel.setPalette(palette)
self.leo_spell_panel.setAutoFillBackground(False)
self.leo_spell_panel.setFrameShape(QtGui.QFrame.NoFrame)
self.leo_spell_panel.setFrameShadow(QtGui.QFrame.Plain)
self.leo_spell_panel.setLineWidth(0)
self.leo_spell_panel.setObjectName("leo_spell_panel")
self.verticalLayout_6 = QtGui.QVBoxLayout(self.leo_spell_panel)
self.verticalLayout_6.setMargin(0)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.gridLayout_6 = QtGui.QGridLayout()
self.gridLayout_6.setSpacing(2)
self.gridLayout_6.setObjectName("gridLayout_6")
self.leo_spell_btn_Add = QtGui.QPushButton(self.leo_spell_panel)
self.leo_spell_btn_Add.setObjectName("leo_spell_btn_Add")
self.gridLayout_6.addWidget(self.leo_spell_btn_Add, 2, 1, 1, 1)
self.leo_spell_btn_Find = QtGui.QPushButton(self.leo_spell_panel)
self.leo_spell_btn_Find.setObjectName("leo_spell_btn_Find")
self.gridLayout_6.addWidget(self.leo_spell_btn_Find, 2, 0, 1, 1)
self.leo_spell_btn_Change = QtGui.QPushButton(self.leo_spell_panel)
self.leo_spell_btn_Change.setObjectName("leo_spell_btn_Change")
self.gridLayout_6.addWidget(self.leo_spell_btn_Change, 3, 0, 1, 1)
self.leo_spell_btn_FindChange = QtGui.QPushButton(self.leo_spell_panel)
self.leo_spell_btn_FindChange.setObjectName("leo_spell_btn_FindChange")
self.gridLayout_6.addWidget(self.leo_spell_btn_FindChange, 3, 1, 1, 1)
self.leo_spell_btn_Ignore = QtGui.QPushButton(self.leo_spell_panel)
self.leo_spell_btn_Ignore.setObjectName("leo_spell_btn_Ignore")
self.gridLayout_6.addWidget(self.leo_spell_btn_Ignore, 4, 0, 1, 1)
self.leo_spell_btn_Hide = QtGui.QPushButton(self.leo_spell_panel)
self.leo_spell_btn_Hide.setCheckable(False)
self.leo_spell_btn_Hide.setObjectName("leo_spell_btn_Hide")
self.gridLayout_6.addWidget(self.leo_spell_btn_Hide, 4, 1, 1, 1)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_6.addItem(spacerItem, 5, 0, 1, 1)
self.leo_spell_listBox = QtGui.QListWidget(self.leo_spell_panel)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.leo_spell_listBox.sizePolicy().hasHeightForWidth())
self.leo_spell_listBox.setSizePolicy(sizePolicy)
self.leo_spell_listBox.setMinimumSize(QtCore.QSize(0, 0))
self.leo_spell_listBox.setMaximumSize(QtCore.QSize(150, 150))
self.leo_spell_listBox.setObjectName("leo_spell_listBox")
self.gridLayout_6.addWidget(self.leo_spell_listBox, 1, 0, 1, 2)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_6.addItem(spacerItem1, 2, 2, 1, 1)
self.leo_spell_label = QtGui.QLabel(self.leo_spell_panel)
self.leo_spell_label.setObjectName("leo_spell_label")
self.gridLayout_6.addWidget(self.leo_spell_label, 0, 0, 1, 2)
self.verticalLayout_6.addLayout(self.gridLayout_6)
self.verticalLayout_5.addWidget(self.leo_spell_panel)
self.tabWidget.addTab(self.leo_spell_tab, "")
self.gridLayout_7.addWidget(self.tabWidget, 0, 0, 1, 1)
self.leo_log_grid.addWidget(self.leo_log_inner_frame, 0, 0, 1, 1)
self.leo_body_frame = QtGui.QFrame(self.splitter_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Ignored)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.leo_body_frame.sizePolicy().hasHeightForWidth())
self.leo_body_frame.setSizePolicy(sizePolicy)
self.leo_body_frame.setFrameShape(QtGui.QFrame.NoFrame)
self.leo_body_frame.setFrameShadow(QtGui.QFrame.Plain)
self.leo_body_frame.setLineWidth(1)
self.leo_body_frame.setObjectName("leo_body_frame")
self.leo_body_grid = QtGui.QGridLayout(self.leo_body_frame)
self.leo_body_grid.setMargin(0)
self.leo_body_grid.setObjectName("leo_body_grid")
self.leo_body_inner_frame = QtGui.QFrame(self.leo_body_frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Ignored)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.leo_body_inner_frame.sizePolicy().hasHeightForWidth())
self.leo_body_inner_frame.setSizePolicy(sizePolicy)
self.leo_body_inner_frame.setFrameShape(QtGui.QFrame.NoFrame)
self.leo_body_inner_frame.setFrameShadow(QtGui.QFrame.Plain)
self.leo_body_inner_frame.setLineWidth(1)
self.leo_body_inner_frame.setObjectName("leo_body_inner_frame")
self.grid = QtGui.QGridLayout(self.leo_body_inner_frame)
self.grid.setMargin(0)
self.grid.setObjectName("grid")
self.stackedWidget = QtGui.QStackedWidget(self.leo_body_inner_frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Ignored)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.stackedWidget.sizePolicy().hasHeightForWidth())
self.stackedWidget.setSizePolicy(sizePolicy)
self.stackedWidget.setAcceptDrops(True)
self.stackedWidget.setLineWidth(1)
self.stackedWidget.setObjectName("stackedWidget")
self.page = QtGui.QWidget()
self.page.setObjectName("page")
self.verticalLayout_2 = QtGui.QVBoxLayout(self.page)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setMargin(0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.textEdit = Qsci.QsciScintilla(self.page)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Ignored)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.textEdit.sizePolicy().hasHeightForWidth())
self.textEdit.setSizePolicy(sizePolicy)
self.textEdit.setFrameShape(QtGui.QFrame.NoFrame)
self.textEdit.setFrameShadow(QtGui.QFrame.Plain)
self.textEdit.setLineWidth(0)
self.textEdit.setObjectName("textEdit")
self.verticalLayout_2.addWidget(self.textEdit)
self.stackedWidget.addWidget(self.page)
self.page_2 = QtGui.QWidget()
self.page_2.setObjectName("page_2")
self.verticalLayout_4 = QtGui.QVBoxLayout(self.page_2)
self.verticalLayout_4.setSpacing(6)
self.verticalLayout_4.setMargin(0)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.richTextEdit = QtGui.QTextEdit(self.page_2)  # PyQt4 has no QtWidgets module.
self.richTextEdit.setFrameShape(QtGui.QFrame.NoFrame)
self.richTextEdit.setFrameShadow(QtGui.QFrame.Plain)
self.richTextEdit.setLineWidth(0)
self.richTextEdit.setObjectName("richTextEdit")
self.verticalLayout_4.addWidget(self.richTextEdit)
self.stackedWidget.addWidget(self.page_2)
self.grid.addWidget(self.stackedWidget, 0, 0, 1, 1)
self.leo_body_grid.addWidget(self.leo_body_inner_frame, 0, 0, 1, 1)
self.verticalLayout.addWidget(self.splitter_2)
self.leo_minibuffer_frame = QtGui.QFrame(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.leo_minibuffer_frame.sizePolicy().hasHeightForWidth())
self.leo_minibuffer_frame.setSizePolicy(sizePolicy)
self.leo_minibuffer_frame.setMinimumSize(QtCore.QSize(100, 0))
self.leo_minibuffer_frame.setBaseSize(QtCore.QSize(0, 0))
self.leo_minibuffer_frame.setMidLineWidth(0)
self.leo_minibuffer_frame.setObjectName("leo_minibuffer_frame")
self.leo_minibuffer_layout = QtGui.QHBoxLayout(self.leo_minibuffer_frame)
self.leo_minibuffer_layout.setSpacing(4)
self.leo_minibuffer_layout.setContentsMargins(3, 2, 2, 0)
self.leo_minibuffer_layout.setObjectName("leo_minibuffer_layout")
self.label = QtGui.QLabel(self.leo_minibuffer_frame)
self.label.setObjectName("label")
self.leo_minibuffer_layout.addWidget(self.label)
self.lineEdit = QtGui.QLineEdit(self.leo_minibuffer_frame)
self.lineEdit.setObjectName("lineEdit")
self.leo_minibuffer_layout.addWidget(self.lineEdit)
self.verticalLayout.addWidget(self.leo_minibuffer_frame)
MainWindow.setCentralWidget(self.centralwidget) | sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.menubar.sizePolicy().hasHeightForWidth())
self.menubar.setSizePolicy(sizePolicy)
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionOpen = QtGui.QAction(MainWindow)
self.actionOpen.setObjectName("actionOpen")
self.actionSave = QtGui.QAction(MainWindow)
self.actionSave.setObjectName("actionSave")
self.actionIPython = QtGui.QAction(MainWindow)
self.actionIPython.setObjectName("actionIPython")
self.label.setBuddy(self.lineEdit)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(3)
self.stackedWidget.setCurrentIndex(1)
QtCore.QObject.connect(self.leo_spell_btn_Add, QtCore.SIGNAL("clicked()"), MainWindow.do_leo_spell_btn_Add)
QtCore.QObject.connect(self.leo_spell_btn_Change, QtCore.SIGNAL("clicked()"), MainWindow.do_leo_spell_btn_Change)
QtCore.QObject.connect(self.leo_spell_btn_Find, QtCore.SIGNAL("clicked()"), MainWindow.do_leo_spell_btn_Find)
QtCore.QObject.connect(self.leo_spell_btn_FindChange, QtCore.SIGNAL("clicked()"), MainWindow.do_leo_spell_btn_FindChange)
QtCore.QObject.connect(self.leo_spell_btn_Hide, QtCore.SIGNAL("clicked()"), MainWindow.do_leo_spell_btn_Hide)
QtCore.QObject.connect(self.leo_spell_btn_Ignore, QtCore.SIGNAL("clicked()"), MainWindow.do_leo_spell_btn_Ignore)
QtCore.QObject.connect(self.leo_spell_listBox, QtCore.SIGNAL("itemDoubleClicked(QListWidgetItem*)"), MainWindow.do_leo_spell_btn_FindChange)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "Leo", None, QtGui.QApplication.UnicodeUTF8))
self.treeWidget.headerItem().setText(0, QtGui.QApplication.translate("MainWindow", "1", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), QtGui.QApplication.translate("MainWindow", "Tab 1", None, QtGui.QApplication.UnicodeUTF8))
self.checkBoxWholeWord.setText(QtGui.QApplication.translate("MainWindow", "Whole Word", None, QtGui.QApplication.UnicodeUTF8))
self.checkBoxEntireOutline.setText(QtGui.QApplication.translate("MainWindow", "Entire Outline", None, QtGui.QApplication.UnicodeUTF8))
self.checkBoxIgnoreCase.setText(QtGui.QApplication.translate("MainWindow", "Ignore Case", None, QtGui.QApplication.UnicodeUTF8))
self.checkBoxSuboutlineOnly.setText(QtGui.QApplication.translate("MainWindow", "Suboutline Only", None, QtGui.QApplication.UnicodeUTF8))
self.checkBoxWrapAround.setText(QtGui.QApplication.translate("MainWindow", "Wrap Around", None, QtGui.QApplication.UnicodeUTF8))
self.checkBoxNodeOnly.setText(QtGui.QApplication.translate("MainWindow", "Node Only", None, QtGui.QApplication.UnicodeUTF8))
self.checkBoxReverse.setText(QtGui.QApplication.translate("MainWindow", "Reverse", None, QtGui.QApplication.UnicodeUTF8))
self.checkBoxSearchHeadline.setText(QtGui.QApplication.translate("MainWindow", "Search Headline", None, QtGui.QApplication.UnicodeUTF8))
self.checkBoxRexexp.setText(QtGui.QApplication.translate("MainWindow", "Regexp", None, QtGui.QApplication.UnicodeUTF8))
self.checkBoxSearchBody.setText(QtGui.QApplication.translate("MainWindow", "Search Body", None, QtGui.QApplication.UnicodeUTF8))
self.checkBoxMarkFinds.setText(QtGui.QApplication.translate("MainWindow", "Mark Finds", None, QtGui.QApplication.UnicodeUTF8))
self.checkBoxMarkChanges.setText(QtGui.QApplication.translate("MainWindow", "Mark Changes", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("MainWindow", "Find:", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("MainWindow", "Change:", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), QtGui.QApplication.translate("MainWindow", "Tab 2", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), QtGui.QApplication.translate("MainWindow", "Page", None, QtGui.QApplication.UnicodeUTF8))
self.leo_spell_btn_Add.setText(QtGui.QApplication.translate("MainWindow", "Add", None, QtGui.QApplication.UnicodeUTF8))
self.leo_spell_btn_Find.setText(QtGui.QApplication.translate("MainWindow", "Find", None, QtGui.QApplication.UnicodeUTF8))
self.leo_spell_btn_Change.setText(QtGui.QApplication.translate("MainWindow", "Change", None, QtGui.QApplication.UnicodeUTF8))
self.leo_spell_btn_FindChange.setText(QtGui.QApplication.translate("MainWindow", "Change, Find", None, QtGui.QApplication.UnicodeUTF8))
self.leo_spell_btn_Ignore.setText(QtGui.QApplication.translate("MainWindow", "Ignore", None, QtGui.QApplication.UnicodeUTF8))
self.leo_spell_btn_Hide.setText(QtGui.QApplication.translate("MainWindow", "Hide", None, QtGui.QApplication.UnicodeUTF8))
self.leo_spell_label.setText(QtGui.QApplication.translate("MainWindow", "Suggestions for:", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.leo_spell_tab), QtGui.QApplication.translate("MainWindow", "Spell", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("MainWindow", "Minibuffer", None, QtGui.QApplication.UnicodeUTF8))
self.actionOpen.setText(QtGui.QApplication.translate("MainWindow", "Open", None, QtGui.QApplication.UnicodeUTF8))
self.actionSave.setText(QtGui.QApplication.translate("MainWindow", "Save", None, QtGui.QApplication.UnicodeUTF8))
self.actionIPython.setText(QtGui.QApplication.translate("MainWindow", "IPython", None, QtGui.QApplication.UnicodeUTF8))
from PyQt4 import Qsci | self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 691, 19))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0) |
AppBar.js | import React from 'react'
import MuiAppBar from '@material-ui/core/AppBar'
| <MuiAppBar {...props} />
)
}
export default AppBar | function AppBar(props) {
return ( |
link.ts | import {Component} from "./component"
/**
* Represents a link between two {@link Component}s
*/
export class | {
readonly from : Component
readonly to : Component
constructor(from: Component, to: Component) {
this.from = from
this.to = to
}
} | Link |
app_temp.py | # Dash components
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
# Core data science libraries
import altair as alt | from temp_data import read_data
app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])
server = app.server
data = read_data()
@app.callback(
Output("heatmap", "srcDoc"),
Input("genres", "value"),
Input("years", "value"),
)
def plot_heatmap(genres, years):
filtered_data = data.query(
"release_date >= @years[0] & release_date <= @years[1] & genres in @genres"
)
alt.data_transformers.disable_max_rows()
chart = (
alt.Chart(filtered_data, title="Vote Average by Genre")
.mark_rect()
.encode(
x=alt.X("vote_average", bin=alt.Bin(maxbins=40), title="Vote Average"),
y=alt.Y("genres", title=""),
color=alt.Color("count()", title="Count"),
tooltip="count()",
)
).properties(width=350, height=300)
return chart.to_html()
@app.callback(
Output("linechart", "srcDoc"),
Input("genres", "value"),
Input("years", "value"),
)
def plot_linechart(genres, years):
filtered_data = data.query(
"release_date >= @years[0] & release_date <= @years[1] & genres in @genres"
)
click = alt.selection_multi(fields=["genres"], bind="legend")
chart = (
alt.Chart(filtered_data, title="Mean Budget by Release Year")
.mark_line(point=True)
.encode(
alt.X("release_year", title="Release Year"),
alt.Y("mean(budget_adj)", title="Adjusted Mean Budget ($)"),
tooltip=["release_year", "mean(budget_adj)"],
color=alt.Color("genres", title="Genre"),
opacity=alt.condition(click, alt.value(0.9), alt.value(0.05)),
)
.add_selection(click)
).properties(width=280, height=350)
return chart.to_html()
@app.callback(
Output("actor_table", "children"),
Input("genres_drill", "value"),
Input("years", "value"),
Input("budget", "value"),
)
def generate_actor_table(selected_genre, years, budget):
print(budget)
filtered_data = data.query(
"release_date >= @years[0] & release_date <= @years[1] & genres == @selected_genre & budget_adj >= @budget[0] & budget_adj <= @budget[1]"
)
top_actors = pd.DataFrame(
pd.Series(filtered_data["cast"].str.cat(sep="|").split("|")).value_counts(),
columns=["count"],
)
top_actors.index.names = ["actor"]
top_actors.reset_index(inplace=True)
return (
html.Thead(
html.Tr(
children=[
html.Th("Actor"),
html.Th("# of matching movies they starred in"),
]
)
),
html.Tbody(
[
html.Tr(
children=[html.Td(data[0]), html.Td(html.Br()), html.Td(data[1])]
)
for data in top_actors[["actor", "count"]][1:6].values
]
),
)
@app.callback(
Output("profit_year", "srcDoc"),
Input("genres", "value"),
Input("years", "value"),
)
def plot_profit_vs_year(genres, years):
filtered_data = data.query(
"release_date >= @years[0] & release_date <= @years[1] & genres in @genres"
)
click = alt.selection_multi(fields=["genres"], bind="legend")
chart = (
alt.Chart(filtered_data, title="Median Profit by Release Month")
.mark_line(point=True)
.encode(
x=alt.X("month(release_date):O", title="Release Month"),
y=alt.Y("median(profit):Q", title="Adjusted Profit ($)"),
color=alt.Color("genres", title="Genre"),
opacity=alt.condition(click, alt.value(0.9), alt.value(0.05)),
)
.add_selection(click)
).properties(width=280, height=350)
return chart.to_html()
def init_genres():
return random.sample(list(data["genres"].unique()), 6)
@app.callback(
Output("genres_drill", "options"),
Output("genres_drill", "value"),
Input("genres", "value"),
)
def update_genres(genres):
options_list = []
for item in genres:
options_list.append({"label": item, "value": item})
return (options_list, options_list[0]["label"])
collapse = html.Div(
[
dbc.Button(
"Learn more",
id="collapse-button",
className="mb-3",
outline=False,
style={'margin-top': '10px',
'width': '150px',
'background-color': 'white',
'color': 'steelblue'}
),
]
)
@app.callback(
Output("collapse", "is_open"),
[Input("collapse-button", "n_clicks")],
[State("collapse", "is_open")],
)
def toggle_collapse(n, is_open):
if n:
return not is_open
return is_open
#### Toggle help text for plot1 #######
collapse2 = html.Div(
[
dbc.Button(
"Learn more",
id="collapse-button2",
className="mb-3",
outline=False,
style={'margin-top': '10px',
'width': '150px',
'background-color': 'white',
'color': 'steelblue'}
),
]
)
@app.callback(
Output("collapse2", "is_open"),
[Input("collapse-button2", "n_clicks")],
[State("collapse2", "is_open")],
)
def toggle_collapse(n, is_open):
if n:
return not is_open
return is_open
#############################
#### Toggle help text for plot2 #######
collapse3 = html.Div(
[
dbc.Button(
"Learn more",
id="collapse-button3",
className="mb-3",
outline=False,
style={'margin-top': '10px',
'width': '150px',
'background-color': 'white',
'color': 'steelblue'}
),
]
)
@app.callback(
Output("collapse3", "is_open"),
[Input("collapse-button3", "n_clicks")],
[State("collapse3", "is_open")],
)
def toggle_collapse(n, is_open):
if n:
return not is_open
return is_open
#############################
#############################
#### Toggle help text for plot3 #######
collapse4 = html.Div(
[
dbc.Button(
"Learn more",
id="collapse-button4",
className="mb-3",
outline=False,
style={'margin-top': '10px',
'width': '150px',
'background-color': 'white',
'color': 'steelblue'}
),
]
)
@app.callback(
Output("collapse4", "is_open"),
[Input("collapse-button4", "n_clicks")],
[State("collapse4", "is_open")],
)
def toggle_collapse(n, is_open):
if n:
return not is_open
return is_open
#############################
#### Toggle help text for plot4 #######
collapse5 = html.Div(
[
dbc.Button(
"Learn more",
id="collapse-button5",
className="mb-3",
outline=False,
style={'margin-top': '10px',
'width': '150px',
'background-color': 'white',
'color': 'steelblue'}
),
]
)
@app.callback(
Output("collapse5", "is_open"),
[Input("collapse-button5", "n_clicks")],
[State("collapse5", "is_open")],
)
def toggle_collapse5(n, is_open):
if n:
return not is_open
return is_open
#############################
app.layout = dbc.Container(
[
dbc.Row([
html.H1("Movie Production Planner"),
dbc.Collapse(
html.P("""This dashboard is helping you understand x, y, and z,
which are really important because a, b, c.
Start using the dashboard by clicking on 1, 2, 3
and pulling i, ii, and iii.""",
style={'color': 'black', 'width': '50%'}
),
id="collapse",
),
dbc.Col([collapse]),
html.Br(),
]),
dbc.Row(
[
dbc.Col(
[
html.Label(
[
"Years",
]
),
dcc.RangeSlider(
id="years",
count=1,
step=1,
min=data["release_year"].min(),
max=data["release_year"].max(),
value=[2000, 2016],
marks={1960: "1960", 2015: "2015"},
tooltip={"always_visible": False, "placement": "top"},
),
],
md=6,
style={
"border": "0px",
"border-radius": "10px",
},
),
dbc.Col(
[
html.Label(
[
"Genres",
dcc.Dropdown(
id="genres",
options=[
{"label": col, "value": col}
for col in data["genres"].unique()
],
value=init_genres(),
multi=True,
),
]
),
],
md=6,
style={
"border": "0px",
"border-radius": "10px",
},
),
],
),
html.Br(),
## Main Plots Area
dbc.Row([
dbc.Col([
# First Row of Plots
dbc.Row(
[
dbc.Col(
[
dbc.Card([
dbc.CardHeader([
html.Label('Identify most-liked genres'),
dbc.Collapse(
html.P("""This is a heatmap which shows vote counts for the different genres""",
style={'color': 'black', 'width': '50%'}
),
id="collapse2",
),
collapse2,
]),
dbc.CardBody([
html.Br(),
html.Iframe(
id="heatmap",
style={
"border-width": "0",
"width": "100%",
"height": "500px",
},
),
])
])
]
),
dbc.Col(
[
dbc.Card([
dbc.CardHeader([
html.Label('Discover historical and recent budget trends'),
dbc.Collapse(
html.P("""This shows variation in budget by release year for selected genres""",
style={'color': 'black', 'width': '50%'}
),
id="collapse3",
),
collapse3
]),
dbc.CardBody([
html.Br(),
html.Iframe(
id="linechart",
style={
"border-width": "0",
"width": "100%",
"height": "500px",
},
)
])
])
]
),
], style={"margin-top": "10px"}
),
# Second Row of Plots
dbc.Row([
dbc.Col([
dbc.Card([
dbc.CardHeader([
html.Label("Find some potential actors"),
dbc.Collapse(
html.P("""This displays the most experienced actors whom you could potentially cast
in your movie, given the genre of the movie and your budget""",
style={'color': 'black', 'width': '50%'}
),
id="collapse4"
),
collapse4,
]),
dbc.CardBody([
html.Br(),
html.Label(
"Find some potential actors",
style={"font-size": 20},
),
dbc.Row(
[
dbc.Col(
html.Label(
[
"1. Drill down on a specific genre",
dcc.Dropdown(
id="genres_drill",
multi=False,
style={
"width": "200px"
},
),
]
),
)
]
),
dbc.Row(
[
dbc.Col(
[
html.Label(
[
"2. Narrow down your budget"
]
),
dcc.RangeSlider(
id="budget",
count=1,
step=1,
min=data[
"budget_adj"
].min(),
max=data[
"budget_adj"
].max(),
value=[0, 425000000],
marks={
0.99: "0",
425000000: "425,000,000",
},
tooltip={
"always_visible": False,
"placement": "top",
},
),
],
style={"width": "100px"},
)
]
),
dbc.Row(
[
dbc.Col(
[
html.Label(
["3. Select an actor!"]
)
]
)
]
),
dbc.Row(
[
dbc.Col(
[
html.Table(id="actor_table", style={
"border-width": "0",
"width": "100%",
"height": "300px",
}),
html.Br(),
]
)
]
)
])
])
]),
dbc.Col([
dbc.Card([
dbc.CardHeader([
html.Br(),
html.Label("Plan your release month",style={"font-size": 20}),
dbc.Collapse(
html.P("""This shows the median adjusted profit by release month for the
selected genres, to help you plan a release window""",
style={'color': 'black', 'width': '50%'}
),
id="collapse5"
),
collapse5
]),
dbc.CardBody([
html.Iframe(
id="profit_year",
style={
"border-width": "0",
"width": "100%",
"height": "450px",
}
)
]),
])
])
])
],
md=12,
style={
"width": "100%",
"height": "100%",
"border": "0px",
"border-radius": "10px",
})])
]
)
if __name__ == "__main__":
app.run_server(debug=True) | import pandas as pd
import random
# Data loading functions |
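# A minimal, hypothetical loader sketch: the CSV path and the revenue_adj
# column are assumptions made for illustration, not part of the original app.
def load_data(path="data/movies.csv"):
    movies = pd.read_csv(path)
    movies["profit"] = movies["revenue_adj"] - movies["budget_adj"]
    return movies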
remote_zips.rs | use crate::{
core::{downloads, failable_unit::FailableUnit, io, logs},
log_tag,
};
use std::{fs::DirEntry, path::PathBuf}; |
pub fn fetch(url: &str, destination_dir_name: &str, destination_parent_dir: &PathBuf) -> FailableUnit {
let target_dir = destination_parent_dir.join(destination_dir_name);
if target_dir.exists() {
logs::out(log_tag!(), &format!("Destination already exists, skipping download: {:?}", &target_dir));
return Ok(());
}
io::in_temp_dir(&mut |temp_dir| {
let download_file_path = temp_dir.to_path_buf().join("download.zip");
downloads::download(&url, &download_file_path)?;
let unzipped_dir = temp_dir.join("unzipped");
io::unzip(&download_file_path, &unzipped_dir)?;
// We will now massage the name of the unzipped directory to be whatever the caller specified. The directory to rename will be the first child of the 'unzipped' directory where we just unzipped the files.
let content_dir = unzipped_dir.join(&destination_dir_name);
io::rename(&std::fs::read_dir(&unzipped_dir)?.filter_map(|e| e.ok()).collect::<Vec<DirEntry>>()[0].path(), &content_dir)?;
io::create_dir(&destination_parent_dir)?;
io::copy(&content_dir, &destination_parent_dir)
})
} | |
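// Illustrative caller only: the URL and directory names below are invented
// for this sketch and do not come from the original code base.
#[allow(dead_code)]
fn fetch_example() -> FailableUnit {
    fetch("https://example.com/archive.zip", "archive", &PathBuf::from("target/deps"))
}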
errors.go | package mid
import (
"context"
"net/http"
"github.com/lgarciaaco/machina-api/business/sys/validate"
v1Web "github.com/lgarciaaco/machina-api/business/web/v1"
"github.com/lgarciaaco/machina-api/foundation/web"
"go.uber.org/zap"
)
// Errors handles errors coming out of the call chain. It detects normal
// application errors which are used to respond to the client in a uniform way.
// Unexpected errors (status >= 500) are logged.
func Errors(log *zap.SugaredLogger) web.Middleware {
// This is the actual middleware function to be executed.
m := func(handler web.Handler) web.Handler {
// Create the handler that will be attached in the middleware chain.
h := func(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
// If the context is missing this value, request the service
// to be shut down gracefully.
v, err := web.GetValues(ctx)
if err != nil {
return web.NewShutdownError("web value missing from context")
}
|
// Log the error.
log.Errorw("ERROR", "traceid", v.TraceID, "ERROR", err)
// Build out the error response.
var er v1Web.ErrorResponse
var status int
switch {
case validate.IsFieldErrors(err):
fieldErrors := validate.GetFieldErrors(err)
er = v1Web.ErrorResponse{
Error: "data validation error",
Fields: fieldErrors.Fields(),
}
status = http.StatusBadRequest
case v1Web.IsRequestError(err):
reqErr := v1Web.GetRequestError(err)
er = v1Web.ErrorResponse{
Error: reqErr.Error(),
}
status = reqErr.Status
default:
er = v1Web.ErrorResponse{
Error: http.StatusText(http.StatusInternalServerError),
}
status = http.StatusInternalServerError
}
// Respond with the error back to the client.
if err := web.Respond(ctx, w, er, status); err != nil {
return err
}
// If we receive the shutdown err we need to return it
// back to the base handler to shutdown the service.
if ok := web.IsShutdown(err); ok {
return err
}
}
// The error has been handled so we can stop propagating it.
return nil
}
return h
}
return m
} | // Run the next handler and catch any propagated error.
if err := handler(ctx, w, r); err != nil { |
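// Usage sketch (illustrative only; the real service composes middleware through
// its web framework, and the handler name below is invented):
//
//	var h web.Handler = listUsersHandler
//	h = Errors(log)(h) // errors from h are logged, mapped to an ErrorResponse,
//	                   // written to the client, and not propagated further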
domain_defaults.go | /*
Copyright 2020 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1 | import (
"context"
"knative.dev/pkg/apis"
)
// SetDefaults populates default values in Domain
func (d *Domain) SetDefaults(ctx context.Context) {
d.Spec.SetDefaults(apis.WithinSpec(ctx))
}
// SetDefaults populates default values in DomainSpec
func (s *DomainSpec) SetDefaults(ctx context.Context) {
} | |
issue-3907-2.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:issue_3907.rs
extern crate issue_3907;
type Foo = issue_3907::Foo; //~ ERROR: reference to trait
struct S {
name: int
}
fn | () {}
| main |
magefile.go | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. Licensed under the Elastic License;
// you may not use this file except in compliance with the Elastic License.
// +build mage
package main
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/magefile/mage/mg"
"github.com/magefile/mage/sh"
"github.com/elastic/beats/v7/dev-tools/mage"
devtools "github.com/elastic/beats/v7/dev-tools/mage"
"github.com/pkg/errors"
// mage:import
_ "github.com/elastic/beats/v7/dev-tools/mage/target/common"
// mage:import
_ "github.com/elastic/beats/v7/dev-tools/mage/target/unittest"
// mage:import
_ "github.com/elastic/beats/v7/dev-tools/mage/target/test"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/archive"
)
var hubID = "elastic"
var logDriverName = "elastic-logging-plugin"
var dockerPluginName = filepath.Join(hubID, logDriverName)
var packageStagingDir = "build/package/"
var packageEndDir = "build/distributions/"
var buildDir = filepath.Join(packageStagingDir, logDriverName)
var dockerExportPath = filepath.Join(packageStagingDir, "temproot.tar")
var rootImageName = "rootfsimage"
func init() {
devtools.BeatLicense = "Elastic License"
devtools.BeatDescription = "The Docker Logging Driver is a docker plugin for the Elastic Stack."
}
// getPluginName returns the fully qualified name:version string
func getPluginName() (string, error) {
version, err := mage.BeatQualifiedVersion()
if err != nil {
return "", errors.Wrap(err, "error getting beats version")
}
return dockerPluginName + ":" + version, nil
}
// createContainer builds the plugin and creates the container that will later become the rootfs used by the plugin
func createContainer(ctx context.Context, cli *client.Client) error {
dockerLogBeatDir, err := os.Getwd()
if err != nil {
return errors.Wrap(err, "error getting work dir")
}
if !strings.Contains(dockerLogBeatDir, "dockerlogbeat") {
return errors.Errorf("not in dockerlogbeat directory: %s", dockerLogBeatDir)
}
// start to build the root container that'll be used to build the plugin
tmpDir, err := ioutil.TempDir("", "dockerBuildTar")
if err != nil {
return errors.Wrap(err, "Error locating temp dir")
}
defer sh.Rm(tmpDir)
tarPath := filepath.Join(tmpDir, "tarRoot.tar")
err = sh.RunV("tar", "cf", tarPath, "./")
if err != nil {
return errors.Wrap(err, "error creating tar")
}
buildContext, err := os.Open(tarPath)
if err != nil {
return errors.Wrap(err, "error opening temp dir")
}
defer buildContext.Close()
buildOpts := types.ImageBuildOptions{
Tags: []string{rootImageName},
Dockerfile: "Dockerfile",
}
//build, wait for output
buildResp, err := cli.ImageBuild(ctx, buildContext, buildOpts)
if err != nil {
return errors.Wrap(err, "error building final container image")
}
defer buildResp.Body.Close()
// This blocks until the build operation completes
buildStr, errBufRead := ioutil.ReadAll(buildResp.Body)
if errBufRead != nil {
return errors.Wrap(errBufRead, "error reading from docker output")
}
fmt.Printf("%s\n", string(buildStr))
return nil
}
// BuildContainer builds docker rootfs container root
// There's a somewhat complicated process for this:
// * Create a container to build the plugin itself
// * Copy that to a bare-bones container that will become the runc container used by docker
// * Export that container
// * Unpack the tar from the exported container
// * send this to the plugin create API endpoint
func BuildContainer(ctx context.Context) error {
// setup
cli, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
return errors.Wrap(err, "Error creating docker client")
}
mage.CreateDir(packageStagingDir)
mage.CreateDir(packageEndDir)
err = os.MkdirAll(filepath.Join(buildDir, "rootfs"), 0755)
if err != nil {
return errors.Wrap(err, "Error creating build dir")
}
err = createContainer(ctx, cli)
if err != nil {
return errors.Wrap(err, "error creating base container")
}
// create the container that will become our rootfs
CreatedContainerBody, err := cli.ContainerCreate(ctx, &container.Config{Image: rootImageName}, nil, nil, "")
if err != nil {
return errors.Wrap(err, "error creating container")
}
defer func() {
// cleanup
if _, noClean := os.LookupEnv("DOCKERLOGBEAT_NO_CLEANUP"); !noClean {
err = cleanDockerArtifacts(ctx, CreatedContainerBody.ID, cli)
if err != nil {
fmt.Fprintf(os.Stderr, "Error cleaning up docker: %s", err)
}
}
}()
fmt.Printf("Got image: %#v\n", CreatedContainerBody.ID)
file, err := os.Create(dockerExportPath)
if err != nil {
return errors.Wrap(err, "error creating tar archive")
}
// export the container to a tar file
exportReader, err := cli.ContainerExport(ctx, CreatedContainerBody.ID)
if err != nil {
return errors.Wrap(err, "error exporting container")
}
_, err = io.Copy(file, exportReader)
if err != nil {
return errors.Wrap(err, "Error writing exported container")
}
//misc prepare operations
err = mage.Copy("config.json", filepath.Join(buildDir, "config.json"))
if err != nil {
return errors.Wrap(err, "error copying config.json")
}
// unpack the tar file into a root directory, which is the format needed for the docker plugin create tool
err = sh.RunV("tar", "-xf", dockerExportPath, "-C", filepath.Join(buildDir, "rootfs"))
if err != nil {
return errors.Wrap(err, "error unpacking exported container")
}
return nil
}
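// cleanDockerArtifacts force-removes the intermediate container and the
// rootfsimage created while building the plugin.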
func cleanDockerArtifacts(ctx context.Context, containerID string, cli *client.Client) error {
fmt.Printf("Removing container %s\n", containerID)
err := cli.ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{RemoveVolumes: true, Force: true})
if err != nil {
return errors.Wrap(err, "error removing container")
}
resp, err := cli.ImageRemove(ctx, rootImageName, types.ImageRemoveOptions{Force: true})
if err != nil {
return errors.Wrap(err, "error removing image")
}
fmt.Printf("Removed image: %#v\n", resp)
return nil
}
// Uninstall removes working objects and containers
func Uninstall(ctx context.Context) error {
cli, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
return errors.Wrap(err, "Error creating docker client")
}
//check to see if we have a plugin we need to remove
plugins, err := cli.PluginList(ctx, filters.Args{})
if err != nil {
return errors.Wrap(err, "error getting list of plugins")
}
toRemoveName := ""
for _, plugin := range plugins {
if strings.Contains(plugin.Name, logDriverName) {
toRemoveName = plugin.Name
break
}
}
if toRemoveName == "" {
return nil
}
err = cli.PluginDisable(ctx, toRemoveName, types.PluginDisableOptions{Force: true})
if err != nil {
return errors.Wrap(err, "error disabling plugin")
}
err = cli.PluginRemove(ctx, toRemoveName, types.PluginRemoveOptions{Force: true})
if err != nil {
return errors.Wrap(err, "error removing plugin")
}
return nil
}
// Install installs the plugin
func Install(ctx context.Context) error {
mg.Deps(Uninstall)
if _, err := os.Stat(filepath.Join(packageStagingDir, "rootfs")); os.IsNotExist(err) {
mg.Deps(Build)
}
name, err := getPluginName()
if err != nil {
return err
}
cli, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
return errors.Wrap(err, "Error creating docker client")
}
archiveOpts := &archive.TarOptions{
Compression: archive.Uncompressed,
IncludeFiles: []string{"rootfs", "config.json"},
}
archive, err := archive.TarWithOptions(buildDir, archiveOpts)
if err != nil {
return errors.Wrap(err, "error creating archive of work dir")
}
err = cli.PluginCreate(ctx, archive, types.PluginCreateOptions{RepoName: name})
if err != nil {
return errors.Wrap(err, "error creating plugin")
}
err = cli.PluginEnable(ctx, name, types.PluginEnableOptions{})
if err != nil {
return errors.Wrap(err, "error enabling plugin")
}
return nil
}
// Export exports a "ready" root filesystem and config.json into a tarball
func Export() error {
version, err := mage.BeatQualifiedVersion()
if err != nil {
return errors.Wrap(err, "error getting beats version")
}
if mage.Snapshot {
version = version + "-SNAPSHOT"
}
tarballName := fmt.Sprintf("%s-%s-%s.tar.gz", logDriverName, version, "docker-plugin")
outpath := filepath.Join("../..", packageEndDir, tarballName)
err = os.Chdir(packageStagingDir)
if err != nil {
return errors.Wrap(err, "error changing directory")
}
err = sh.RunV("tar", "zcf", outpath,
filepath.Join(logDriverName, "rootfs"),
filepath.Join(logDriverName, "config.json"))
if err != nil {
return errors.Wrap(err, "error creating release tarball")
}
return nil
}
// CrossBuild cross-builds the beat for all target platforms.
func CrossBuild() error {
return devtools.CrossBuild(devtools.ForPlatforms("linux/amd64"))
}
// Build builds the base container used by the docker plugin
func Build() |
// GolangCrossBuild build the Beat binary inside of the golang-builder.
// Do not use directly, use crossBuild instead.
func GolangCrossBuild() error {
buildArgs := devtools.DefaultBuildArgs()
buildArgs.CGO = false
buildArgs.Static = true
buildArgs.OutputDir = "build/plugin"
return devtools.GolangCrossBuild(buildArgs)
}
// Package builds a "release" tarball that can be used later with `docker plugin create`
func Package() {
mg.SerialDeps(Build, Export)
}
// BuildAndInstall builds and installs the plugin
func BuildAndInstall() {
mg.SerialDeps(Build, Install)
}
// IntegTest is currently a dummy test for the `testsuite` target
func IntegTest() {
fmt.Printf("There are no Integration tests for The Elastic Log Plugin\n")
}
// Update is currently a dummy test for the `testsuite` target
func Update() {
fmt.Printf("There is no Update for The Elastic Log Plugin\n")
}
| {
mg.SerialDeps(CrossBuild, BuildContainer)
} |
report.go | package saw | )
type VarInt interface {
Add(delta int64)
Set(value int64)
}
type VarFloat interface {
Add(delta float64)
Set(value float64)
}
var varLock sync.Mutex
// Creates or fetches an int var for reporting. Unlike its underlying expvar,
// ReportInt is expected to be called when saws are dynamically created, in
// TableItemFactory etc., so that saws inside a single table can share the
// same reporting metric.
func ReportInt(ns, name string) VarInt {
varName := ns + "." + name
varLock.Lock()
defer varLock.Unlock()
if v := expvar.Get(varName); v != nil {
return v.(*expvar.Int)
}
return expvar.NewInt(varName)
}
// Creates float var for reporting. see ReportInt() for usage detail.
func ReportFloat(ns, name string) VarFloat {
varName := ns + "." + name
varLock.Lock()
defer varLock.Unlock()
if v := expvar.Get(varName); v != nil {
return v.(*expvar.Float)
}
return expvar.NewFloat(varName)
} |
import (
"expvar"
"sync" |
subnets.go | package ec2
import (
"errors"
"fmt"
"sort"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/ec2"
"go.uber.org/zap"
)
const (
subnetCIDR1 = "192.168.64.0/18"
subnetCIDR2 = "192.168.128.0/18"
subnetCIDR3 = "192.168.192.0/18"
)
var zoneSfx = []string{"a", "b", "c"}
func (md *embedded) createSubnets() (err error) {
if md.cfg.VPCID == "" |
if len(md.cfg.SubnetIDs) > 0 {
return fmt.Errorf("subnets already exist (%q)", md.cfg.SubnetIDs)
}
md.cfg.SubnetIDs = make([]string, 0, 3)
md.cfg.SubnetIDToAvailabilityZone = make(map[string]string, 3)
for i, cidr := range []string{subnetCIDR1, subnetCIDR2, subnetCIDR3} {
var output *ec2.CreateSubnetOutput
output, err = md.ec2.CreateSubnet(&ec2.CreateSubnetInput{
VpcId: aws.String(md.cfg.VPCID),
CidrBlock: aws.String(cidr),
AvailabilityZone: aws.String(fmt.Sprintf("%s%s", md.cfg.AWSRegion, zoneSfx[i%len(zoneSfx)])),
})
if err != nil {
return err
}
id := *output.Subnet.SubnetId
az := *output.Subnet.AvailabilityZone
md.cfg.SubnetIDs = append(md.cfg.SubnetIDs, id)
md.cfg.SubnetIDToAvailabilityZone[id] = az
md.lg.Info(
"created subnet",
zap.String("vpc-id", md.cfg.VPCID),
zap.String("subnet-id", id),
zap.String("availability-zone", az),
)
}
if md.cfg.AssociatePublicIPAddress {
if err = md.associatePublicIP(); err != nil {
return err
}
}
sort.Strings(md.cfg.SubnetIDs)
return md.cfg.Sync()
}
func (md *embedded) deleteSubnet() (err error) {
if md.cfg.VPCID == "" {
return errors.New("cannot delete subnets without VPC ID")
}
if len(md.cfg.SubnetIDs) == 0 {
return errors.New("cannot delete subnets without Subnet IDs")
}
for _, id := range md.cfg.SubnetIDs {
for i := 0; i < 10; i++ {
// TODO: handle "DependencyViolation: The subnet 'subnet-034524cbada087b8d' has dependencies and cannot be deleted"
_, err = md.ec2.DeleteSubnet(&ec2.DeleteSubnetInput{
SubnetId: aws.String(id),
})
if err == nil {
break
}
if request.IsErrorRetryable(err) || request.IsErrorThrottle(err) {
md.lg.Warn("failed to delete subnet, retrying...", zap.Error(err))
time.Sleep(5 * time.Second)
continue
}
// https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
awsErr, ok := err.(awserr.Error)
if ok {
if awsErr.Code() == "InvalidSubnetID.NotFound" {
md.lg.Info(
"subnet does not exist",
zap.String("subnet-id", id),
)
return nil
}
}
return err
}
if err != nil {
return err
}
md.lg.Info(
"deleted subnet",
zap.String("vpc-id", md.cfg.VPCID),
zap.String("subnet-id", id),
)
}
time.Sleep(2 * time.Second)
_, err = md.ec2.DescribeSubnets(&ec2.DescribeSubnetsInput{
SubnetIds: aws.StringSlice(md.cfg.SubnetIDs),
})
if err != nil {
// https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
awsErr, ok := err.(awserr.Error)
if ok && awsErr.Code() == "InvalidSubnetID.NotFound" {
return nil
}
return err
}
return fmt.Errorf("deleted subnets but %v still exists", md.cfg.SubnetIDs)
}
func (md *embedded) getSubnets() (err error) {
if md.cfg.VPCID == "" {
return errors.New("cannot get subnets without VPC ID")
}
if len(md.cfg.SubnetIDs) > 0 {
return fmt.Errorf("subnets already exist (%q)", md.cfg.SubnetIDs)
}
if err = md.enableDNSHostnames(); err != nil {
return err
}
var output *ec2.DescribeSubnetsOutput
output, err = md.ec2.DescribeSubnets(&ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
{
Name: aws.String("vpc-id"),
Values: aws.StringSlice([]string{md.cfg.VPCID}),
},
},
})
if err != nil {
return err
}
md.cfg.SubnetIDs = make([]string, 0, len(output.Subnets))
md.cfg.SubnetIDToAvailabilityZone = make(map[string]string)
for _, sv := range output.Subnets {
md.cfg.SubnetIDs = append(md.cfg.SubnetIDs, *sv.SubnetId)
md.cfg.SubnetIDToAvailabilityZone[*sv.SubnetId] = *sv.AvailabilityZone
}
md.lg.Info(
"found subnets",
zap.String("vpc-id", md.cfg.VPCID),
zap.Strings("subnets", md.cfg.SubnetIDs),
)
return md.cfg.Sync()
}
func (md *embedded) associatePublicIP() (err error) {
for _, id := range md.cfg.SubnetIDs {
_, err = md.ec2.ModifySubnetAttribute(&ec2.ModifySubnetAttributeInput{
MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{
Value: aws.Bool(true),
},
SubnetId: aws.String(id),
})
if err != nil {
return fmt.Errorf("failed to allow public IP assign for subnet %q (%v)", id, err)
}
md.lg.Debug("allowed public IP assign for subnet", zap.String("subnet-id", id))
}
return nil
}
| {
return errors.New("cannot create subnets without VPC ID")
} |
test_measure.py | import unittest
import numpy as np
from specc.analysis.analyzer import CircuitTester
from specc.aquisition.daq import DataAcquisitionInterface
from specc.data.results import SignalResponse
from tests.utils import ACCEPTABLE_ERROR, TEST_AMPLITUDE, TEST_DF, TEST_FREQUENCY, TEST_SAMPLES, TEST_SAMPLE_RATE
class TestDAQSampleRate(unittest.TestCase):
def test_sample_rate_limits(self):
minimum_sample_rate = 10
maximum_sample_rate = 100
class DAQInterfaceMock(DataAcquisitionInterface):
MINIMUM_SAMPLE_RATE = minimum_sample_rate
MAXIMUM_SAMPLE_RATE = maximum_sample_rate
self.assertRaises(AssertionError, lambda: DAQInterfaceMock(minimum_sample_rate - 1))
self.assertRaises(AssertionError, lambda: DAQInterfaceMock(maximum_sample_rate + 1))
def test_sample_rate_value(self):
initialization_sample_rate = 10
updated_sample_rate = 20
daq = DataAcquisitionInterface(initialization_sample_rate)
self.assertEqual(daq.sample_rate, initialization_sample_rate)
daq.sample_rate = updated_sample_rate
self.assertEqual(daq.sample_rate, updated_sample_rate)
class TestDAQTimeArray(unittest.TestCase):
def setUp(self) -> None:
self.daq = DataAcquisitionInterface(TEST_SAMPLE_RATE)
self.time_array = self.daq.calculate_time_array(TEST_SAMPLES)
def test_time_array_dimensions(self):
self.assertEqual(1, len(self.time_array.shape), "The time array is not a 1D-ndarray.")
self.assertEqual((TEST_SAMPLES,), self.time_array.shape)
def | (self):
expected_end_time = TEST_SAMPLES / TEST_SAMPLE_RATE
self.assertEqual(0, self.time_array[0], "Time array did not start at 0.")
self.assertEqual(expected_end_time, self.time_array[-1], f"Time array did not end at {expected_end_time}.")
class TestDAQAnalyzerRead(unittest.TestCase):
def setUp(self):
class DAQMock(DataAcquisitionInterface):
MOCK_OUTPUT_PHASE = np.pi / 4
def read(self, channels: np.ndarray, samples: int) -> np.ndarray:
end_time = samples / self.sample_rate
time_array = np.linspace(0, end_time, samples)
if len(channels) == 1:
signal = TEST_AMPLITUDE * np.sin(2 * np.pi * TEST_FREQUENCY * time_array)
elif len(channels) == 2:
signal = np.asarray([
TEST_AMPLITUDE * np.sin(2 * np.pi * TEST_FREQUENCY * time_array),
TEST_AMPLITUDE * np.sin(2 * np.pi * TEST_FREQUENCY * time_array + self.MOCK_OUTPUT_PHASE),
])
else:
raise NotImplementedError
return signal
self.daq = DAQMock(TEST_SAMPLE_RATE)
self.analyzer = CircuitTester(self.daq, 'empty', 'empty')
def test_measuring_single(self):
response = self.analyzer.measure_single(TEST_SAMPLES)
self.assertTrue(isinstance(response, SignalResponse))
self.assertAlmostEqual(self.daq.MOCK_OUTPUT_PHASE, response.relative_phase(TEST_FREQUENCY),
delta=ACCEPTABLE_ERROR)
self.assertAlmostEqual(1, response.relative_intensity(TEST_FREQUENCY, TEST_DF), delta=ACCEPTABLE_ERROR)
if __name__ == '__main__':
unittest.main()
| test_time_array_values |
wss_info.rs | use crate::transport::websocket::{streams::WebsocketStreamState, BaseStream, TransportResult};
/// Represents an individual connection
#[derive(Debug)]
pub struct WssInfo<T: std::io::Read + std::io::Write + std::fmt::Debug> {
pub(in crate::transport::websocket) request_id: String,
pub(in crate::transport::websocket) url: url::Url,
pub(in crate::transport::websocket) last_msg: std::time::Instant,
pub(in crate::transport::websocket) stateful_socket: WebsocketStreamState<T>,
}
impl<T: std::io::Read + std::io::Write + std::fmt::Debug> WssInfo<T> {
pub fn close(&mut self) -> TransportResult<()> {
if let WebsocketStreamState::ReadyWss(socket) = &mut self.stateful_socket {
socket.write_message(tungstenite::Message::Close(None))?;
socket.close(None)?;
socket.write_pending()?;
}
self.stateful_socket = WebsocketStreamState::None;
Ok(())
}
pub fn | (url: url::Url, socket: BaseStream<T>, is_server: bool) -> Self {
WssInfo {
// TODO set a request id
request_id: "".to_string(),
url,
last_msg: std::time::Instant::now(),
stateful_socket: match is_server {
false => WebsocketStreamState::Connecting(socket),
true => WebsocketStreamState::ConnectingSrv(socket),
},
}
}
pub fn client(url: url::Url, socket: BaseStream<T>) -> Self {
Self::new(url, socket, false)
}
pub fn server(url: url::Url, socket: BaseStream<T>) -> Self {
Self::new(url, socket, true)
}
}
| new |
index.js | import React from 'react';
import ReactDOM from 'react-dom';
import { BrowserRouter as Router } from 'react-router-dom'
import './index.css';
import App from './App';
| <Router>
<App />
</Router>,
document.getElementById('root')
); | ReactDOM.render( |
auth.service.ts | import { Injectable } from '@nestjs/common';
import { UserService } from '../user/user.service';
import { JwtService } from '@nestjs/jwt';
import * as bcrypt from 'bcrypt';
import { CustomerService } from 'src/customer/customer.service';
import { MerchantService } from 'src/merchant/merchant.service';
@Injectable()
export class AuthService {
constructor(
private userService: UserService,
private customerService: CustomerService,
private merchantService: MerchantService,
private jwtService: JwtService
) {}
async validateUser(email: string, pass: string, type: string): Promise<any> {
const user = await this.userService.findOneForLogin(email, type);
if (user && await bcrypt.compare(pass, user.password)) {
return {
id: user.id,
email: user.email,
role: user.type,
verified: user.verified
};
}
return null;
}
async login(loggedUser: any) {
let user;
const access_token = this.jwtService.sign({ email: loggedUser.email, sub: loggedUser.id, role: loggedUser.role })
if(loggedUser.role === 'customer') {
user = await this.customerService.findOne(loggedUser.id)
} | }
return {
access_token,
user
}
}
} | if(loggedUser.role === 'merchant') {
user = await this.merchantService.findOne(loggedUser.id)
user = user._id |
cli.rs | use std::env;
pub fn run() | {
let args: Vec<String> = env::args().collect();
let name: String = "Ajmal".to_string();
let status = "100%".to_string();
println!("Args: {:?}", args);
let command = args[1].clone();
println!("Command: {}", command);
if command == "hello" {
println!("Hi {}, how are you", name);
} else if command == "status" {
println!("Status is {}", status);
} else {
println!("That is not a valid command");
}
} |
|
asteroidsDataHandlerNeoWsAPI.go | package datahandlers
import (
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"sort"
"strconv"
"time"
log "github.com/sirupsen/logrus"
"github.com/psyb0t/potentially-hazardous-asteroids/internal/pkg/cache"
phatypes "github.com/psyb0t/potentially-hazardous-asteroids/internal/pkg/types"
)
// AsteroidsDataHandlerNeoWsAPI is the struct used to handle asteroids data
// via the NeoWs API
type AsteroidsDataHandlerNeoWsAPI struct {
apiKey string
cache cache.Cache
}
// NewAsteroidsDataHandlerNeoWsAPI instantiates an AsteroidsDataHandlerNeoWsAPI struct
// with the given apiKey assigned to its field with the same name
func NewAsteroidsDataHandlerNeoWsAPI(apiKey string) *AsteroidsDataHandlerNeoWsAPI {
asteroidsDataHandlerNeoWsAPI := &AsteroidsDataHandlerNeoWsAPI{apiKey: apiKey}
asteroidsDataHandlerNeoWsAPI.cache = make(cache.Cache)
return asteroidsDataHandlerNeoWsAPI
}
// GetAll returns the entire asteroid list
func (h *AsteroidsDataHandlerNeoWsAPI) GetAll() ([]*phatypes.Asteroid, error) {
var err error
var responseData []byte
// Try getting the response data from the cache, if it's set and the cache item is not expired
cachedResponseItem, ok := h.cache["asteroids_response_data"]
if !ok || cachedResponseItem.IsExpired() {
log.Debug("cached response item not set or expired. retrieving asteroid data from NASA's API")
apiURL := fmt.Sprintf(
"https://api.nasa.gov/neo/rest/v1/feed?start_date=%s&api_key=%s",
time.Now().Format("2006-01-02"), h.apiKey)
client := http.Client{
Timeout: time.Second * 10,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
}
request, err := http.NewRequest("GET", apiURL, nil)
if err != nil {
return nil, err
}
response, err := client.Do(request)
if err != nil {
return nil, err
}
defer response.Body.Close()
if response.StatusCode != http.StatusOK {
return nil, ErrStatusNotOK
}
responseData, err = ioutil.ReadAll(response.Body)
if err != nil {
return nil, err
}
cacheItem := cache.NewItem(time.Now().Add(time.Minute*10), responseData)
h.cache["asteroids_response_data"] = cacheItem
} else {
log.Debug("retrieving asteroid data from cache")
responseData, ok = cachedResponseItem.Data.([]byte)
if !ok {
return nil, ErrCacheItemDataUnexpectedDataType
}
}
asteroids, err := getAsteroidsFromResponseData(responseData)
if err != nil {
return nil, err
}
sort.SliceStable(asteroids, func(i, j int) bool {
return asteroids[i].CloseApproachTimestamp < asteroids[j].CloseApproachTimestamp
})
return asteroids, nil
}
func getAsteroidsFromResponseData(responseData []byte) ([]*phatypes.Asteroid, error) | {
var err error
asteroids := make([]*phatypes.Asteroid, 0)
responseDataStruct := struct {
NearEarthObjects map[string][]struct {
ID string `json:"id"`
Name string `json:"name"`
NASAJPLURL string `json:"nasa_jpl_url"`
EstimatedDiameter map[string]struct {
EstimatedDiameterMin float64 `json:"estimated_diameter_min"`
EstimatedDiameterMax float64 `json:"estimated_diameter_max"`
} `json:"estimated_diameter"`
IsPotentiallyHazardousAsteroid bool `json:"is_potentially_hazardous_asteroid"`
CloseApproachData []struct {
EpochDateCloseApproach int `json:"epoch_date_close_approach"`
RelativeVelocity struct {
KilometersPerHour string `json:"kilometers_per_hour"`
MilesPerHour string `json:"miles_per_hour"`
} `json:"relative_velocity"`
MissDistance struct {
Kilometers string `json:"kilometers"`
Miles string `json:"miles"`
} `json:"miss_distance"`
} `json:"close_approach_data"`
IsSentryObject bool `json:"is_sentry_object"`
} `json:"near_earth_objects"`
}{}
if err := json.Unmarshal(responseData, &responseDataStruct); err != nil {
return nil, err
}
for _, nearEarthObjectsForDate := range responseDataStruct.NearEarthObjects {
for _, nearEarthObjectForDate := range nearEarthObjectsForDate {
// Only interested in hazardous ones
if !nearEarthObjectForDate.IsPotentiallyHazardousAsteroid {
continue
}
asteroid := phatypes.NewAsteroid()
asteroid.ID = nearEarthObjectForDate.ID
asteroid.Name = nearEarthObjectForDate.Name
asteroid.NASAJPLURL = nearEarthObjectForDate.NASAJPLURL
asteroid.EstimatedDiameter.Kilometers.Min = nearEarthObjectForDate.EstimatedDiameter["kilometers"].EstimatedDiameterMin
asteroid.EstimatedDiameter.Kilometers.Max = nearEarthObjectForDate.EstimatedDiameter["kilometers"].EstimatedDiameterMax
asteroid.EstimatedDiameter.Miles.Min = nearEarthObjectForDate.EstimatedDiameter["miles"].EstimatedDiameterMin
asteroid.EstimatedDiameter.Miles.Max = nearEarthObjectForDate.EstimatedDiameter["miles"].EstimatedDiameterMax
asteroid.CloseApproachTimestamp = nearEarthObjectForDate.CloseApproachData[0].EpochDateCloseApproach
asteroid.RelativeVelocity.KilometersPerHour, err = strconv.ParseFloat(
nearEarthObjectForDate.CloseApproachData[0].RelativeVelocity.KilometersPerHour, 64)
if err != nil {
return nil, err
}
asteroid.RelativeVelocity.MilesPerHour, err = strconv.ParseFloat(
nearEarthObjectForDate.CloseApproachData[0].RelativeVelocity.MilesPerHour, 64)
if err != nil {
return nil, err
}
asteroid.MissDistance.Kilometers, err = strconv.ParseFloat(
nearEarthObjectForDate.CloseApproachData[0].MissDistance.Kilometers, 64)
if err != nil {
return nil, err
}
asteroid.MissDistance.Miles, err = strconv.ParseFloat(
nearEarthObjectForDate.CloseApproachData[0].MissDistance.Miles, 64)
if err != nil {
return nil, err
}
asteroid.IsSentryObject = nearEarthObjectForDate.IsSentryObject
asteroids = append(asteroids, asteroid)
}
}
return asteroids, nil
} |
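// For orientation, the cache package used above is assumed to be shaped roughly
// like the sketch below; this is a guess at its interface, not the real code.
//
//	type Cache map[string]Item
//
//	type Item struct {
//	    ExpiresAt time.Time
//	    Data      interface{}
//	}
//
//	func NewItem(expiresAt time.Time, data interface{}) Item {
//	    return Item{ExpiresAt: expiresAt, Data: data}
//	}
//
//	func (i Item) IsExpired() bool { return time.Now().After(i.ExpiresAt) }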
|
gatherer_test.go | package apmgometrics_test
import (
"strings"
"testing"
"github.com/rcrowley/go-metrics"
"github.com/stretchr/testify/assert"
"github.com/elastic/apm-agent-go"
"github.com/elastic/apm-agent-go/model"
"github.com/elastic/apm-agent-go/module/apmgometrics"
"github.com/elastic/apm-agent-go/transport/transporttest"
)
func TestGatherer(t *testing.T) {
r := metrics.NewRegistry()
httpReqsTotal := metrics.GetOrRegisterCounter("http.requests_total", r)
httpReqsInflight := metrics.GetOrRegisterGauge("http.requests_inflight", r)
httpReqsTotal.Inc(123)
httpReqsInflight.Update(10)
g := apmgometrics.Wrap(r)
metrics := gatherMetrics(g)
assert.Len(t, metrics, 1)
for k := range metrics[0].Samples {
if !strings.HasPrefix(k, "http.") {
delete(metrics[0].Samples, k)
}
}
assert.Equal(t, []*model.Metrics{{
Samples: map[string]model.Metric{
"http.requests_total": {
Value: 123,
},
"http.requests_inflight": {
Value: 10,
},
},
}}, metrics)
}
func TestHistogram(t *testing.T) |
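// gatherMetrics registers the gatherer on a recorder tracer, triggers one
// metrics send, and returns the captured payloads with timestamps zeroed.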
func gatherMetrics(g elasticapm.MetricsGatherer) []*model.Metrics {
tracer, transport := transporttest.NewRecorderTracer()
defer tracer.Close()
tracer.RegisterMetricsGatherer(g)
tracer.SendMetrics(nil)
metrics := transport.Payloads()[0].Metrics()
for _, m := range metrics {
m.Timestamp = model.Time{}
}
return metrics
}
| {
r := metrics.NewRegistry()
sample := metrics.NewUniformSample(1024)
hist := metrics.GetOrRegisterHistogram("histogram", r, sample)
hist.Update(50)
hist.Update(100)
hist.Update(150)
g := apmgometrics.Wrap(r)
metrics := gatherMetrics(g)
for name := range metrics[0].Samples {
if !strings.HasPrefix(name, "histogram.") {
delete(metrics[0].Samples, name)
}
}
assert.Equal(t, map[string]model.Metric{
"histogram.count": {Value: 3},
"histogram.total": {Value: 300},
"histogram.min": {Value: 50},
"histogram.max": {Value: 150},
"histogram.stddev": {Value: 40.824829046386306},
"histogram.percentile.50": {Value: 100},
"histogram.percentile.95": {Value: 150},
"histogram.percentile.99": {Value: 150},
}, metrics[0].Samples)
} |
errors.rs | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use crate::definitions::Type;
use graphql_syntax::type_system_node::OperationType;
use interner::StringKey;
use thiserror::Error;
pub type Result<T> = std::result::Result<T, SchemaError>;
| #[derive(Debug, Error)]
pub enum SchemaError {
#[error("Duplicate {0:?} type definition, got '{1}' and '{2}'.")]
DuplicateOperationDefinition(OperationType, StringKey, StringKey),
#[error("Duplicate directive definition '{0}'.")]
DuplicateDirectiveDefinition(StringKey),
#[error("Cannot extend type '{0}', the type is not defined on the server schema.")]
ExtendUndefinedType(StringKey),
#[error("Expected an object type for name '{0}', got '{1:?}'.")]
ExpectedObjectReference(StringKey, Type),
#[error("Expected an interface type for name '{0}', got '{1:?}'.")]
ExpectedInterfaceReference(StringKey, Type),
#[error("Reference to undefined type '{0}'.")]
UndefinedType(StringKey),
#[error("Duplicate field definition '{0}' found.")]
DuplicateField(StringKey),
#[error("Duplicate definition for type '{0}'.")]
DuplicateType(StringKey),
#[error("Invalid ID '{0}' provided for type '{1}'")]
UnknownTypeID(usize, String),
// TODO: These should be replaced with error codes or by unifying the parsers.
#[error("Parse Error '{0}'.")]
Syntax(String),
} | |
die.py | from random import randint | self.sides = sides
def roll_die(self):
print(randint(1,self.sides)) |
class Die():
def __init__(self, sides): |
encoding.rs | //! Provides an Encoding enum.
use std::fmt;
use std::str;
pub use self::Encoding::{Chunked, Gzip, Deflate, Compress, Identity, EncodingExt};
/// A value to represent an encoding used in `Transfer-Encoding`
/// or `Accept-Encoding` header.
#[derive(Clone, PartialEq, Debug)]
pub enum Encoding {
/// The `chunked` encoding.
Chunked,
/// The `gzip` encoding.
Gzip,
/// The `deflate` encoding.
Deflate,
/// The `compress` encoding.
Compress,
/// The `identity` encoding.
Identity,
/// Some other encoding that is less common, can be any String.
EncodingExt(String)
}
impl fmt::Display for Encoding {
fn | (&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str(match *self {
Chunked => "chunked",
Gzip => "gzip",
Deflate => "deflate",
Compress => "compress",
Identity => "identity",
EncodingExt(ref s) => s.as_slice()
})
}
}
impl str::FromStr for Encoding {
type Err = ();
fn from_str(s: &str) -> Result<Encoding, ()> {
match s {
"chunked" => Ok(Chunked),
"deflate" => Ok(Deflate),
"gzip" => Ok(Gzip),
"compress" => Ok(Compress),
"identity" => Ok(Identity),
_ => Ok(EncodingExt(s.to_string()))
}
}
}
| fmt |
pcap_graph.py | ''' pcap_graph worker '''
import zerorpc
import os
import pprint
import gevent
def gsleep():
''' Convenience method for gevent.sleep '''
print '*** Gevent Sleep ***'
gevent.sleep(0)
class PcapGraph(object):
''' This worker generates a graph from a PCAP (depends on Bro) '''
dependencies = ['pcap_bro']
def __init__(self):
''' Initialization '''
self.workbench = zerorpc.Client(timeout=300, heartbeat=60)
self.workbench.connect('tcp://127.0.0.1:4242')
self.mime_types = ['application/x-dosexec', 'application/pdf', 'application/zip',
'application/jar', 'application/vnd.ms-cab-compressed',
'application/x-shockwave-flash']
self.exclude_mime_types = ['text/plain','text/html','image/jpeg','image/png']
# Caches for nodes and relationships to avoid adding things over and over
self.node_cache = set()
self.rel_cache = set()
# In general this is heavy handed but seems better to do than not do
self.workbench.clear_graph_db()
# Graph methods
def add_node(self, node_id, name, labels):
|
def add_rel(self, source_id, target_id, rel):
''' Cache aware add_rel '''
if (source_id, target_id) not in self.rel_cache:
self.workbench.add_rel(source_id, target_id, rel)
self.rel_cache.add((source_id, target_id))
def execute(self, input_data):
''' Okay this worker is going to build graphs from PCAP Bro output logs '''
# Grab the Bro log handles from the input
bro_logs = input_data['pcap_bro']
# DNS log
stream = self.workbench.stream_sample(bro_logs['dns_log'])
self.dns_log_graph(stream)
# Weird log
if 'weird_log' in bro_logs:
stream = self.workbench.stream_sample(bro_logs['weird_log'])
self.weird_log_graph(stream)
# HTTP log
gsleep()
stream = self.workbench.stream_sample(bro_logs['http_log'])
self.http_log_graph(stream)
# Files log
gsleep()
stream = self.workbench.stream_sample(bro_logs['files_log'])
self.files_log_graph(stream)
# Conn log
# Conn log can be big; skipping for now
'''
gsleep()
stream = self.workbench.stream_sample(bro_logs['conn_log'])
self.conn_log_graph(stream)
'''
return {'output':'go to http://localhost:7474/browser and execute this query "match (s:origin), (t:file), p=allShortestPaths((s)--(t)) return p"'}
def conn_log_graph(self, stream):
''' Build up a graph (nodes and edges from a Bro conn.log) '''
conn_log = list(stream)
print 'Entering conn_log_graph...(%d rows)' % len(conn_log)
for row in conn_log:
# Add the connection id with service as one of the labels
self.add_node(row['uid'], row['uid'][:6], ['conn_id', row['service']])
# Add the originating host
self.add_node(row['id.orig_h'], row['id.orig_h'], ['ip', 'origin'])
# Add the response host
self.add_node(row['id.resp_h'], row['id.resp_h'], ['ip', 'response'])
# Add the ip->connection relationships
self.add_rel(row['uid'], row['id.orig_h'], 'origin')
self.add_rel(row['uid'], row['id.resp_h'], 'response')
def http_log_graph(self, stream):
''' Build up a graph (nodes and edges from a Bro http.log) '''
http_log = list(stream)
print 'Entering http_log_graph...(%d rows)' % len(http_log)
for row in http_log:
# Skip '-' hosts
if (row['id.orig_h'] == '-'):
continue
# Add the originating host
self.add_node(row['id.orig_h'], row['id.orig_h'], ['host', 'origin'])
# Add the response host and reponse ip
self.add_node(row['host'], row['host'], ['host'])
self.add_node(row['id.resp_h'], row['id.resp_h'], ['host'])
# Add the http request relationships
self.add_rel(row['id.orig_h'], row['host'], 'http_request')
self.add_rel(row['host'], row['id.resp_h'], 'A')
# If the mime-type is interesting add the uri and the host->uri->host relationships
'''
if row['resp_mime_types'] in self.mime_types:
self.add_node(row['uri'], row['resp_mime_types'], ['file'])
self.add_rel(row['uri'], row['id.resp_h'], 'file')
'''
def dns_log_graph(self, stream):
''' Build up a graph (nodes and edges from a Bro dns.log) '''
dns_log = list(stream)
print 'Entering dns_log_graph...(%d rows)' % len(dns_log)
for row in dns_log:
# Skip '-' hosts
if (row['id.orig_h'] == '-'):
continue
# Add the originating host
self.add_node(row['id.orig_h'], row['id.orig_h'], ['host', 'origin'])
# Add the query host
self.add_node(row['query'], row['query'], ['host', 'dns_query'])
# The relationship between origin host and query
self.add_rel(row['id.orig_h'], row['query'], 'dns_query')
# Add the DNS answers as hosts and add the relationships
for answer in row['answers'].split(','):
self.add_node(answer, answer, ['host'])
self.add_rel(row['query'], answer, row['qtype_name'])
def weird_log_graph(self, stream):
''' Build up a graph (nodes and edges from a Bro weird.log) '''
weird_log = list(stream)
print 'Entering weird_log_graph...(%d rows)' % len(weird_log)
# Here we're just going to capture that something weird
# happened between two hosts
weird_pairs = set()
for row in weird_log:
weird_pairs.add((row['id.orig_h'], row['id.resp_h']))
# Okay now make the weird node for each pair
for pair in weird_pairs:
# Skip '-' hosts
if (pair[0] == '-'):
continue
# Add the originating host
self.add_node(pair[0], pair[0], ['host', 'origin'])
# Add the response host
self.add_node(pair[1], pair[1], ['host'])
# Add a weird node
weird_name = 'weird'+pair[0]+'_'+pair[1]
self.add_node(weird_name, 'weird', ['weird'])
# The relationships between the nodes
self.add_rel(pair[0], weird_name, 'weird')
self.add_rel(weird_name, pair[1], 'weird')
def files_log_graph(self, stream):
''' Build up a graph (nodes and edges from a Bro files.log) '''
file_log = list(stream)
print 'Entering file_log_graph...(%d rows)' % len(file_log)
for row in file_log:
# If the mime-type is interesting add the uri and the host->uri->host relationships
if row['mime_type'] not in self.exclude_mime_types:
# Check for weird conditions
if (row['total_bytes'] == '-'):
continue
if ('-' in row['md5']):
continue
# Check for missing bytes
if row['missing_bytes']:
labels = ['file','missing']
else:
labels = ['file']
# Make the file node name kewl
name = '%6s %s %.0f-KB' % (row['md5'][:6], row['mime_type'], row['total_bytes']/1024.0)
if row['missing_bytes']:
name += '*'
name = name.replace('application/','')
# Add the file node
self.add_node(row['md5'], name, labels)
# Add the tx_host
self.add_node(row['tx_hosts'], row['tx_hosts'], ['host'])
# Add the file->tx_host relationship
self.add_rel(row['tx_hosts'], row['md5'], 'file')
def __del__(self):
''' Class Cleanup '''
# Close zeroRPC client
self.workbench.close()
# Unit test: Create the class, the proper input and run the execute() method for a test
def test():
''' pcap_graph.py: Unit test '''
# This worker test requires a local server as it relies on the recursive dependencies
workbench = zerorpc.Client(timeout=300, heartbeat=60)
workbench.connect("tcp://127.0.0.1:4242")
# Generate the input data for this worker
data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/pcap/kitchen_boss.pcap')
md5 = workbench.store_sample(open(data_path, 'rb').read(), 'kitchen_boss.pcap', 'pcap')
input_data = workbench.work_request('pcap_bro', md5)
# Execute the worker (unit test)
worker = PcapGraph()
output = worker.execute(input_data)
print '\n<<< Unit Test >>>'
pprint.pprint(output)
# Execute the worker (server test)
output = workbench.work_request('pcap_graph', md5)
print '\n<<< Server Test >>>'
pprint.pprint(output)
if __name__ == "__main__":
test()
| ''' Cache aware add_node '''
if node_id not in self.node_cache:
self.workbench.add_node(node_id, name, labels)
self.node_cache.add(node_id) |
InsurancePlan_Benefit1.rs | #![allow(unused_imports, non_camel_case_types)]
use crate::model::CodeableConcept::CodeableConcept;
use crate::model::Extension::Extension;
use crate::model::InsurancePlan_Cost::InsurancePlan_Cost;
use serde_json::json;
use serde_json::value::Value;
use std::borrow::Cow;
/// Details of a Health Insurance product/plan provided by an organization.
#[derive(Debug)]
pub struct InsurancePlan_Benefit1<'a> {
pub(crate) value: Cow<'a, Value>,
}
impl InsurancePlan_Benefit1<'_> {
pub fn new(value: &Value) -> InsurancePlan_Benefit1 {
InsurancePlan_Benefit1 {
value: Cow::Borrowed(value),
}
}
pub fn to_json(&self) -> Value {
(*self.value).clone()
}
/// List of the costs associated with a specific benefit.
pub fn cost(&self) -> Option<Vec<InsurancePlan_Cost>> {
if let Some(Value::Array(val)) = self.value.get("cost") {
return Some(
val.into_iter()
.map(|e| InsurancePlan_Cost {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
/// May be used to represent additional information that is not part of the basic
/// definition of the element. To make the use of extensions safe and manageable,
/// there is a strict set of governance applied to the definition and use of
/// extensions. Though any implementer can define an extension, there is a set of
/// requirements that SHALL be met as part of the definition of the extension.
pub fn extension(&self) -> Option<Vec<Extension>> {
if let Some(Value::Array(val)) = self.value.get("extension") {
return Some(
val.into_iter()
.map(|e| Extension {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
/// Unique id for the element within a resource (for internal references). This may
/// be any string value that does not contain spaces.
pub fn id(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("id") {
return Some(string);
}
return None;
}
/// May be used to represent additional information that is not part of the basic
/// definition of the element and that modifies the understanding of the element in
/// which it is contained and/or the understanding of the containing element's
/// descendants. Usually modifier elements provide negation or qualification. To
/// make the use of extensions safe and manageable, there is a strict set of
/// governance applied to the definition and use of extensions. Though any
/// implementer can define an extension, there is a set of requirements that SHALL
/// be met as part of the definition of the extension. Applications processing a
/// resource are required to check for modifier extensions. Modifier extensions
/// SHALL NOT change the meaning of any elements on Resource or DomainResource
/// (including cannot change the meaning of modifierExtension itself).
pub fn modifier_extension(&self) -> Option<Vec<Extension>> |
/// Type of specific benefit (preventative; primary care office visit; speciality
/// office visit; hospitalization; emergency room; urgent care).
pub fn fhir_type(&self) -> CodeableConcept {
CodeableConcept {
value: Cow::Borrowed(&self.value["type"]),
}
}
pub fn validate(&self) -> bool {
if let Some(_val) = self.cost() {
if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
return false;
}
}
if let Some(_val) = self.extension() {
if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
return false;
}
}
if let Some(_val) = self.id() {}
if let Some(_val) = self.modifier_extension() {
if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
return false;
}
}
if !self.fhir_type().validate() {
return false;
}
return true;
}
}
#[derive(Debug)]
pub struct InsurancePlan_Benefit1Builder {
pub(crate) value: Value,
}
impl InsurancePlan_Benefit1Builder {
pub fn build(&self) -> InsurancePlan_Benefit1 {
InsurancePlan_Benefit1 {
value: Cow::Owned(self.value.clone()),
}
}
pub fn with(existing: InsurancePlan_Benefit1) -> InsurancePlan_Benefit1Builder {
InsurancePlan_Benefit1Builder {
value: (*existing.value).clone(),
}
}
pub fn new(fhir_type: CodeableConcept) -> InsurancePlan_Benefit1Builder {
let mut __value: Value = json!({});
__value["type"] = json!(fhir_type.value);
return InsurancePlan_Benefit1Builder { value: __value };
}
pub fn cost<'a>(
&'a mut self,
val: Vec<InsurancePlan_Cost>,
) -> &'a mut InsurancePlan_Benefit1Builder {
self.value["cost"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
return self;
}
pub fn extension<'a>(
&'a mut self,
val: Vec<Extension>,
) -> &'a mut InsurancePlan_Benefit1Builder {
self.value["extension"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
return self;
}
pub fn id<'a>(&'a mut self, val: &str) -> &'a mut InsurancePlan_Benefit1Builder {
self.value["id"] = json!(val);
return self;
}
pub fn modifier_extension<'a>(
&'a mut self,
val: Vec<Extension>,
) -> &'a mut InsurancePlan_Benefit1Builder {
self.value["modifierExtension"] =
json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
return self;
}
}
| {
if let Some(Value::Array(val)) = self.value.get("modifierExtension") {
return Some(
val.into_iter()
.map(|e| Extension {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
} |
apps.py | from django.apps import AppConfig
class ApiConfig(AppConfig):
| name = 'vaccine_card.api' |
|
init.js | const Database = require("./config")
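// One-off bootstrap script: creates the rooms and questions tables used by the app.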
const initDb = {
async init() {
const db = await Database()
await db.exec(`CREATE TABLE rooms (
id INTEGER PRIMARY KEY,
pass TEXT
)`);
| read INT,
room INT
)`);
await db.close()
}
}
initDb.init(); | await db.exec(`CREATE TABLE questions (
id INTEGER PRIMARY KEY AUTOINCREMENT,
title TEXT, |