file_name (string, length 3-137) | prefix (string, length 0-918k) | suffix (string, length 0-962k) | middle (string, length 0-812k)
---|---|---|---|
f275ba0953af8196784d5ede049f55ff5d3b99a5.js | mycallback( {"CONTRIBUTOR OCCUPATION": "MD", "CONTRIBUTION AMOUNT (F3L Bundled)": "500", "ELECTION CODE": "G2010", "MEMO CODE": "", "CONTRIBUTOR EMPLOYER": "Self-Employed", "DONOR CANDIDATE STATE": "", "CONTRIBUTOR STREET 1": "4033 3rd Avenue", "CONTRIBUTOR MIDDLE NAME": "", "DONOR CANDIDATE FEC ID": "", "DONOR CANDIDATE MIDDLE NAME": "", "CONTRIBUTOR STATE": "CA", "DONOR CANDIDATE FIRST NAME": "", "CONTRIBUTOR FIRST NAME": "Jerrold", "BACK REFERENCE SCHED NAME": "", "DONOR CANDIDATE DISTRICT": "", "CONTRIBUTION DATE": "20101018", "DONOR COMMITTEE NAME": "", "MEMO TEXT/DESCRIPTION": "", "Reference to SI or SL system code that identifies the Account": "", "FILER COMMITTEE ID NUMBER": "C00344671", "DONOR CANDIDATE LAST NAME": "", "CONTRIBUTOR LAST NAME": "Glassman", "_record_type": "fec.version.v7_0.SA", "CONDUIT STREET2": "", "CONDUIT STREET1": "", "DONOR COMMITTEE FEC ID": "", "CONTRIBUTION PURPOSE DESCRIP": "", "CONTRIBUTOR ZIP": "921032117", "CONTRIBUTOR STREET 2": "", "CONDUIT CITY": "", "ENTITY TYPE": "IND", "CONTRIBUTOR CITY": "San Diego", "CONTRIBUTOR SUFFIX": "", "TRANSACTION ID": "A-C66969", "DONOR CANDIDATE SUFFIX": "", "DONOR CANDIDATE OFFICE": "", "CONTRIBUTION PURPOSE CODE": "", "ELECTION OTHER DESCRIPTION": "", "_src_file": "2011/20110504/727421.fec_1.yml", "CONDUIT STATE": "", "CONTRIBUTOR ORGANIZATION NAME": "", "BACK REFERENCE TRAN ID NUMBER": "", "DONOR CANDIDATE PREFIX": "", "CONTRIBUTOR PREFIX": "Dr.", "CONDUIT ZIP": "", "CONDUIT NAME": "", "CONTRIBUTION AGGREGATE F3L Semi-annual Bundled": "1250", "FORM TYPE": "SA11ai"}); | mycallback( {"CONTRIBUTOR OCCUPATION": "MD", "CONTRIBUTION AMOUNT (F3L Bundled)": "500", "ELECTION CODE": "G2010", "MEMO CODE": "", "CONTRIBUTOR EMPLOYER": "Self-Employed", "DONOR CANDIDATE STATE": "", "CONTRIBUTOR STREET 1": "4033 3rd Avenue", "CONTRIBUTOR MIDDLE NAME": "", "DONOR CANDIDATE FEC ID": "", "DONOR CANDIDATE MIDDLE NAME": "", "CONTRIBUTOR STATE": "CA", "DONOR CANDIDATE FIRST NAME": "", "CONTRIBUTOR FIRST NAME": "Jerrold", "BACK REFERENCE SCHED NAME": "", "DONOR CANDIDATE DISTRICT": "", "CONTRIBUTION DATE": "20101018", "DONOR COMMITTEE NAME": "", "MEMO TEXT/DESCRIPTION": "", "Reference to SI or SL system code that identifies the Account": "", "FILER COMMITTEE ID NUMBER": "C00344671", "DONOR CANDIDATE LAST NAME": "", "CONTRIBUTOR LAST NAME": "Glassman", "_record_type": "fec.version.v7_0.SA", "CONDUIT STREET2": "", "CONDUIT STREET1": "", "DONOR COMMITTEE FEC ID": "", "CONTRIBUTION PURPOSE DESCRIP": "", "CONTRIBUTOR ZIP": "921032117", "CONTRIBUTOR STREET 2": "", "CONDUIT CITY": "", "ENTITY TYPE": "IND", "CONTRIBUTOR CITY": "San Diego", "CONTRIBUTOR SUFFIX": "", "TRANSACTION ID": "A-C66969", "DONOR CANDIDATE SUFFIX": "", "DONOR CANDIDATE OFFICE": "", "CONTRIBUTION PURPOSE CODE": "", "ELECTION OTHER DESCRIPTION": "", "_src_file": "2011/20110504/727421.fec_1.yml", "CONDUIT STATE": "", "CONTRIBUTOR ORGANIZATION NAME": "", "BACK REFERENCE TRAN ID NUMBER": "", "DONOR CANDIDATE PREFIX": "", "CONTRIBUTOR PREFIX": "Dr.", "CONDUIT ZIP": "", "CONDUIT NAME": "", "CONTRIBUTION AGGREGATE F3L Semi-annual Bundled": "1250", "FORM TYPE": "SA11ai"}); |
|
class_h_p_1_1_h_p_t_r_i_m_1_1_s_d_k_1_1_security_guide_list_1_1_security_guide_list_enumerator.js | var class_h_p_1_1_h_p_t_r_i_m_1_1_s_d_k_1_1_security_guide_list_1_1_security_guide_list_enumerator =
[
[ "SecurityGuideListEnumerator", "class_h_p_1_1_h_p_t_r_i_m_1_1_s_d_k_1_1_security_guide_list_1_1_security_guide_list_enumerator.html#a5bcb4a2f13c0bfdef050b3a8a1755a8d", null ], | [ "MoveNext", "class_h_p_1_1_h_p_t_r_i_m_1_1_s_d_k_1_1_security_guide_list_1_1_security_guide_list_enumerator.html#ae19075d7d1655f8c9f79a0ecb6ec88a1", null ],
[ "Reset", "class_h_p_1_1_h_p_t_r_i_m_1_1_s_d_k_1_1_security_guide_list_1_1_security_guide_list_enumerator.html#abbc764e86b98b277d64ac4b18e73e202", null ],
[ "Current", "class_h_p_1_1_h_p_t_r_i_m_1_1_s_d_k_1_1_security_guide_list_1_1_security_guide_list_enumerator.html#ad1723667b7441d71687b9ae4765afb0a", null ],
[ "Current", "class_h_p_1_1_h_p_t_r_i_m_1_1_s_d_k_1_1_security_guide_list_1_1_security_guide_list_enumerator.html#aef17e010f48cfef8b0fad99c46707e64", null ]
]; | [ "Dispose", "class_h_p_1_1_h_p_t_r_i_m_1_1_s_d_k_1_1_security_guide_list_1_1_security_guide_list_enumerator.html#ac7fa88c65e6c13931596eb5453258b0e", null ], |
model_admin.py | from django.contrib import admin
| class ModelAdmin(admin.ModelAdmin):
"""Future app-wide admin customizations""" | |
kubernetes_cluster_resolver.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Cluster Resolvers for Kubernetes."""
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import ClusterResolver
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import format_master_url
from tensorflow.python.training import server_lib
from tensorflow.python.util.tf_export import tf_export
@tf_export('distribute.cluster_resolver.KubernetesClusterResolver')
class KubernetesClusterResolver(ClusterResolver):
"""ClusterResolver for Kubernetes.
This is an implementation of cluster resolvers for Kubernetes. When given the
Kubernetes namespace and label selector for pods, we will retrieve the
pod IP addresses of all running pods matching the selector, and return a
ClusterSpec based on that information.
Note: it cannot retrieve `task_type`, `task_id` or `rpc_layer`. To use it
with some distribution strategies like
`tf.distribute.experimental.MultiWorkerMirroredStrategy`, you will need to
specify `task_type` and `task_id` by setting these attributes.
Usage example with tf.distribute.Strategy:
```Python
# On worker 0
cluster_resolver = KubernetesClusterResolver(
{"worker": ["job-name=worker-cluster-a", "job-name=worker-cluster-b"]})
cluster_resolver.task_type = "worker"
cluster_resolver.task_id = 0
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
cluster_resolver=cluster_resolver)
# On worker 1
cluster_resolver = KubernetesClusterResolver(
{"worker": ["job-name=worker-cluster-a", "job-name=worker-cluster-b"]})
cluster_resolver.task_type = "worker"
cluster_resolver.task_id = 1
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
cluster_resolver=cluster_resolver)
```
"""
def __init__(self,
job_to_label_mapping=None,
tf_server_port=8470,
rpc_layer='grpc',
override_client=None):
"""Initializes a new KubernetesClusterResolver.
This initializes a new Kubernetes ClusterResolver. The ClusterResolver
will attempt to talk to the Kubernetes master to retrieve all the instances
of pods matching a label selector.
Args:
job_to_label_mapping: A mapping of TensorFlow jobs to label selectors.
This allows users to specify many TensorFlow jobs in one Cluster
Resolver, and each job can have pods belonging to different label
selectors. For example, a sample mapping might be
```
{'worker': ['job-name=worker-cluster-a', 'job-name=worker-cluster-b'],
'ps': ['job-name=ps-1', 'job-name=ps-2']}
```
tf_server_port: The port the TensorFlow server is listening on.
rpc_layer: (Optional) The RPC layer TensorFlow should use to communicate
between tasks in Kubernetes. Defaults to 'grpc'.
override_client: The Kubernetes client (usually automatically retrieved
using `from kubernetes import client as k8sclient`). If you pass this
in, you are responsible for setting Kubernetes credentials manually.
Raises:
ImportError: If the Kubernetes Python client is not installed and no
`override_client` is passed in.
RuntimeError: If autoresolve_task is not a boolean or a callable.
"""
try:
from kubernetes import config as k8sconfig # pylint: disable=g-import-not-at-top
k8sconfig.load_kube_config()
except ImportError:
if not override_client:
raise ImportError('The Kubernetes Python client must be installed '
'before using the Kubernetes Cluster Resolver. '
'To install the Kubernetes Python client, run '
'`pip install kubernetes` on your command line.')
if not job_to_label_mapping:
job_to_label_mapping = {'worker': ['job-name=tensorflow']}
self._job_to_label_mapping = job_to_label_mapping
self._tf_server_port = tf_server_port
self._override_client = override_client
self.task_type = None
self.task_id = None
self.rpc_layer = rpc_layer
def | (self, task_type=None, task_id=None, rpc_layer=None):
"""Returns the master address to use when creating a session.
You must have set the task_type and task_id object properties before
calling this function, or pass in the `task_type` and `task_id`
parameters when using this function. If you do both, the function parameters
will override the object properties.
Note: this is only useful for TensorFlow 1.x.
Args:
task_type: (Optional) The type of the TensorFlow task of the master.
task_id: (Optional) The index of the TensorFlow task of the master.
rpc_layer: (Optional) The RPC protocol for the given cluster.
Returns:
The name or URL of the session master.
"""
task_type = task_type if task_type is not None else self.task_type
task_id = task_id if task_id is not None else self.task_id
if task_type is not None and task_id is not None:
return format_master_url(
self.cluster_spec().task_address(task_type, task_id),
rpc_layer or self.rpc_layer)
return ''
def cluster_spec(self):
"""Returns a ClusterSpec object based on the latest info from Kubernetes.
We retrieve the information from the Kubernetes master every time this
method is called.
Returns:
A ClusterSpec containing host information returned from Kubernetes.
Raises:
RuntimeError: If any of the pods returned by the master is not in the
`Running` phase.
"""
if self._override_client:
client = self._override_client
else:
from kubernetes import config as k8sconfig # pylint: disable=g-import-not-at-top
from kubernetes import client as k8sclient # pylint: disable=g-import-not-at-top
k8sconfig.load_kube_config()
client = k8sclient.CoreV1Api()
cluster_map = {}
for tf_job in self._job_to_label_mapping:
all_pods = []
for selector in self._job_to_label_mapping[tf_job]:
ret = client.list_pod_for_all_namespaces(label_selector=selector)
selected_pods = []
# Sort the list by the name to make sure it doesn't change from call to call.
for pod in sorted(ret.items, key=lambda x: x.metadata.name):
if pod.status.phase == 'Running':
selected_pods.append(
'%s:%s' % (pod.status.host_ip, self._tf_server_port))
else:
raise RuntimeError('Pod "%s" is not running; phase: "%s"' %
(pod.metadata.name, pod.status.phase))
all_pods.extend(selected_pods)
cluster_map[tf_job] = all_pods
return server_lib.ClusterSpec(cluster_map)
| master |
info_test.go | package golog
import (
"bytes"
"fmt"
"path"
"runtime"
"sync/atomic"
"testing"
"time"
)
func TestNewInfo(t *testing.T) {
var buf bytes.Buffer
log := NewLogger(nil)
log.SetOutput(&buf)
log.SetLogLevel(DebugLevel)
// Get current function name
pc := make([]uintptr, 15)
n := runtime.Callers(2, pc)
frames := runtime.CallersFrames(pc[:n])
frame, _ := frames.Next()
_, filename, line, _ := runtime.Caller(1)
filename = path.Base(filename)
info := &Info{
ID: atomic.AddUint64(&logNo, 1),
Time: time.Now().Format(log.worker.timeFormat),
Module: log.Options.Module,
Function: frame.Function,
Level: InfoLevel,
Message: "Hello World!",
Filename: filename,
Line: line,
}
log.worker.Log(ErrorLevel, 2, info)
// "[35munknown 2019-10-15 19:20:51 INF - Hello World![0m"
//want := fmt.Sprintf("[31m[unknown] #80 %s INFO Hello World![0m\n", time.Now().Format("2006-01-02 15:04:05"))
want := fmt.Sprintf("[31m[000080] [unknown] INFO %s testing.go#909-testing.tRunner : Hello World![0m\n", time.Now().Format("2006-01-02 15:04:05"))
have := buf.String()
if have != want {
t.Errorf("\nWant: %sHave: %s", want, have)
}
| buf.Reset()
} |
|
index.js | import { DEBUG } from './constants'; |
console.log('DEBUG should be true: ', DEBUG); |
|
main.go | package main
import (
"flag"
"fmt"
"log"
"net/http"
"os"
)
func main() | {
var (
flagSet = flag.NewFlagSet("sink", flag.ExitOnError)
ip = flagSet.String("ip", os.Getenv("DOCKER_IP"), "local ipv4 to report for `/latest/meta-data/local-ipv4`")
port = flagSet.Int("port", 8080, "port to listen to")
)
if err := flagSet.Parse(os.Args[1:]); err != nil {
log.Fatal(err)
}
log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", *port), http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
w.Header().Set("Connection", "Close")
fmt.Fprintf(w, "%s", *ip)
})))
} |
|
security_test.go | package services
import (
"github.com/ant0ine/go-json-rest/rest"
"github.com/julienbayle/jeparticipe/entities"
"github.com/stretchr/testify/assert"
"net/http"
"os"
"testing"
)
func NewRequest() *rest.Request |
func TestAdminPriviledge(t *testing.T) {
r := NewRequest()
r.PathParams["event"] = "testevent"
r.Env["REMOTE_USER"] = "testevent-admin"
assert.True(t, hasAdminPriviledge(r))
assert.False(t, hasSuperAdminPriviledge(r))
}
func TestSuperadminPriviledge(t *testing.T) {
r := NewRequest()
r.PathParams["event"] = "testevent"
r.Env["REMOTE_USER"] = "superadmin"
assert.True(t, hasAdminPriviledge(r))
r = NewRequest()
r.Env["REMOTE_USER"] = "superadmin"
assert.True(t, hasAdminPriviledge(r))
assert.True(t, hasSuperAdminPriviledge(r))
}
func TestNoPriviledge(t *testing.T) {
r := NewRequest()
r.PathParams["event"] = "testevent"
r.Env["REMOTE_USER"] = "auser"
assert.False(t, hasAdminPriviledge(r))
r = NewRequest()
r.PathParams["event"] = "testevent"
r.Env["REMOTE_USER"] = "otherevent"
assert.False(t, hasAdminPriviledge(r))
r = NewRequest()
assert.False(t, hasAdminPriviledge(r))
r = NewRequest()
r.Env["REMOTE_USER"] = "admin"
assert.False(t, hasAdminPriviledge(r))
r = NewRequest()
r.Env["REMOTE_USER"] = "-admin"
assert.False(t, hasAdminPriviledge(r))
r = NewRequest()
r.Env["REMOTE_USER"] = "testevent-admin"
assert.False(t, hasAdminPriviledge(r))
}
func TestAuthentificator(t *testing.T) {
// Create a test event
repositoryService := NewRepositoryService("security.db")
repositoryService.CreateCollectionIfNotExists(EventsBucketName)
defer repositoryService.ShutDown()
defer os.Remove("security.db")
eventService := &EventService{
RepositoryService: repositoryService,
Secret: "secret",
}
event, err := entities.NewPendingConfirmationEvent("testevent", "ip", "[email protected]")
assert.NoError(t, err)
eventService.ConfirmAndSaveEvent(event)
eventAdminPass := event.AdminPassword
assert.Len(t, eventAdminPass, 8)
// Wrong login or pass
assert.False(t, Authenticate(eventService, "superpass", "a login", "a pass"))
assert.False(t, Authenticate(eventService, "superpass", "", "a pass"))
assert.False(t, Authenticate(eventService, "superpass", "a login", ""))
// Super admin
assert.False(t, Authenticate(eventService, "superpass", "superadmin", "a pass"))
assert.False(t, Authenticate(eventService, "superpass", "superadmin", ""))
assert.True(t, Authenticate(eventService, "superpass", "superadmin", "superpass"))
// Event admin
assert.False(t, Authenticate(eventService, "superpass", event.Code+"-admin", "a pass"))
assert.False(t, Authenticate(eventService, "superpass", event.Code+"-admin", ""))
assert.True(t, Authenticate(eventService, "superpass", event.Code+"-admin", eventAdminPass))
}
func TestNewPassword(t *testing.T) {
assert.Len(t, NewPassword(4), 4)
assert.Len(t, NewPassword(10), 10)
}
| {
origReq, _ := http.NewRequest("", "", nil)
r := &rest.Request{
origReq,
make(map[string]string, 1),
make(map[string]interface{}, 1),
}
return r
} |
block.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::{self, BasicBlockRef, ValueRef, OperandBundleDef};
use rustc::ty;
use rustc::mir::repr as mir;
use abi::{Abi, FnType, ArgType};
use adt;
use base;
use build;
use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual};
use common::{self, type_is_fat_ptr, Block, BlockAndBuilder, C_undef};
use debuginfo::DebugLoc;
use Disr;
use machine::{llalign_of_min, llbitsize_of_real};
use meth;
use type_of;
use glue;
use type_::Type;
use super::{MirContext, TempRef, drop};
use super::lvalue::{LvalueRef, load_fat_ptr};
use super::operand::OperandRef;
use super::operand::OperandValue::{self, FatPtr, Immediate, Ref};
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
pub fn trans_block(&mut self, bb: mir::BasicBlock) {
debug!("trans_block({:?})", bb);
let mut bcx = self.bcx(bb);
let mir = self.mir.clone();
let data = mir.basic_block_data(bb);
// MSVC SEH bits
let (cleanup_pad, cleanup_bundle) = if let Some((cp, cb)) = self.make_cleanup_pad(bb) {
(Some(cp), Some(cb))
} else {
(None, None)
};
let funclet_br = |bcx: BlockAndBuilder, llbb: BasicBlockRef| if let Some(cp) = cleanup_pad {
bcx.cleanup_ret(cp, Some(llbb));
} else {
bcx.br(llbb);
};
for statement in &data.statements {
bcx = self.trans_statement(bcx, statement);
}
let terminator = data.terminator();
debug!("trans_block: terminator: {:?}", terminator);
let debug_loc = DebugLoc::ScopeAt(self.scopes[terminator.scope.index()],
terminator.span);
debug_loc.apply_to_bcx(&bcx);
debug_loc.apply(bcx.fcx());
match terminator.kind {
mir::TerminatorKind::Resume => {
if let Some(cleanup_pad) = cleanup_pad {
bcx.cleanup_ret(cleanup_pad, None);
} else {
let ps = self.get_personality_slot(&bcx);
let lp = bcx.load(ps);
bcx.with_block(|bcx| {
base::call_lifetime_end(bcx, ps);
base::trans_unwind_resume(bcx, lp);
});
}
}
mir::TerminatorKind::Goto { target } => {
funclet_br(bcx, self.llblock(target));
}
mir::TerminatorKind::If { ref cond, targets: (true_bb, false_bb) } => {
let cond = self.trans_operand(&bcx, cond);
let lltrue = self.llblock(true_bb);
let llfalse = self.llblock(false_bb);
bcx.cond_br(cond.immediate(), lltrue, llfalse);
}
mir::TerminatorKind::Switch { ref discr, ref adt_def, ref targets } => {
let discr_lvalue = self.trans_lvalue(&bcx, discr);
let ty = discr_lvalue.ty.to_ty(bcx.tcx());
let repr = adt::represent_type(bcx.ccx(), ty);
let discr = bcx.with_block(|bcx|
adt::trans_get_discr(bcx, &repr, discr_lvalue.llval, None, true)
);
// The else branch of the Switch can't be hit, so branch to an unreachable
// instruction so LLVM knows that
let unreachable_blk = self.unreachable_block();
let switch = bcx.switch(discr, unreachable_blk.llbb, targets.len());
assert_eq!(adt_def.variants.len(), targets.len());
for (adt_variant, target) in adt_def.variants.iter().zip(targets) {
let llval = bcx.with_block(|bcx|
adt::trans_case(bcx, &repr, Disr::from(adt_variant.disr_val))
);
let llbb = self.llblock(*target);
build::AddCase(switch, llval, llbb)
}
}
mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
let (otherwise, targets) = targets.split_last().unwrap();
let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval);
let discr = bcx.with_block(|bcx| base::to_immediate(bcx, discr, switch_ty));
let switch = bcx.switch(discr, self.llblock(*otherwise), values.len());
for (value, target) in values.iter().zip(targets) {
let llval = self.trans_constval(&bcx, value, switch_ty).immediate();
let llbb = self.llblock(*target);
build::AddCase(switch, llval, llbb)
}
}
mir::TerminatorKind::Return => {
bcx.with_block(|bcx| {
self.fcx.build_return_block(bcx, debug_loc);
})
}
mir::TerminatorKind::Drop { ref value, target, unwind } => {
let lvalue = self.trans_lvalue(&bcx, value);
let ty = lvalue.ty.to_ty(bcx.tcx());
// Double check for necessity to drop
if !glue::type_needs_drop(bcx.tcx(), ty) {
funclet_br(bcx, self.llblock(target));
return;
}
let drop_fn = glue::get_drop_glue(bcx.ccx(), ty);
let drop_ty = glue::get_drop_glue_type(bcx.ccx(), ty);
let llvalue = if drop_ty != ty {
bcx.pointercast(lvalue.llval, type_of::type_of(bcx.ccx(), drop_ty).ptr_to())
} else {
lvalue.llval
};
if let Some(unwind) = unwind {
let uwbcx = self.bcx(unwind);
let unwind = self.make_landing_pad(uwbcx);
bcx.invoke(drop_fn,
&[llvalue],
self.llblock(target),
unwind.llbb(),
cleanup_bundle.as_ref());
self.bcx(target).at_start(|bcx| {
debug_loc.apply_to_bcx(bcx);
drop::drop_fill(bcx, lvalue.llval, ty)
});
} else {
bcx.call(drop_fn, &[llvalue], cleanup_bundle.as_ref());
drop::drop_fill(&bcx, lvalue.llval, ty);
funclet_br(bcx, self.llblock(target));
}
}
mir::TerminatorKind::Call { ref func, ref args, ref destination, ref cleanup } => {
// Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
let callee = self.trans_operand(&bcx, func);
let (mut callee, abi, sig) = match callee.ty.sty {
ty::TyFnDef(def_id, substs, f) => {
(Callee::def(bcx.ccx(), def_id, substs), f.abi, &f.sig)
}
ty::TyFnPtr(f) => {
(Callee {
data: Fn(callee.immediate()),
ty: callee.ty
}, f.abi, &f.sig)
}
_ => bug!("{} is not callable", callee.ty)
};
let sig = bcx.tcx().erase_late_bound_regions(sig);
// Handle intrinsics old trans wants Expr's for, ourselves.
let intrinsic = match (&callee.ty.sty, &callee.data) {
(&ty::TyFnDef(def_id, _, _), &Intrinsic) => {
Some(bcx.tcx().item_name(def_id).as_str())
}
_ => None
};
let intrinsic = intrinsic.as_ref().map(|s| &s[..]);
if intrinsic == Some("move_val_init") {
let &(_, target) = destination.as_ref().unwrap();
// The first argument is a thin destination pointer.
let llptr = self.trans_operand(&bcx, &args[0]).immediate();
let val = self.trans_operand(&bcx, &args[1]);
self.store_operand(&bcx, llptr, val);
self.set_operand_dropped(&bcx, &args[1]);
funclet_br(bcx, self.llblock(target));
return;
}
if intrinsic == Some("transmute") {
let &(ref dest, target) = destination.as_ref().unwrap();
self.with_lvalue_ref(&bcx, dest, |this, dest| {
this.trans_transmute(&bcx, &args[0], dest);
});
self.set_operand_dropped(&bcx, &args[0]);
funclet_br(bcx, self.llblock(target));
return;
}
let extra_args = &args[sig.inputs.len()..];
let extra_args = extra_args.iter().map(|op_arg| {
self.mir.operand_ty(bcx.tcx(), op_arg)
}).collect::<Vec<_>>();
let fn_ty = callee.direct_fn_type(bcx.ccx(), &extra_args);
// The arguments we'll be passing. Plus one to account for outptr, if used.
let arg_count = fn_ty.args.len() + fn_ty.ret.is_indirect() as usize;
let mut llargs = Vec::with_capacity(arg_count);
// Prepare the return value destination
let ret_dest = if let Some((ref dest, _)) = *destination {
let is_intrinsic = if let Intrinsic = callee.data {
true
} else {
false
};
self.make_return_dest(&bcx, dest, &fn_ty.ret, &mut llargs, is_intrinsic)
} else {
ReturnDest::Nothing
};
// Split the rust-call tupled arguments off.
let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() {
let (tup, args) = args.split_last().unwrap();
(args, Some(tup))
} else {
(&args[..], None)
};
let mut idx = 0;
for arg in first_args {
let val = self.trans_operand(&bcx, arg).val;
self.trans_argument(&bcx, val, &mut llargs, &fn_ty,
&mut idx, &mut callee.data);
}
if let Some(tup) = untuple {
self.trans_arguments_untupled(&bcx, tup, &mut llargs, &fn_ty,
&mut idx, &mut callee.data)
}
let fn_ptr = match callee.data {
NamedTupleConstructor(_) => {
// FIXME translate this like mir::Rvalue::Aggregate.
callee.reify(bcx.ccx()).val
}
Intrinsic => {
use callee::ArgVals;
use expr::{Ignore, SaveIn};
use intrinsic::trans_intrinsic_call;
let (dest, llargs) = match ret_dest {
_ if fn_ty.ret.is_indirect() => {
(SaveIn(llargs[0]), &llargs[1..])
}
ReturnDest::Nothing => (Ignore, &llargs[..]),
ReturnDest::IndirectOperand(dst, _) |
ReturnDest::Store(dst) => (SaveIn(dst), &llargs[..]),
ReturnDest::DirectOperand(_) =>
bug!("Cannot use direct operand with an intrinsic call")
};
bcx.with_block(|bcx| {
trans_intrinsic_call(bcx, callee.ty, &fn_ty,
ArgVals(llargs), dest,
debug_loc);
});
if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
// Make a fake operand for store_return
let op = OperandRef {
val: OperandValue::Ref(dst),
ty: sig.output.unwrap()
};
self.store_return(&bcx, ret_dest, fn_ty.ret, op);
}
if let Some((_, target)) = *destination {
for op in args {
self.set_operand_dropped(&bcx, op);
}
funclet_br(bcx, self.llblock(target));
} else {
// trans_intrinsic_call already used Unreachable.
// bcx.unreachable();
}
return;
}
Fn(f) => f,
Virtual(_) => bug!("Virtual fn ptr not extracted")
};
// Many different ways to call a function handled here
if let Some(cleanup) = cleanup.map(|bb| self.bcx(bb)) {
let ret_bcx = if let Some((_, target)) = *destination {
self.blocks[target.index()]
} else {
self.unreachable_block()
};
let landingpad = self.make_landing_pad(cleanup);
let invokeret = bcx.invoke(fn_ptr,
&llargs,
ret_bcx.llbb,
landingpad.llbb(),
cleanup_bundle.as_ref());
fn_ty.apply_attrs_callsite(invokeret);
landingpad.at_start(|bcx| {
debug_loc.apply_to_bcx(bcx);
for op in args {
self.set_operand_dropped(bcx, op);
}
});
if destination.is_some() {
let ret_bcx = ret_bcx.build();
ret_bcx.at_start(|ret_bcx| {
debug_loc.apply_to_bcx(ret_bcx);
let op = OperandRef {
val: OperandValue::Immediate(invokeret),
ty: sig.output.unwrap()
};
self.store_return(&ret_bcx, ret_dest, fn_ty.ret, op);
for op in args {
self.set_operand_dropped(&ret_bcx, op);
}
});
}
} else {
let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle.as_ref());
fn_ty.apply_attrs_callsite(llret);
if let Some((_, target)) = *destination {
let op = OperandRef {
val: OperandValue::Immediate(llret),
ty: sig.output.unwrap()
};
self.store_return(&bcx, ret_dest, fn_ty.ret, op);
for op in args {
self.set_operand_dropped(&bcx, op);
}
funclet_br(bcx, self.llblock(target));
} else {
// no need to drop args, because the call never returns
bcx.unreachable();
}
}
}
}
}
fn trans_argument(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
val: OperandValue,
llargs: &mut Vec<ValueRef>,
fn_ty: &FnType,
next_idx: &mut usize,
callee: &mut CalleeData) {
// Treat the values in a fat pointer separately.
if let FatPtr(ptr, meta) = val {
if *next_idx == 0 {
if let Virtual(idx) = *callee {
let llfn = bcx.with_block(|bcx| {
meth::get_virtual_method(bcx, meta, idx)
});
let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
*callee = Fn(bcx.pointercast(llfn, llty));
}
}
self.trans_argument(bcx, Immediate(ptr), llargs, fn_ty, next_idx, callee);
self.trans_argument(bcx, Immediate(meta), llargs, fn_ty, next_idx, callee);
return;
}
let arg = &fn_ty.args[*next_idx];
*next_idx += 1;
// Fill padding with undef value, where applicable.
if let Some(ty) = arg.pad {
llargs.push(C_undef(ty));
}
if arg.is_ignore() {
return;
}
// Force by-ref if we have to load through a cast pointer.
let (mut llval, by_ref) = match val {
Immediate(llval) if arg.is_indirect() || arg.cast.is_some() => {
let llscratch = build::AllocaFcx(bcx.fcx(), arg.original_ty, "arg");
bcx.store(llval, llscratch);
(llscratch, true)
}
Immediate(llval) => (llval, false),
Ref(llval) => (llval, true),
FatPtr(_, _) => bug!("fat pointers handled above")
};
if by_ref && !arg.is_indirect() {
// Have to load the argument, maybe while casting it.
if arg.original_ty == Type::i1(bcx.ccx()) {
// We store bools as i8 so we need to truncate to i1.
llval = bcx.load_range_assert(llval, 0, 2, llvm::False);
llval = bcx.trunc(llval, arg.original_ty);
} else if let Some(ty) = arg.cast {
llval = bcx.load(bcx.pointercast(llval, ty.ptr_to()));
let llalign = llalign_of_min(bcx.ccx(), arg.ty);
unsafe {
llvm::LLVMSetAlignment(llval, llalign);
}
} else {
llval = bcx.load(llval);
}
}
llargs.push(llval);
}
fn trans_arguments_untupled(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
operand: &mir::Operand<'tcx>,
llargs: &mut Vec<ValueRef>,
fn_ty: &FnType,
next_idx: &mut usize,
callee: &mut CalleeData) {
// FIXME: consider having some optimization to avoid tupling/untupling
// (and storing/loading in the case of immediates)
// avoid trans_operand for pointless copying
let lv = match *operand {
mir::Operand::Consume(ref lvalue) => self.trans_lvalue(bcx, lvalue),
mir::Operand::Constant(ref constant) => {
// FIXME: consider being less pessimized
if constant.ty.is_nil() {
return;
}
let ty = bcx.monomorphize(&constant.ty);
let lv = LvalueRef::alloca(bcx, ty, "__untuple_alloca");
let constant = self.trans_constant(bcx, constant);
self.store_operand(bcx, lv.llval, constant);
lv
}
};
let lv_ty = lv.ty.to_ty(bcx.tcx());
let result_types = match lv_ty.sty {
ty::TyTuple(ref tys) => tys,
_ => span_bug!(
self.mir.span,
"bad final argument to \"rust-call\" fn {:?}", lv_ty)
};
let base_repr = adt::represent_type(bcx.ccx(), lv_ty);
let base = adt::MaybeSizedValue::sized(lv.llval);
for (n, &ty) in result_types.iter().enumerate() {
let ptr = adt::trans_field_ptr_builder(bcx, &base_repr, base, Disr(0), n);
let val = if common::type_is_fat_ptr(bcx.tcx(), ty) {
let (lldata, llextra) = load_fat_ptr(bcx, ptr);
FatPtr(lldata, llextra)
} else {
// Don't bother loading the value, trans_argument will.
Ref(ptr)
};
self.trans_argument(bcx, val, llargs, fn_ty, next_idx, callee);
}
}
fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) -> ValueRef {
let ccx = bcx.ccx();
if let Some(slot) = self.llpersonalityslot {
slot
} else {
let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
bcx.with_block(|bcx| {
let slot = base::alloca(bcx, llretty, "personalityslot");
self.llpersonalityslot = Some(slot);
base::call_lifetime_start(bcx, slot);
slot
})
}
}
/// Create a landingpad wrapper around the given Block.
///
/// No-op in MSVC SEH scheme.
fn make_landing_pad(&mut self,
cleanup: BlockAndBuilder<'bcx, 'tcx>)
-> BlockAndBuilder<'bcx, 'tcx>
{
if base::wants_msvc_seh(cleanup.sess()) {
return cleanup;
}
let bcx = self.fcx.new_block("cleanup", None).build();
let ccx = bcx.ccx();
let llpersonality = self.fcx.eh_personality();
let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.fcx.llfn);
bcx.set_cleanup(llretval);
let slot = self.get_personality_slot(&bcx);
bcx.store(llretval, slot);
bcx.br(cleanup.llbb());
bcx
}
/// Create prologue cleanuppad instruction under MSVC SEH handling scheme.
///
/// Also handles setting some state for the original trans and creating an operand bundle for
/// function calls.
fn make_cleanup_pad(&mut self, bb: mir::BasicBlock) -> Option<(ValueRef, OperandBundleDef)> {
let bcx = self.bcx(bb);
let data = self.mir.basic_block_data(bb);
let use_funclets = base::wants_msvc_seh(bcx.sess()) && data.is_cleanup;
let cleanup_pad = if use_funclets {
bcx.set_personality_fn(self.fcx.eh_personality());
bcx.at_start(|bcx| {
DebugLoc::None.apply_to_bcx(bcx);
Some(bcx.cleanup_pad(None, &[]))
})
} else {
None
};
// Set the landingpad global-state for old translator, so it knows about the SEH used.
bcx.set_lpad(if let Some(cleanup_pad) = cleanup_pad {
Some(common::LandingPad::msvc(cleanup_pad))
} else if data.is_cleanup {
Some(common::LandingPad::gnu())
} else {
None
});
cleanup_pad.map(|f| (f, OperandBundleDef::new("funclet", &[f])))
}
fn | (&mut self) -> Block<'bcx, 'tcx> {
self.unreachable_block.unwrap_or_else(|| {
let bl = self.fcx.new_block("unreachable", None);
bl.build().unreachable();
self.unreachable_block = Some(bl);
bl
})
}
fn bcx(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'bcx, 'tcx> {
self.blocks[bb.index()].build()
}
pub fn llblock(&self, bb: mir::BasicBlock) -> BasicBlockRef {
self.blocks[bb.index()].llbb
}
fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>,
dest: &mir::Lvalue<'tcx>, fn_ret_ty: &ArgType,
llargs: &mut Vec<ValueRef>, is_intrinsic: bool) -> ReturnDest {
// If the return is ignored, we can just return a do-nothing ReturnDest
if fn_ret_ty.is_ignore() {
return ReturnDest::Nothing;
}
let dest = match *dest {
mir::Lvalue::Temp(idx) => {
let lvalue_ty = self.mir.lvalue_ty(bcx.tcx(), dest);
let lvalue_ty = bcx.monomorphize(&lvalue_ty);
let ret_ty = lvalue_ty.to_ty(bcx.tcx());
match self.temps[idx as usize] {
TempRef::Lvalue(dest) => dest,
TempRef::Operand(None) => {
// Handle temporary lvalues, specifically Operand ones, as
// they don't have allocas
return if fn_ret_ty.is_indirect() {
// Odd, but possible, case, we have an operand temporary,
// but the calling convention has an indirect return.
let tmp = bcx.with_block(|bcx| {
base::alloc_ty(bcx, ret_ty, "tmp_ret")
});
llargs.push(tmp);
ReturnDest::IndirectOperand(tmp, idx)
} else if is_intrinsic {
// Currently, intrinsics always need a location to store
// the result. so we create a temporary alloca for the
// result
let tmp = bcx.with_block(|bcx| {
base::alloc_ty(bcx, ret_ty, "tmp_ret")
});
ReturnDest::IndirectOperand(tmp, idx)
} else {
ReturnDest::DirectOperand(idx)
};
}
TempRef::Operand(Some(_)) => {
bug!("lvalue temp already assigned to");
}
}
}
_ => self.trans_lvalue(bcx, dest)
};
if fn_ret_ty.is_indirect() {
llargs.push(dest.llval);
ReturnDest::Nothing
} else {
ReturnDest::Store(dest.llval)
}
}
fn trans_transmute(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>,
src: &mir::Operand<'tcx>, dst: LvalueRef<'tcx>) {
let mut val = self.trans_operand(bcx, src);
if let ty::TyFnDef(def_id, substs, _) = val.ty.sty {
let llouttype = type_of::type_of(bcx.ccx(), dst.ty.to_ty(bcx.tcx()));
let out_type_size = llbitsize_of_real(bcx.ccx(), llouttype);
if out_type_size != 0 {
// FIXME #19925 Remove this hack after a release cycle.
let f = Callee::def(bcx.ccx(), def_id, substs);
let datum = f.reify(bcx.ccx());
val = OperandRef {
val: OperandValue::Immediate(datum.val),
ty: datum.ty
};
}
}
let llty = type_of::type_of(bcx.ccx(), val.ty);
let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to());
self.store_operand(bcx, cast_ptr, val);
}
// Stores the return value of a function call into its final location.
fn store_return(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
dest: ReturnDest,
ret_ty: ArgType,
op: OperandRef<'tcx>) {
use self::ReturnDest::*;
match dest {
Nothing => (),
Store(dst) => ret_ty.store(bcx, op.immediate(), dst),
IndirectOperand(tmp, idx) => {
let op = self.trans_load(bcx, tmp, op.ty);
self.temps[idx as usize] = TempRef::Operand(Some(op));
}
DirectOperand(idx) => {
let op = if type_is_fat_ptr(bcx.tcx(), op.ty) {
let llval = op.immediate();
let ptr = bcx.extract_value(llval, 0);
let meta = bcx.extract_value(llval, 1);
OperandRef {
val: OperandValue::FatPtr(ptr, meta),
ty: op.ty
}
} else {
op
};
self.temps[idx as usize] = TempRef::Operand(Some(op));
}
}
}
}
enum ReturnDest {
// Do nothing, the return value is indirect or ignored
Nothing,
// Store the return value to the pointer
Store(ValueRef),
// Stores an indirect return value to an operand temporary lvalue
IndirectOperand(ValueRef, u32),
// Stores a direct return value to an operand temporary lvalue
DirectOperand(u32)
}
| unreachable_block |
special_indent_type.rs | use wasm_bindgen::prelude::*;
use serde::ser::{SerializeStruct, Serializer};
use serde::Serialize;
// INFO: wasm-bindgen only allows C-style enums for now.
// Please convert the TypeScript type to the following type.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum SpecialIndentType { | #[wasm_bindgen]
#[derive(Serialize, Copy, Clone, Debug)]
pub enum SpecialIndentKind {
FirstLine,
Hanging,
}
impl Serialize for SpecialIndentType {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match *self {
SpecialIndentType::FirstLine(s) => {
let mut t = serializer.serialize_struct("FirstLine", 2)?;
t.serialize_field("type", "firstLine")?;
t.serialize_field("val", &s)?;
t.end()
}
SpecialIndentType::Hanging(s) => {
let mut t = serializer.serialize_struct("Hanging", 2)?;
t.serialize_field("type", "hanging")?;
t.serialize_field("val", &s)?;
t.end()
}
}
}
} | FirstLine(i32),
Hanging(i32),
}
|
memory.go | package memory
import (
"errors"
"fmt"
"kube-proxless/internal/config"
"kube-proxless/internal/logger"
"kube-proxless/internal/model"
"kube-proxless/internal/utils"
"sync"
"time"
)
type Interface interface {
UpsertMemoryMap(route *model.Route) error
GetRouteByDomain(domain string) (*model.Route, error)
GetRouteByDeployment(deploy, namespace string) (*model.Route, error)
UpdateLastUsed(id string, t time.Time) error
UpdateIsRunning(id string, isRunning bool) error
DeleteRoute(id string) error
GetRoutesToScaleDown() map[string]model.Route
}
type MemoryMap struct {
m map[string]*model.Route
lock sync.RWMutex
}
func NewMemoryMap() *MemoryMap {
return &MemoryMap{
m: make(map[string]*model.Route),
lock: sync.RWMutex{},
}
}
func (s *MemoryMap) UpsertMemoryMap(route *model.Route) error {
// error if deployment or domains are already associated to another route
err := checkDeployAndDomainsOwnership(
s, route.GetId(), route.GetDeployment(), route.GetNamespace(), route.GetDomains())
if err != nil {
return err
}
if existingRoute, ok := s.m[route.GetId()]; ok {
// /!\ this needs to be on top - otherwise the data will have already been overridden in the route
newKeys := cleanMemoryMap(
s,
existingRoute.GetDeployment(), existingRoute.GetNamespace(), existingRoute.GetDomains(),
route.GetDeployment(), route.GetNamespace(), route.GetDomains())
// associate the route to new deployment key / domains
for _, k := range newKeys {
s.m[k] = existingRoute
}
// TODO check the errors
_ = existingRoute.SetService(route.GetService())
_ = existingRoute.SetPort(route.GetPort())
_ = existingRoute.SetDeployment(route.GetDeployment())
_ = existingRoute.SetDomains(route.GetDomains())
existingRoute.SetTTLSeconds(route.GetTTLSeconds())
existingRoute.SetReadinessTimeoutSeconds(route.GetReadinessTimeoutSeconds())
// existingRoute is a pointer and it's changing dynamically - no need to "persist" the change in the map
keys := append(
[]string{route.GetId(), genDeploymentKey(existingRoute.GetDeployment(), existingRoute.GetNamespace())},
route.GetDomains()...)
logger.Debugf("Updated route - newKeys: [%s] - keys: [%s] - obj: %v", newKeys, keys, existingRoute)
} else {
createRoute(s, route)
}
return nil
}
// return an error if deploy or domains are already associated to a different id
func checkDeployAndDomainsOwnership(s *MemoryMap, id, deploy, ns string, domains []string) error {
r, err := s.GetRouteByDeployment(deploy, ns)
if err == nil && r.GetId() != id {
return errors.New(fmt.Sprintf("Deployment %s.%s is already owned by %s", deploy, ns, r.GetId()))
}
for _, d := range domains {
r, err = s.GetRouteByDomain(d)
if err == nil && r.GetId() != id {
return errors.New(fmt.Sprintf("Domain %s is already owned by %s", d, r.GetId()))
}
}
return nil
}
func createRoute(s *MemoryMap, route *model.Route) {
s.lock.Lock()
defer s.lock.Unlock()
deploymentKey := genDeploymentKey(route.GetDeployment(), route.GetNamespace())
s.m[route.GetId()] = route
s.m[deploymentKey] = route
for _, d := range route.GetDomains() {
s.m[d] = route
}
keys := append([]string{route.GetId(), deploymentKey}, route.GetDomains()...)
logger.Debugf("Created route - keys: [%s] - obj: %v", keys, route)
}
// Remove old domains and deployment from the map if they are not == new ones
// return the domains and deployment that are not a key in the map
func cleanMemoryMap(
s *MemoryMap,
oldDeploy, oldNs string, oldDomains []string,
newDeploy, newNs string, newDomains []string) []string {
s.lock.Lock()
defer s.lock.Unlock()
var newKeys []string
deployKeyNotInMap := cleanOldDeploymentFromMemoryMap(s, oldDeploy, oldNs, newDeploy, newNs)
if deployKeyNotInMap != "" {
newKeys = append(newKeys, deployKeyNotInMap)
}
domainsNotInMap := cleanOldDomainsFromMemoryMap(s, oldDomains, newDomains)
if newDomains != nil {
newKeys = append(newKeys, domainsNotInMap...)
}
if newKeys == nil {
return []string{}
}
return newKeys
}
// return the new deployment key if it does not exist in the map
func cleanOldDeploymentFromMemoryMap(s *MemoryMap, oldDeploy, oldNs, newDeploy, newNs string) string {
oldDeploymentKey := genDeploymentKey(oldDeploy, oldNs)
newDeploymentKey := genDeploymentKey(newDeploy, newNs)
if oldDeploymentKey != newDeploymentKey {
delete(s.m, oldDeploymentKey)
return newDeploymentKey
}
return ""
}
// TODO review complexity
// return the new domains that were not in the oldDomains list
func | (s *MemoryMap, oldDomains, newDomains []string) []string {
// get the difference between the 2 domains arrays
diff := utils.DiffUnorderedArray(oldDomains, newDomains)
var newKeys []string
if diff != nil && len(diff) > 0 {
// remove domain from the map if they are not in the list of new Domains
for _, d := range diff {
if !utils.Contains(newDomains, d) {
delete(s.m, d)
} else {
newKeys = append(newKeys, d)
}
}
}
if newKeys == nil {
return []string{}
}
return newKeys
}
func genDeploymentKey(deployment, namespace string) string {
return fmt.Sprintf("%s.%s", deployment, namespace)
}
func (s *MemoryMap) GetRouteByDomain(domain string) (*model.Route, error) {
return getRoute(s, domain)
}
func (s *MemoryMap) GetRouteByDeployment(deploy, namespace string) (*model.Route, error) {
deploymentKey := genDeploymentKey(deploy, namespace)
return getRoute(s, deploymentKey)
}
func getRoute(s *MemoryMap, key string) (*model.Route, error) {
s.lock.Lock()
defer s.lock.Unlock()
if route, ok := s.m[key]; ok {
return route, nil
}
return nil, errors.New(fmt.Sprintf("Route %s not found in map", key))
}
func (s *MemoryMap) UpdateLastUsed(id string, t time.Time) error {
s.lock.Lock()
defer s.lock.Unlock()
if route, ok := s.m[id]; ok {
// No need to persist in the map, it's a pointer
route.SetLastUsed(t)
return nil
}
return errors.New(fmt.Sprintf("Route %s not found in map", id))
}
func (s *MemoryMap) UpdateIsRunning(id string, isRunning bool) error {
s.lock.Lock()
defer s.lock.Unlock()
if route, ok := s.m[id]; ok {
// No need to persist in the map, it's a pointer
route.SetIsRunning(isRunning)
return nil
}
return errors.New(fmt.Sprintf("Route %s not found in map", id))
}
func (s *MemoryMap) DeleteRoute(id string) error {
s.lock.Lock()
defer s.lock.Unlock()
if route, ok := s.m[id]; ok {
deploymentKey := genDeploymentKey(route.GetDeployment(), route.GetNamespace())
delete(s.m, route.GetId())
delete(s.m, deploymentKey)
for _, d := range route.GetDomains() {
delete(s.m, d)
}
return nil
}
return errors.New(fmt.Sprintf("Route %s not found in map", id))
}
func (s *MemoryMap) GetRoutesToScaleDown() map[string]model.Route {
s.lock.Lock()
defer s.lock.Unlock()
deploymentToScaleDown := map[string]model.Route{}
for _, route := range s.m {
if _, ok := deploymentToScaleDown[route.GetId()]; !ok {
timeIdle := time.Now().Sub(route.GetLastUsed())
ttl := config.ServerlessTTLSeconds
if route.GetTTLSeconds() != nil {
ttl = *route.GetTTLSeconds()
}
// https://stackoverflow.com/a/41503910/5683655
if int64(timeIdle/time.Second) >= int64(ttl) {
deploymentToScaleDown[route.GetId()] = *route
}
}
}
return deploymentToScaleDown
}
| cleanOldDomainsFromMemoryMap |
admin.module.ts | import { NgModule, CUSTOM_ELEMENTS_SCHEMA } from '@angular/core';
import { RouterModule } from '@angular/router';
import { JhiLanguageService } from 'ng-jhipster';
import { JhiLanguageHelper } from 'app/core';
import { RoomieSharedModule } from 'app/shared';
import { RoomieElasticsearchReindexModule } from './elasticsearch-reindex/elasticsearch-reindex.module';
/* jhipster-needle-add-admin-module-import - JHipster will add admin modules imports here */
import {
adminState,
AuditsComponent,
UserMgmtComponent,
UserMgmtDetailComponent,
UserMgmtUpdateComponent,
UserMgmtDeleteDialogComponent,
LogsComponent,
JhiMetricsMonitoringComponent,
JhiHealthModalComponent,
JhiHealthCheckComponent,
JhiConfigurationComponent,
JhiDocsComponent
} from './';
@NgModule({
imports: [
RoomieSharedModule,
RouterModule.forChild(adminState),
RoomieElasticsearchReindexModule
/* jhipster-needle-add-admin-module - JHipster will add admin modules here */
],
declarations: [
AuditsComponent,
UserMgmtComponent,
UserMgmtDetailComponent,
UserMgmtUpdateComponent,
UserMgmtDeleteDialogComponent,
LogsComponent,
JhiConfigurationComponent,
JhiHealthCheckComponent,
JhiHealthModalComponent,
JhiDocsComponent,
JhiMetricsMonitoringComponent
],
providers: [{ provide: JhiLanguageService, useClass: JhiLanguageService }],
entryComponents: [UserMgmtDeleteDialogComponent, JhiHealthModalComponent],
schemas: [CUSTOM_ELEMENTS_SCHEMA]
})
export class | {
constructor(private languageService: JhiLanguageService, private languageHelper: JhiLanguageHelper) {
this.languageHelper.language.subscribe((languageKey: string) => {
if (languageKey !== undefined) {
this.languageService.changeLanguage(languageKey);
}
});
}
}
| RoomieAdminModule |
root.go | /*
Copyright © 2020 Jeeseung Han <[email protected]>
*/
package cmd
import (
"fmt"
"os"
"github.com/spf13/cobra"
homedir "github.com/mitchellh/go-homedir"
"github.com/spf13/viper"
)
const (
DefaultFilePath string = ".ju"
)
var cfgFile string
// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
Use: "ju",
Short: "Frequently used utility commands",
Long: "Jeeseung's Toolbox CLI",
// Uncomment the following line if your bare application
// has an action associated with it:
// Run: func(cmd *cobra.Command, args []string) { },
}
// Execute adds all child commands to the root command and sets flags appropriately. | if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
func init() {
cobra.OnInitialize(initConfig)
// Here you will define your flags and configuration settings.
// Cobra supports persistent flags, which, if defined here,
// will be global for your application.
rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.ju.yaml)")
// Cobra also supports local flags, which will only run
// when this action is called directly.
rootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
if cfgFile != "" {
// Use config file from the flag.
viper.SetConfigFile(cfgFile)
} else {
// Find home directory.
home, err := homedir.Dir()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
// Search config in home directory with name ".ju" (without extension).
viper.AddConfigPath(home)
viper.SetConfigName(DefaultFilePath)
fullPath := home + "/" + DefaultFilePath + ".yaml"
os.OpenFile(fullPath, os.O_RDONLY|os.O_CREATE, 0666)
}
viper.AutomaticEnv() // read in environment variables that match
// If a config file is found, read it in.
if err := viper.ReadInConfig(); err == nil {
fmt.Println("Using config file:", viper.ConfigFileUsed())
}
} | // This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() { |
reorderableItem.js | /*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import PropTypes from 'prop-types';
import styled from 'styled-components';
/**
* Internal dependencies
*/
import useReorderable from './useReorderable';
const Container = styled.div`
z-index: 1;
`;
function | ({
children,
position,
onStartReordering,
disabled,
...props
}) {
const {
actions: { handleStartReordering },
} = useReorderable();
return (
<Container
{...(disabled
? null
: {
onPointerDown: handleStartReordering({
position,
onStartReordering,
}),
})}
{...props}
>
{children}
</Container>
);
}
ReorderableItem.propTypes = {
children: PropTypes.oneOfType([
PropTypes.arrayOf(PropTypes.node),
PropTypes.node,
]).isRequired,
position: PropTypes.number.isRequired,
onStartReordering: PropTypes.func,
disabled: PropTypes.bool,
};
export default ReorderableItem;
| ReorderableItem |
helper.py | import numpy as np, sys, os, random, pdb, json, uuid, time, argparse
from pprint import pprint
import logging, logging.config
from collections import defaultdict as ddict
# from ordered_set import OrderedSet
# PyTorch related imports
import torch
from torch.nn import functional as F
from torch.nn.init import xavier_normal_
from torch.utils.data import DataLoader
from torch.nn import Parameter
# from torch_scatter import scatter_add
from .util_scatter import scatter_add
try:
from torch import irfft
from torch import rfft
except ImportError:
from torch.fft import irfft2
from torch.fft import rfft2
def rfft(x, d):
t = rfft2(x, dim=(-d))
return torch.stack((t.real, t.imag), -1)
def irfft(x, d, signal_sizes):
return irfft2(torch.complex(x[:, :, 0], x[:, :, 1]), s=signal_sizes, dim=(-d))
np.set_printoptions(precision=4)
def set_gpu(gpus):
|
def get_logger(name, log_dir, config_dir):
"""
Creates a logger object
Parameters
----------
name: Name of the logger file
log_dir: Directory where logger file needs to be stored
config_dir: Directory from where log_config.json needs to be read
Returns
-------
A logger object which writes to both file and stdout
"""
config_dict = json.load(open(config_dir + 'log_config.json'))
config_dict['handlers']['file_handler']['filename'] = log_dir + name.replace('/', '-')
logging.config.dictConfig(config_dict)
logger = logging.getLogger(name)
std_out_format = '%(asctime)s - [%(levelname)s] - %(message)s'
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logging.Formatter(std_out_format))
logger.addHandler(consoleHandler)
return logger
def get_combined_results(left_results, right_results):
results = {}
count = float(left_results['count'])
results['left_mr'] = round(left_results['mr'] / count, 5)
results['left_mrr'] = round(left_results['mrr'] / count, 5)
results['right_mr'] = round(right_results['mr'] / count, 5)
results['right_mrr'] = round(right_results['mrr'] / count, 5)
results['mr'] = round((left_results['mr'] + right_results['mr']) / (2 * count), 5)
results['mrr'] = round((left_results['mrr'] + right_results['mrr']) / (2 * count), 5)
for k in range(10):
results['left_hits@{}'.format(k + 1)] = round(left_results['hits@{}'.format(k + 1)] / count, 5)
results['right_hits@{}'.format(k + 1)] = round(right_results['hits@{}'.format(k + 1)] / count, 5)
results['hits@{}'.format(k + 1)] = round(
(left_results['hits@{}'.format(k + 1)] + right_results['hits@{}'.format(k + 1)]) / (2 * count), 5)
return results
def get_param(shape):
param = Parameter(torch.Tensor(*shape));
xavier_normal_(param.data)
return param
def com_mult(a, b):
r1, i1 = a[..., 0], a[..., 1]
r2, i2 = b[..., 0], b[..., 1]
return torch.stack([r1 * r2 - i1 * i2, r1 * i2 + i1 * r2], dim=-1)
def conj(a):
a[..., 1] = -a[..., 1]
return a
def cconv(a, b):
return irfft(com_mult(rfft(a, 1), rfft(b, 1)), 1, signal_sizes=(a.shape[-1],))
def ccorr(a, b):
return irfft(com_mult(conj(rfft(a, 1)), rfft(b, 1)), 1, signal_sizes=(a.shape[-1],))
def construct_adj(train_dataset, relation_dict_len):
edge_index, edge_type = [], []
if train_dataset.data.shape[1] == 3: # score_based
for sub, rel, obj in train_dataset.data:
edge_index.append((sub, obj))
edge_type.append(rel)
for sub, rel, obj in train_dataset.data:
edge_index.append((obj, sub))
edge_type.append(rel + relation_dict_len)
else: # classification-based
label = train_dataset.label_data
for j,(sub, rel) in enumerate(train_dataset.data):
for elem in torch.nonzero(label[j]):
e2_idx = elem.item()
edge_index.append((sub,e2_idx))
edge_type.append(rel)
for j,(sub, rel) in enumerate(train_dataset.data):
for elem in torch.nonzero(label[j]):
e2_idx = elem.item()
edge_index.append((e2_idx,sub))
edge_type.append(rel + relation_dict_len)
return edge_index,edge_type | """
Sets the GPU to be used for the run
Parameters
----------
gpus: List of GPUs to be used for the run
Returns
-------
"""
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = gpus |
user.py | # -*- coding: utf-8 -*-
from fastapi import APIRouter, Body, Security
from pycloud_api.crud.tenant import get_tenant_by_id
from pycloud_api.crud.user import get_current_user, check_free_username_and_email
from pycloud_api.models.schemas.tenant import Tenant, TenantInResponse
from pycloud_api.models.schemas.user import User, UserInDB, UserInResponse, UserInUpdate
router = APIRouter()
@router.get("/users/me", response_model=UserInResponse, tags=["users"])
async def | (
current_user: UserInDB = Security(get_current_user, scopes=["read:profile"])
):
return UserInResponse(user=User(**current_user.dict()))
@router.put("/users/me", response_model=UserInResponse, tags=["users"])
async def update_current_user(
user: UserInUpdate = Body(..., embed=True),
current_user: UserInDB = Security(get_current_user, scopes=["edit:profile"]),
):
if user.username == current_user.username:
user.username = None
if user.email == current_user.email:
user.email = None
await check_free_username_and_email(user.username, user.email)
return UserInResponse(user=User(**current_user.dict()))
@router.get("/users/me/tenant", response_model=TenantInResponse, tags=["users"])
async def retrieve_current_user_tenant(
current_user: UserInDB = Security(get_current_user, scopes=["read:profile"])
):
tenant_by_id = await get_tenant_by_id(current_user.tenant)
return TenantInResponse(tenant=Tenant(**tenant_by_id.dict()))
| retrieve_current_user |
solution.py | from typing import Optional
class Solution:
def search(self, node: Optional[TreeNode], target: int):
if target - node.val in self.set and target - node.val != node.val:
self.flag = True
return
self.set.add(node.val)
if node.left:
self.search(node.left, target)
if node.right:
self.search(node.right, target)
def findTarget(self, root: Optional[TreeNode], k: int) -> bool: | return self.flag | self.set = set()
self.flag = False
self.search(root, k) |
Main.js | console.log("Begin load 'thera_bridge' module.");
require('./thera_bridge/DataConverter.js');
require('./thera_bridge/WorkerBridge.js');
| console.log("Load 'thera_bridge' module ok."); | |
test_api_status.py | # -*- coding: utf-8 -*-
from bdea.client import BDEAStatusResponse
class TestBDEAStatusResponse(object):
RESPONSE = {
'apikeystatus': 'active',
'commercial_credit_status': 'exhausted',
'commercial_credit_status_percent': 0,
'credits': '0',
'credits_time': '2015-10-24 13:15:08',
'request_status': 'ok',
'servertime': '2015-10-24 13:38:38',
'version': '1.3'
}
def test_empty_response_is_not_valid(self):
res = BDEAStatusResponse({})
assert res.status() == False
def test_empty_response_means_zero_credits(self):
res = BDEAStatusResponse({})
assert res.credits() == 0
def test_empty_response_means_exausted_credits(self):
res = BDEAStatusResponse({})
assert res.credit_status() == 'exhausted'
def | (self):
res = self.RESPONSE.copy()
res.update({
'request_status': 'ok',
'apikeystatus': 'active'
})
assert BDEAStatusResponse(res).status() == True
res.update({
'request_status': 'ok',
'apikeystatus': 'inactive'
})
assert BDEAStatusResponse(res).status() == False
res.update({
'request_status': 'fail',
'apikeystatus': 'active'
})
assert BDEAStatusResponse(res).status() == False
def test_credit_status(self):
res = self.RESPONSE.copy()
for ccs in ('good', 'low', 'exhausted'):
res.update({
'request_status': 'ok',
'apikeystatus': 'active',
'commercial_credit_status': ccs
})
assert BDEAStatusResponse(res).credit_status() == ccs
def test_credits(self):
res = self.RESPONSE.copy()
res.update({
'request_status': 'ok',
'apikeystatus': 'active',
'credits': '100'
})
assert BDEAStatusResponse(res).credits() == 100
| test_request_status_and_apikey_status |
user_scope_teams_app_installation_request_builder.go | package item
import (
ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9 "github.com/microsoft/kiota/abstractions/go"
i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55 "github.com/microsoft/kiota/abstractions/go/serialization"
i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87 "github.com/microsoftgraph/msgraph-sdk-go/models/microsoft/graph"
i82ae2e3b126b0e2c2ce0859ada623300f796fca1ca04e53e697999c70dd48507 "github.com/microsoftgraph/msgraph-sdk-go/me/teamwork/installedapps/item/chat"
)
// UserScopeTeamsAppInstallationRequestBuilder builds and executes requests for operations under \me\teamwork\installedApps\{userScopeTeamsAppInstallation-id}
type UserScopeTeamsAppInstallationRequestBuilder struct {
// Path parameters for the request
pathParameters map[string]string;
// The request adapter to use to execute the requests.
requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter;
// Url template to use to build the URL for the current request builder
urlTemplate string;
}
// UserScopeTeamsAppInstallationRequestBuilderDeleteOptions options for Delete
type UserScopeTeamsAppInstallationRequestBuilderDeleteOptions struct {
// Request headers
H map[string]string;
// Request options
O []ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestOption;
// Response handler to use in place of the default response handling provided by the core service
ResponseHandler ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ResponseHandler;
}
// UserScopeTeamsAppInstallationRequestBuilderGetOptions options for Get
type UserScopeTeamsAppInstallationRequestBuilderGetOptions struct {
// Request headers
H map[string]string;
// Request options
O []ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestOption;
// Request query parameters
Q *UserScopeTeamsAppInstallationRequestBuilderGetQueryParameters;
// Response handler to use in place of the default response handling provided by the core service
ResponseHandler ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ResponseHandler;
}
// UserScopeTeamsAppInstallationRequestBuilderGetQueryParameters the apps installed in the personal scope of this user.
type UserScopeTeamsAppInstallationRequestBuilderGetQueryParameters struct {
// Expand related entities
Expand []string;
// Select properties to be returned
Select []string;
}
// UserScopeTeamsAppInstallationRequestBuilderPatchOptions options for Patch
type UserScopeTeamsAppInstallationRequestBuilderPatchOptions struct {
//
Body *i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.UserScopeTeamsAppInstallation;
// Request headers
H map[string]string;
// Request options
O []ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestOption;
// Response handler to use in place of the default response handling provided by the core service
ResponseHandler ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ResponseHandler;
}
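// Chat returns a request builder for the chat navigation property of this installed app.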
func (m *UserScopeTeamsAppInstallationRequestBuilder) Chat()(*i82ae2e3b126b0e2c2ce0859ada623300f796fca1ca04e53e697999c70dd48507.ChatRequestBuilder) {
return i82ae2e3b126b0e2c2ce0859ada623300f796fca1ca04e53e697999c70dd48507.NewChatRequestBuilderInternal(m.pathParameters, m.requestAdapter);
}
// NewUserScopeTeamsAppInstallationRequestBuilderInternal instantiates a new UserScopeTeamsAppInstallationRequestBuilder and sets the default values.
func NewUserScopeTeamsAppInstallationRequestBuilderInternal(pathParameters map[string]string, requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter)(*UserScopeTeamsAppInstallationRequestBuilder) {
m := &UserScopeTeamsAppInstallationRequestBuilder{
}
m.urlTemplate = "{+baseurl}/me/teamwork/installedApps/{userScopeTeamsAppInstallation_id}{?select,expand}";
urlTplParams := make(map[string]string)
for idx, item := range pathParameters {
urlTplParams[idx] = item
}
m.pathParameters = pathParameters;
m.requestAdapter = requestAdapter;
return m
}
// NewUserScopeTeamsAppInstallationRequestBuilder instantiates a new UserScopeTeamsAppInstallationRequestBuilder and sets the default values.
func NewUserScopeTeamsAppInstallationRequestBuilder(rawUrl string, requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter)(*UserScopeTeamsAppInstallationRequestBuilder) {
urlParams := make(map[string]string)
urlParams["request-raw-url"] = rawUrl
return NewUserScopeTeamsAppInstallationRequestBuilderInternal(urlParams, requestAdapter)
}
// CreateDeleteRequestInformation the apps installed in the personal scope of this user.
func (m *UserScopeTeamsAppInstallationRequestBuilder) CreateDeleteRequestInformation(options *UserScopeTeamsAppInstallationRequestBuilderDeleteOptions)(*ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestInformation, error) {
requestInfo := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.DELETE
if options != nil && options.H != nil {
requestInfo.Headers = options.H
}
if options != nil && len(options.O) != 0 {
err := requestInfo.AddRequestOptions(options.O...)
if err != nil |
}
return requestInfo, nil
}
// CreateGetRequestInformation the apps installed in the personal scope of this user.
func (m *UserScopeTeamsAppInstallationRequestBuilder) CreateGetRequestInformation(options *UserScopeTeamsAppInstallationRequestBuilderGetOptions)(*ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestInformation, error) {
requestInfo := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.GET
if options != nil && options.Q != nil {
requestInfo.AddQueryParameters(*(options.Q))
}
if options != nil && options.H != nil {
requestInfo.Headers = options.H
}
if options != nil && len(options.O) != 0 {
err := requestInfo.AddRequestOptions(options.O...)
if err != nil {
return nil, err
}
}
return requestInfo, nil
}
// CreatePatchRequestInformation the apps installed in the personal scope of this user.
func (m *UserScopeTeamsAppInstallationRequestBuilder) CreatePatchRequestInformation(options *UserScopeTeamsAppInstallationRequestBuilderPatchOptions)(*ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestInformation, error) {
requestInfo := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.PATCH
requestInfo.SetContentFromParsable(m.requestAdapter, "application/json", options.Body)
if options != nil && options.H != nil {
requestInfo.Headers = options.H
}
if options != nil && len(options.O) != 0 {
err := requestInfo.AddRequestOptions(options.O...)
if err != nil {
return nil, err
}
}
return requestInfo, nil
}
// Delete the apps installed in the personal scope of this user.
func (m *UserScopeTeamsAppInstallationRequestBuilder) Delete(options *UserScopeTeamsAppInstallationRequestBuilderDeleteOptions)(error) {
requestInfo, err := m.CreateDeleteRequestInformation(options);
if err != nil {
return err
}
err = m.requestAdapter.SendNoContentAsync(*requestInfo, nil, nil)
if err != nil {
return err
}
return nil
}
// Get the apps installed in the personal scope of this user.
func (m *UserScopeTeamsAppInstallationRequestBuilder) Get(options *UserScopeTeamsAppInstallationRequestBuilderGetOptions)(*i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.UserScopeTeamsAppInstallation, error) {
requestInfo, err := m.CreateGetRequestInformation(options);
if err != nil {
return nil, err
}
res, err := m.requestAdapter.SendAsync(*requestInfo, func () i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable { return i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.NewUserScopeTeamsAppInstallation() }, nil, nil)
if err != nil {
return nil, err
}
return res.(*i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.UserScopeTeamsAppInstallation), nil
}
// Patch the apps installed in the personal scope of this user.
func (m *UserScopeTeamsAppInstallationRequestBuilder) Patch(options *UserScopeTeamsAppInstallationRequestBuilderPatchOptions)(error) {
requestInfo, err := m.CreatePatchRequestInformation(options);
if err != nil {
return err
}
err = m.requestAdapter.SendNoContentAsync(*requestInfo, nil, nil)
if err != nil {
return err
}
return nil
}
| {
return nil, err
} |
fluo.py | from PyQt5.QtWidgets import (QApplication, QComboBox, QGridLayout, QGroupBox, QLabel, QPushButton,
QFileDialog, QMessageBox, QWidget, QSizePolicy, QCheckBox)
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import numpy as np
import warnings, os, time
from skimage.io import imsave
import scipy.ndimage as ndi
from matplotlib.figure import Figure
from scipy.interpolate import interp1d
import matplotlib as mpl
warnings.filterwarnings("ignore")
from matplotlib import rc
rc('font', size=12)
rc('font', family='Arial')
# rc('font', serif='Times')
rc('pdf', fonttype=42)
# rc('text', usetex=True)
class profileAP_condMode(QWidget):
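# Widget that plots anterior-posterior (A-P) intensity profiles for every selected group,
# with optional background subtraction, percentile normalization of the Y axis,
# normalization of the A-P axis, signal-based orientation correction and
# left/right/center alignment of profiles of different lengths.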
def __init__(self, data_all, channel, colors, profileType='APprofile', parent=None, ylabel='Intensity (a.u.)'):
super(profileAP_condMode, self).__init__(parent)
self.data_all = data_all
self.channel = channel
self.colors = colors
self.profileType = profileType
self.ylabel = ylabel
self.make()
def make(self):
self.figure = Figure(figsize=(4, 2.5), dpi=100)
self.canvas = FigureCanvas(self.figure)
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.figure.clear()
ax = self.figure.add_subplot(111)
self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)
ax.set_xlabel(self.profileType)
ax.ticklabel_format(axis="x", style="sci", scilimits=(2,2))
ax.set_ylabel(self.ylabel)
ax.ticklabel_format(axis="y", style="sci", scilimits=(0,2))
# ax.axis('off')
self.canvas.draw()
self.YnormBtn = QComboBox()
self.YnormBtn.addItem('No normalization')
self.YnormBtn.addItem('Global percentile')
self.YnormBtn.addItem('Group percentile')
self.YnormBtn.addItem('Folder percentile')
self.YnormBtn.addItem('Manual')
self.XnormBtn = QCheckBox('')
self.XnormBtn.setChecked(False)
self.XnormBtn.stateChanged.connect(self.onCheckingXnormBtn)
self.bckgBtn = QComboBox()
self.bckgBtn.addItem('None')
self.bckgBtn.addItem('Background')
self.bckgBtn.addItem('Minimum')
self.orientationBtn = QComboBox()
self.orientationBtn.addItem('Signal based')
self.orientationBtn.addItem('NO')
self.alignmentBtn = QComboBox()
self.alignmentBtn.addItem('Left')
self.alignmentBtn.addItem('Right')
self.alignmentBtn.addItem('Center')
self.groupSelection = self.makeGroupSelectionBtns()
self.applyBtn = QPushButton('Apply Settings')
self.applyBtn.clicked.connect(self.remakePlot)
lay = QGridLayout(self)
lay.setSpacing(10)
lay.addWidget(NavigationToolbar(self.canvas, self),0,0,1,2)
lay.addWidget(self.canvas,1,0,1,2)
lay.addWidget(QLabel('Background subtraction type:'),2,0,1,1)
lay.addWidget(self.bckgBtn,2,1,1,1)
lay.addWidget(QLabel('Y axis normalization:'),4,0,1,1)
lay.addWidget(self.YnormBtn,4,1,1,1)
lay.addWidget(QLabel('X axis normalization:'),5,0,1,1)
lay.addWidget(self.XnormBtn,5,1,1,1)
lay.addWidget(QLabel('A-P orientation correction:'),6,0,1,1)
lay.addWidget(self.orientationBtn,6,1,1,1)
lay.addWidget(QLabel('Alignment:'),7,0,1,1)
lay.addWidget(self.alignmentBtn,7,1,1,1)
lay.addWidget(self.groupSelection,8,0,1,2)
lay.addWidget(self.applyBtn,9,0,1,2)
self.remakePlot()
self.setWindowTitle('Channel')
QApplication.setStyle('Fusion')
def onCheckingXnormBtn(self):
if self.XnormBtn.isChecked():
self.alignmentBtn.setEnabled(False)
else:
self.alignmentBtn.setEnabled(True)
def makeGroupSelectionBtns(self):
group = QGroupBox("Groups to plot")
self.groupPlotBtn = []
for i in range(len(self.data_all)):
self.groupPlotBtn.append(QCheckBox('Group '+str(i)))
self.groupPlotBtn[-1].setChecked(True)
self.legendBtn = QCheckBox('Legend')
self.legendBtn.setChecked(False)
self.rawBtn = QCheckBox('Plot raw data')
self.rawBtn.setChecked(True)
lay = QGridLayout()
for i in range(len(self.data_all)):
lay.addWidget(self.groupPlotBtn[i],i,0,1,1)
lay.addWidget(self.legendBtn,0,1,1,1)
lay.addWidget(self.rawBtn,1,1,1,1)
group.setLayout(lay)
return group
def remakePlot(self):
self.figure.clear()
ax = self.figure.add_subplot(111)
self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)
ax.set_xlabel(self.profileType)
ax.ticklabel_format(axis="x", style="sci", scilimits=(2,2))
ax.set_ylabel(self.ylabel)
ax.ticklabel_format(axis="y", style="sci", scilimits=(0,2))
# ax.axis('off')
n_groups = len(self.data_all)
n_folders = [len(self.data_all[group_idx]) for group_idx in range(n_groups)]
n_gastr = [[len(self.data_all[group_idx][folder_idx]['input_file']) for folder_idx in range(n_folders[group_idx])] for group_idx in range(n_groups)]
# rearrange dataset
profiles_all = [[[0 for k in range(n_gastr[i][j])] for j in range(n_folders[i])] for i in range(n_groups)]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
profiles_all[i][j][k] = np.array(self.data_all[i][j][self.profileType][k][self.channel])
# subtract background or not
if self.bckgBtn.currentText() == 'Background':
profiles_all[i][j][k] -= self.data_all[i][j]['Background'][k][self.channel]
if self.bckgBtn.currentText() == 'Minimum':
profiles_all[i][j][k] -= np.min(profiles_all[i][j][k])
# normalize fluorescence intensity accordingly
if self.YnormBtn.currentText() == 'Global percentile':
flat = []
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
for l in profiles_all[i][j][k]:
flat.append(l)
percs = np.percentile(np.array(flat),(.3,99.7))
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
profile = np.array(profiles_all[i][j][k])
profiles_all[i][j][k] = np.clip((profile-percs[0])/(percs[1]-percs[0]),0,1.)
elif self.YnormBtn.currentText() == 'Group percentile':
flat = [[]for i in range(n_groups)]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
for l in profiles_all[i][j][k]:
flat[i].append(l)
percs = [np.percentile(np.array(f),(.3,99.7)) for f in flat]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
# print(percs[i])
profile = np.array(profiles_all[i][j][k])
profiles_all[i][j][k] = np.clip((profile-percs[i][0])/(percs[i][1]-percs[i][0]),0,1.)
elif self.YnormBtn.currentText() == 'Folder percentile':
flat = [[[] for j in range(n_folders[i])] for i in range(n_groups)]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
for l in profiles_all[i][j][k]:
flat[i][j].append(l)
percs = [[np.percentile(np.array(f),(.3,99.7)) for f in ff] for ff in flat]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
# print(percs[i][j])
profile = np.array(profiles_all[i][j][k])
profiles_all[i][j][k] = np.clip((profile-percs[i][j][0])/(percs[i][j][1]-percs[i][j][0]),0,1.)
# normalize AP axis if necessary
if self.XnormBtn.isChecked():
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
profile = profiles_all[i][j][k]
x = np.linspace(0,1,len(profile))
fun = interp1d(x,profile)
new_x = np.linspace(0,1,101)
profiles_all[i][j][k] = fun(new_x)
# compute length of longest gastruloid
max_length = []
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
max_length.append(len(profiles_all[i][j][k]))
max_length = np.max(max_length)
# orient plots according to setting
if self.orientationBtn.currentText() == 'Signal based':
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
y = np.array(profiles_all[i][j][k])[~np.isnan(profiles_all[i][j][k])]
n_p = len(y)
if np.sum(y[:int(n_p/2)])>np.sum(y[int(n_p-n_p/2):]):
profiles_all[i][j][k] = profiles_all[i][j][k][::-1]
# pad array to the right or left
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
w = max_length-len(profiles_all[i][j][k])
if self.alignmentBtn.currentText() == 'Left':
pad_width = (0,w)
if self.alignmentBtn.currentText() == 'Right':
pad_width = (w,0)
elif self.alignmentBtn.currentText() == 'Center':
if 2*int(w/2)==w:
pad_width = (int(w/2),int(w/2))
else:
pad_width = (int(w/2)+1,int(w/2))
profiles_all[i][j][k] = np.pad(profiles_all[i][j][k],pad_width,mode='constant',constant_values=np.nan)
### make plot
lines = []
for i in range(n_groups):
# plot this group only if the button is checked
if self.groupPlotBtn[i].isChecked():
ydata_group = []
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
ydata_group.append(profiles_all[i][j][k])
# plot the raw data if the button is checked
if self.rawBtn.isChecked():
ax.plot(ydata_group[-1],'-', lw=.5, c=self.colors[i], alpha = 0.2)
# compute and plot mean and std
max_length = np.max([len(d) for d in ydata_group])
_mean = np.zeros(max_length)
_std = np.zeros(max_length)
for j in range(max_length):
datapoint = []
for data in ydata_group:
datapoint.append(data[j])
_mean[j] = np.nanmean(datapoint)
_std[j] = np.nanstd(datapoint)
line = ax.plot(_mean,'-',lw=1,c=self.colors[i],label='Mean')[0]
ax.fill_between(range(len(_mean)),_mean-_std,_mean+_std,facecolor=self.colors[i],alpha=.2, linewidth=0.,label='Std')
lines.append(line)
# adjust axes lims
ax.set_ylim(0,None)
ax.set_xlim(0,None)
if self.XnormBtn.isChecked():
ax.set_xlim(0,100)
if self.YnormBtn.currentText() != 'No normalization':
ax.set_ylim(0,1)
# add legend
if self.legendBtn.isChecked():
l = ax.legend(lines,['Group '+str(i+1) for i in range(len(self.groupPlotBtn)) if self.groupPlotBtn[i].isChecked()])
l.get_frame().set_linewidth(0.0)
self.canvas.draw()
class profileAP_tlMode(QWidget):
#############
# TO BE IMPLEMENTED!!!
#############
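# Widget that shows, for one selected group, the mean A-P profile at each time point
# as a 2D image (time on the vertical axis); the image can be exported as a tif.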
def __init__(self, data_all, channel, colors, profileType='APprofile', parent=None):
super(profileAP_tlMode, self).__init__(parent)
self.data_all = data_all
self.n_groups = len(data_all)
self.channel = channel
self.colors = colors
self.profileType = profileType
self.make()
def make(self):
self.figure = Figure(figsize=(4, 2.5), dpi=100)
self.canvas = FigureCanvas(self.figure)
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.figure.clear()
ax = self.figure.add_subplot(111)
self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)
ax.set_xlabel(self.profileType)
ax.ticklabel_format(axis="x", style="sci", scilimits=(2,2))
ax.set_ylabel('Time')
ax.ticklabel_format(axis="y", style="sci", scilimits=(0,2))
# ax.axis('off')
self.canvas.draw()
###############################################
settings_group = QGroupBox('Plot settings')
self.YnormBtn = QComboBox()
self.YnormBtn.addItem('No normalization')
self.YnormBtn.addItem('Global percentile')
self.YnormBtn.addItem('Group percentile')
self.YnormBtn.addItem('Folder percentile')
self.YnormBtn.addItem('Manual')
self.XnormBtn = QCheckBox('')
self.XnormBtn.setChecked(False)
self.XnormBtn.stateChanged.connect(self.onCheckingXnormBtn)
self.bckgBtn = QComboBox()
self.bckgBtn.addItem('None')
self.bckgBtn.addItem('Background')
self.bckgBtn.addItem('Minimum')
self.orientationBtn = QComboBox()
self.orientationBtn.addItem('Signal based')
self.orientationBtn.addItem('NO')
self.alignmentBtn = QComboBox()
self.alignmentBtn.addItem('Left')
self.alignmentBtn.addItem('Right')
self.alignmentBtn.addItem('Center')
self.aspectRatioBtn = QCheckBox('')
self.aspectRatioBtn.setChecked(True)
self.groupPlotBtn = QComboBox()
for i in range(len(self.data_all)):
self.groupPlotBtn.addItem('Group '+str(i+1))
lay = QGridLayout()  # this layout belongs to settings_group (installed via setLayout below), not to self
lay.addWidget(QLabel('Background subtraction:'),2,0,1,1)
lay.addWidget(self.bckgBtn,2,1,1,1)
lay.addWidget(QLabel('Y axis normalization:'),4,0,1,1)
lay.addWidget(self.YnormBtn,4,1,1,1)
lay.addWidget(QLabel('X axis normalization:'),5,0,1,1)
lay.addWidget(self.XnormBtn,5,1,1,1)
lay.addWidget(QLabel('A-P orientation correction:'),6,0,1,1)
lay.addWidget(self.orientationBtn,6,1,1,1)
lay.addWidget(QLabel('Alignment:'),7,0,1,1)
lay.addWidget(self.alignmentBtn,7,1,1,1)
lay.addWidget(QLabel('Set axes aspect ratio to equal:'),8,0,1,1)
lay.addWidget(self.aspectRatioBtn,8,1,1,1)
lay.addWidget(QLabel('Current group:'),9,0,1,1)
lay.addWidget(self.groupPlotBtn,9,1,1,2)
settings_group.setLayout(lay)
#######################
self.applyBtn = QPushButton('Apply Settings')
self.applyBtn.clicked.connect(self.remakePlot)
self.saveBtn = QPushButton('Save Tif image')
self.saveBtn.clicked.connect(self.save_tif)
lay = QGridLayout(self)
lay.setSpacing(10)
lay.addWidget(NavigationToolbar(self.canvas, self),0,0,1,2)
lay.addWidget(self.canvas,1,0,1,2)
lay.addWidget(settings_group,2,0,1,2)
lay.addWidget(self.applyBtn,3,0,1,2)
lay.addWidget(self.saveBtn,4,0,1,2)
self.remakePlot()
self.setWindowTitle('Channel')
QApplication.setStyle('Macintosh')
def onCheckingXnormBtn(self):
if self.XnormBtn.isChecked():
self.alignmentBtn.setEnabled(False)
else:
self.alignmentBtn.setEnabled(True)
def | (self):
n_groups = len(self.data_all)
n_folders = [len(self.data_all[group_idx]) for group_idx in range(n_groups)]
n_gastr = [[len(self.data_all[group_idx][folder_idx]['input_file']) for folder_idx in range(n_folders[group_idx])] for group_idx in range(n_groups)]
# rearrange dataset
profiles_all = [[[0 for k in range(n_gastr[i][j])] for j in range(n_folders[i])] for i in range(n_groups)]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
profiles_all[i][j][k] = np.array(self.data_all[i][j][self.profileType][k][self.channel])
# subtract background or not
if self.bckgBtn.currentText() == 'Background':
profiles_all[i][j][k] -= self.data_all[i][j]['Background'][k][self.channel]
if self.bckgBtn.currentText() == 'Minimum':
profiles_all[i][j][k] -= np.min(profiles_all[i][j][k])
# normalize fluorescence intensity accordingly
percs = [None,None]
if self.YnormBtn.currentText() == 'Global percentile':
flat = []
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
for l in profiles_all[i][j][k]:
flat.append(l)
percs = np.percentile(np.array(flat),(.3,99.7))
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
profile = np.array(profiles_all[i][j][k])
profiles_all[i][j][k] = np.clip((profile-percs[0])/(percs[1]-percs[0]),0,1.)
elif self.YnormBtn.currentText() == 'Group percentile':
flat = [[]for i in range(n_groups)]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
for l in profiles_all[i][j][k]:
flat[i].append(l)
percs = [np.percentile(np.array(f),(.3,99.7)) for f in flat]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
# print(percs[i])
profile = np.array(profiles_all[i][j][k])
profiles_all[i][j][k] = np.clip((profile-percs[i][0])/(percs[i][1]-percs[i][0]),0,1.)
elif self.YnormBtn.currentText() == 'Folder percentile':
flat = [[[] for j in range(n_folders[i])] for i in range(n_groups)]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
for l in profiles_all[i][j][k]:
flat[i][j].append(l)
percs = [[np.percentile(np.array(f),(.3,99.7)) for f in ff] for ff in flat]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
# print(percs[i][j])
profile = np.array(profiles_all[i][j][k])
profiles_all[i][j][k] = np.clip((profile-percs[i][j][0])/(percs[i][j][1]-percs[i][j][0]),0,1.)
self.percs = percs
# normalize AP axis if necessary
if self.XnormBtn.isChecked():
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
profile = profiles_all[i][j][k]
x = np.linspace(0,1,len(profile))
fun = interp1d(x,profile)
new_x = np.linspace(0,1,101)
profiles_all[i][j][k] = fun(new_x)
# compute length of longest gastruloid
max_length = []
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
max_length.append(len(profiles_all[i][j][k]))
max_length = np.max(max_length)
# orient plots according to setting
if self.orientationBtn.currentText() == 'Signal based':
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
y = np.array(profiles_all[i][j][k])[~np.isnan(profiles_all[i][j][k])]
n_p = len(y)
if np.sum(y[:int(n_p/2)])>np.sum(y[int(n_p-n_p/2):]):
profiles_all[i][j][k] = profiles_all[i][j][k][::-1]
# pad array to the right or left
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
w = max_length-len(profiles_all[i][j][k])
if self.alignmentBtn.currentText() == 'Left':
pad_width = (0,w)
if self.alignmentBtn.currentText() == 'Right':
pad_width = (w,0)
elif self.alignmentBtn.currentText() == 'Center':
if 2*int(w/2)==w:
pad_width = (int(w/2),int(w/2))
else:
pad_width = (int(w/2)+1,int(w/2))
profiles_all[i][j][k] = np.pad(profiles_all[i][j][k],pad_width,mode='constant',constant_values=np.nan)
### make plot
# lines = []
self.figure.clear()
ax = self.figure.add_subplot(111)
self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)
ax.set_xlabel(self.profileType)
ax.ticklabel_format(axis="x", style="sci", scilimits=(2,2))
ax.set_ylabel('Time')
ax.ticklabel_format(axis="y", style="sci", scilimits=(0,2))
# ax.axis('off')
# plot the selected group only
i = self.groupPlotBtn.currentIndex()
# compute and plot mean and std of the selected group
# prepare blank image
max_t = np.max([n_gastr[i][j] for j in range(n_folders[i])])
max_l = np.max([len(profiles_all[i][j][k]) for j in range(n_folders[i]) for k in range(n_gastr[i][j])])
data_mean = np.zeros((max_t,max_l))
data_count = np.zeros((max_t,max_l))
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
data = np.nan_to_num(profiles_all[i][j][k])
data_mean[k,:] += data
data_count[k,:] += data!=0
# plot the raw data if the button is checked
# if self.rawBtn.isChecked():
# ax.plot(data_group[-1],'-', lw=.5, c=self.colors[i], alpha = 0.2)
data_mean = data_mean.astype(np.float64)/data_count.astype(np.float64)  # np.float was removed from NumPy; use an explicit float64 dtype
data_mean = np.nan_to_num(data_mean)
aspect = 'auto'
if self.aspectRatioBtn.isChecked():
aspect = 'equal'
ax.imshow(data_mean, aspect=aspect)
ax.set_title('Group '+str(i+1))
self.tif_data = data_mean
self.canvas.draw()
def save_tif(self):
name,_ = QFileDialog.getSaveFileName(self, 'Save Overview File')
if name != '':
### check file extension: allow to save in other formats, but bias towards tif
if os.path.splitext(name)[-1]!='.tif':
buttonReply = QMessageBox.question(self,'File format warning!','File format not recommended. Do you want to save the image as tif?')
if buttonReply == QMessageBox.Yes:
name = os.path.splitext(name)[0]+'.tif'
# convert the image into int16 with the right brightness and contrast
if self.percs[0] is not None:
self.tif_data = (2**16-1)*(self.tif_data-self.percs[0])/(self.percs[1]-self.percs[0])
imsave(name+'', self.tif_data.astype(np.uint16))
| remakePlot |
answer_test.go | /**
* @Author : s1m0n21
* @Description : Test answer
* @Project : leetcode-go
* @File : answer_test.go
 * @Date : 2021/10/15 1:03
*/
package _diagonal_traverse
| )
func TestAnswer(t *testing.T) {
tests := []struct {
input [][]int
expect []int
}{
{
[][]int{
{1, 2, 3},
{4, 5, 6},
{7, 8, 9},
},
[]int{1, 2, 4, 7, 5, 3, 6, 8, 9},
},
{
[][]int{
{1, 2},
{3, 4},
},
[]int{1, 2, 3, 4},
},
{
[][]int{
{1},
},
[]int{1},
},
}
for i, test := range tests {
if actual := findDiagonalOrder(test.input); !reflect.DeepEqual(actual, test.expect) {
t.Errorf("%d: input = %+v, expect = %+v, actual = %+v", i, test.input, test.expect, actual)
}
}
} | import (
"reflect"
"testing" |
normalpath_unix_test.go | // Copyright 2020-2022 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !windows
// +build !windows
package normalpath
import (
"fmt"
"os"
"path/filepath"
"sort"
"testing"
"github.com/element-of-surprise/buf/private/pkg/stringutil"
"github.com/stretchr/testify/assert"
)
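// Several helpers below (testBase, testDir, testExt, testJoin, testRel) cross-check the
// normalpath function against the corresponding path/filepath function whenever the
// platform separator is '/', then assert the normalpath result itself.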
func TestNormalizeAndValidate(t *testing.T) {
t.Parallel()
path, err := NormalizeAndValidate("")
assert.NoError(t, err)
assert.Equal(t, ".", path)
path, err = NormalizeAndValidate(".")
assert.NoError(t, err)
assert.Equal(t, ".", path)
path, err = NormalizeAndValidate("./.")
assert.NoError(t, err)
assert.Equal(t, ".", path)
path, err = NormalizeAndValidate("./foo")
assert.NoError(t, err)
assert.Equal(t, "foo", path)
_, err = NormalizeAndValidate("/foo")
assert.Error(t, err)
_, err = NormalizeAndValidate("../foo")
assert.Error(t, err)
}
func TestNormalize(t *testing.T) {
t.Parallel()
assert.Equal(t, ".", Normalize(""))
assert.Equal(t, ".", Normalize("."))
assert.Equal(t, ".", Normalize("./."))
assert.Equal(t, "foo", Normalize("./foo"))
assert.Equal(t, "../foo", Normalize("../foo"))
assert.Equal(t, "../foo", Normalize("../foo"))
assert.Equal(t, "foo", Normalize("foo/"))
assert.Equal(t, "foo", Normalize("./foo/"))
assert.Equal(t, "/foo", Normalize("/foo"))
assert.Equal(t, "/foo", Normalize("/foo/"))
assert.Equal(t, "/foo/bar", Normalize("/foo/../foo/bar"))
}
func TestUnnormalize(t *testing.T) {
t.Parallel()
assert.Equal(t, "", Unnormalize(""))
assert.Equal(t, ".", Unnormalize("."))
assert.Equal(t, "/foo", Unnormalize("/foo"))
}
func TestBase(t *testing.T) {
t.Parallel()
testBase(t, ".", ".")
testBase(t, ".", ".")
testBase(t, ".", "./.")
testBase(t, "foo", "./foo")
testBase(t, "bar", "./foo/bar")
testBase(t, "bar", "../foo/bar")
testBase(t, "foo", "/foo")
testBase(t, "bar", "/foo/bar")
}
func | (t *testing.T, expected string, input string) {
if os.PathSeparator == '/' {
assert.Equal(t, expected, filepath.Base(input))
}
assert.Equal(t, expected, Base(input))
}
func TestDir(t *testing.T) {
t.Parallel()
testDir(t, ".", "")
testDir(t, ".", ".")
testDir(t, "/", "/")
testDir(t, ".", "./")
testDir(t, ".", "./.")
testDir(t, ".", "foo")
testDir(t, ".", "./foo")
testDir(t, "foo", "./foo/bar")
testDir(t, "../foo", "../foo/bar")
testDir(t, "../foo", "../foo/bar/../..")
testDir(t, "/foo", "/foo/bar")
testDir(t, "/", "/foo")
}
func testDir(t *testing.T, expected string, input string) {
if os.PathSeparator == '/' {
assert.Equal(t, expected, filepath.Dir(input))
}
assert.Equal(t, expected, Dir(input))
}
func TestExt(t *testing.T) {
t.Parallel()
testExt(t, "", "")
testExt(t, ".", ".")
testExt(t, ".txt", ".txt")
testExt(t, ".txt", ".js.txt")
testExt(t, "", "foo")
testExt(t, ".txt", "foo.txt")
testExt(t, ".txt", "foo.js.txt")
testExt(t, "", "./foo")
testExt(t, ".txt", "./foo.txt")
testExt(t, ".txt", "./foo.js.txt")
testExt(t, "", "./foo/bar")
testExt(t, ".txt", "./foo/bar.txt")
testExt(t, ".txt", "./foo/bar.txt")
testExt(t, ".txt", "./foo/bar.js.txt")
testExt(t, "", "../foo/bar")
testExt(t, ".txt", "../foo/bar.txt")
testExt(t, ".txt", "../foo/bar.js.txt")
}
func testExt(t *testing.T, expected string, input string) {
if os.PathSeparator == '/' {
assert.Equal(t, expected, filepath.Ext(input))
}
assert.Equal(t, expected, Ext(input))
}
func TestJoin(t *testing.T) {
t.Parallel()
testJoin(t, "", "")
testJoin(t, "", "", "")
testJoin(t, ".", ".", ".")
testJoin(t, ".", "", ".", "")
testJoin(t, "foo/bar", "foo", "./bar")
testJoin(t, "foo", "foo", "./bar", "..")
testJoin(t, "/foo/bar", "/foo", "./bar")
testJoin(t, "/foo", "/foo", "./bar", "..")
testJoin(t, "bar", ".", "bar")
}
func testJoin(t *testing.T, expected string, input ...string) {
if os.PathSeparator == '/' {
assert.Equal(t, expected, filepath.Join(input...))
}
assert.Equal(t, expected, Join(input...))
}
func TestRel(t *testing.T) {
t.Parallel()
testRel(t, ".", "", "")
testRel(t, ".", "", ".")
testRel(t, ".", ".", "")
testRel(t, ".", ".", ".")
testRel(t, ".", "foo", "foo")
testRel(t, "foo", ".", "foo")
testRel(t, "foo", ".", "./foo")
testRel(t, "foo/bar", ".", "foo/bar")
testRel(t, "bar", "/foo", "/foo/bar")
testRel(t, "bar", "foo", "foo/bar")
testRel(t, "baz", "/foo/./bar", "/foo/bar/baz")
testRel(t, "baz", "foo/./bar", "foo/bar/baz")
testRelError(t, "", "..", "foo/bar")
}
func testRel(t *testing.T, expected string, basepath string, targpath string) {
if os.PathSeparator == '/' {
rel, err := filepath.Rel(basepath, targpath)
assert.NoError(t, err)
assert.Equal(t, expected, rel)
}
rel, err := Rel(basepath, targpath)
assert.NoError(t, err)
assert.Equal(t, expected, rel)
}
func testRelError(t *testing.T, expected string, basepath string, targpath string) {
if os.PathSeparator == '/' {
rel, err := filepath.Rel(basepath, targpath)
assert.Error(t, err)
assert.Equal(t, expected, rel)
}
rel, err := Rel(basepath, targpath)
assert.Error(t, err)
assert.Equal(t, expected, rel)
}
func TestComponents(t *testing.T) {
t.Parallel()
testComponents(t, "", ".")
testComponents(t, ".", ".")
testComponents(t, "foo", "foo")
testComponents(t, "foo/bar", "foo", "bar")
testComponents(t, "foo/bar/../baz", "foo", "bar", "..", "baz")
testComponents(t, "/foo/bar", "/", "foo", "bar")
testComponents(t, "./foo/bar", ".", "foo", "bar")
testComponents(t, "../foo/bar", "..", "foo", "bar")
}
func testComponents(t *testing.T, path string, expected ...string) {
assert.Equal(t, expected, Components(path))
}
func TestStripComponents(t *testing.T) {
t.Parallel()
testStripComponents(t, 0, "", true, "")
testStripComponents(t, 0, "foo", true, "foo")
testStripComponents(t, 0, "foo", true, "foo")
testStripComponents(t, 1, "", false, "foo")
testStripComponents(t, 1, "bar", true, "foo/bar")
testStripComponents(t, 1, "bar/baz", true, "foo/bar/baz")
testStripComponents(t, 2, "baz", true, "foo/bar/baz")
testStripComponents(t, 1, "bar/baz/bat", true, "foo/bar/baz/bat")
testStripComponents(t, 2, "baz/bat", true, "foo/bar/baz/bat")
testStripComponents(t, 3, "bat", true, "foo/bar/baz/bat")
testStripComponents(t, 4, "", false, "foo/bar/baz/bat")
testStripComponents(t, 5, "", false, "foo/bar/baz/bat")
}
func testStripComponents(t *testing.T, count int, expected string, expectedOK bool, path string) {
actual, ok := StripComponents(path, uint32(count))
assert.Equal(t, expectedOK, ok)
assert.Equal(t, expected, actual)
}
func TestByDir(t *testing.T) {
t.Parallel()
assert.Equal(
t,
map[string][]string{
"one": {
"one/1.txt",
"one/2.txt",
"one/3.txt",
},
"two": {
"two/1.txt",
"two/2.txt",
"two/3.txt",
},
},
ByDir(
"one/2.txt",
"one/1.txt",
"two/2.txt",
"one/3.txt",
"two/1.txt",
"two/3.txt",
),
)
assert.Equal(
t,
map[string][]string{
".": {
"1.txt",
"2.txt",
"3.txt",
},
"two": {
"two/1.txt",
"two/2.txt",
"two/3.txt",
},
},
ByDir(
"2.txt",
"1.txt",
"3.txt",
"two/3.txt",
"two/2.txt",
"two/1.txt",
),
)
}
func TestContainsPath(t *testing.T) {
testContainsPath(t, false, "a.proto", "a.proto")
testContainsPath(t, true, ".", "a.proto")
testContainsPath(t, false, "a.proto", ".")
testContainsPath(t, false, ".", ".")
testContainsPath(t, true, ".", "a/b.proto")
testContainsPath(t, true, ".", "a/b")
testContainsPath(t, false, "a", "ab/c")
testContainsPath(t, true, "a", "a/b/c")
testContainsPath(t, false, "b", "a/b/c")
testContainsPath(t, true, "b", "b/b/c")
testContainsPath(t, true, "b", "b/a/c")
}
func testContainsPath(t *testing.T, expected bool, value string, path string) {
assert.Equal(t, expected, ContainsPath(value, path, Relative), fmt.Sprintf("%s %s", value, path))
}
func TestEqualsOrContainsPath(t *testing.T) {
testEqualsOrContainsPath(t, true, "a.proto", "a.proto")
testEqualsOrContainsPath(t, true, ".", "a.proto")
testEqualsOrContainsPath(t, false, "a.proto", ".")
testEqualsOrContainsPath(t, true, ".", "a/b.proto")
testEqualsOrContainsPath(t, true, ".", "a/b")
testEqualsOrContainsPath(t, false, "a", "ab/c")
testEqualsOrContainsPath(t, true, "a", "a/b/c")
testEqualsOrContainsPath(t, false, "b", "a/b/c")
testEqualsOrContainsPath(t, true, "b", "b/b/c")
testEqualsOrContainsPath(t, true, "b", "b/a/c")
}
func testEqualsOrContainsPath(t *testing.T, expected bool, value string, path string) {
assert.Equal(t, expected, EqualsOrContainsPath(value, path, Relative), fmt.Sprintf("%s %s", value, path))
}
func TestMapHasEqualOrContainingPath(t *testing.T) {
testMapHasEqualOrContainingPath(t, true, "a.proto", "a.proto")
testMapHasEqualOrContainingPath(t, false, ".", "a.proto")
testMapHasEqualOrContainingPath(t, true, "a.proto", ".")
testMapHasEqualOrContainingPath(t, true, "a/b.proto", ".")
testMapHasEqualOrContainingPath(t, true, "a/b", ".")
testMapHasEqualOrContainingPath(t, false, "ab/c", "a", "b")
testMapHasEqualOrContainingPath(t, true, "a/b/c", "a", "b")
testMapHasEqualOrContainingPath(t, false, "a/b/c", "b")
testMapHasEqualOrContainingPath(t, true, "b/b/c", "b")
testMapHasEqualOrContainingPath(t, true, "b/a/c", "b")
testMapHasEqualOrContainingPath(t, true, "b/b/c", "b", ".")
}
func testMapHasEqualOrContainingPath(t *testing.T, expected bool, path string, keys ...string) {
keyMap := stringutil.SliceToMap(keys)
assert.Equal(t, expected, MapHasEqualOrContainingPath(keyMap, path, Relative), fmt.Sprintf("%s %v", path, keys))
}
func TestMapAllEqualOrContainingPaths(t *testing.T) {
testMapAllEqualOrContainingPaths(t, []string{"a.proto"}, "a.proto", "a.proto")
testMapAllEqualOrContainingPaths(t, nil, ".", "a.proto")
testMapAllEqualOrContainingPaths(t, []string{"."}, "a.proto", ".")
testMapAllEqualOrContainingPaths(t, []string{"."}, "a/b.proto", ".")
testMapAllEqualOrContainingPaths(t, []string{"."}, "a/b", ".")
testMapAllEqualOrContainingPaths(t, nil, "ab/c", "a", "b")
testMapAllEqualOrContainingPaths(t, []string{"a"}, "a/b/c", "a", "b")
testMapAllEqualOrContainingPaths(t, nil, "a/b/c", "b")
testMapAllEqualOrContainingPaths(t, []string{"b"}, "b/b/c", "b")
testMapAllEqualOrContainingPaths(t, []string{"b"}, "b/a/c", "b")
testMapAllEqualOrContainingPaths(t, []string{"b", "."}, "b/b/c", "b", ".")
testMapAllEqualOrContainingPaths(t, []string{"b", "b/b", "."}, "b/b/c", "b", "b/b", ".")
}
func testMapAllEqualOrContainingPaths(t *testing.T, expected []string, path string, keys ...string) {
if expected == nil {
expected = make([]string, 0)
}
sort.Strings(expected)
keyMap := stringutil.SliceToMap(keys)
assert.Equal(t, expected, MapAllEqualOrContainingPaths(keyMap, path, Relative), fmt.Sprintf("%s %v", path, keys))
}
func TestContainsPathAbs(t *testing.T) {
testContainsPathAbs(t, false, "/a.proto", "/a.proto")
testContainsPathAbs(t, true, "/", "/a.proto")
testContainsPathAbs(t, false, "/a.proto", "/")
testContainsPathAbs(t, false, "/", "/")
testContainsPathAbs(t, true, "/", "/a/b.proto")
testContainsPathAbs(t, true, "/", "/a/b")
testContainsPathAbs(t, false, "/a", "/ab/c")
testContainsPathAbs(t, true, "/a", "/a/b/c")
testContainsPathAbs(t, false, "/b", "/a/b/c")
testContainsPathAbs(t, true, "/b", "/b/b/c")
testContainsPathAbs(t, true, "/b", "/b/a/c")
}
func testContainsPathAbs(t *testing.T, expected bool, value string, path string) {
assert.Equal(t, expected, ContainsPath(value, path, Absolute), fmt.Sprintf("%s %s", value, path))
}
func TestEqualsOrContainsPathAbs(t *testing.T) {
testEqualsOrContainsPathAbs(t, true, "/a.proto", "/a.proto")
testEqualsOrContainsPathAbs(t, true, "/", "/a.proto")
testEqualsOrContainsPathAbs(t, false, "a.proto", "/")
testEqualsOrContainsPathAbs(t, true, "/", "/a/b.proto")
testEqualsOrContainsPathAbs(t, true, "/", "/a/b")
testEqualsOrContainsPathAbs(t, false, "/a", "/ab/c")
testEqualsOrContainsPathAbs(t, true, "/a", "/a/b/c")
testEqualsOrContainsPathAbs(t, false, "/b", "/a/b/c")
testEqualsOrContainsPathAbs(t, true, "/b", "/b/b/c")
testEqualsOrContainsPathAbs(t, true, "/b", "/b/a/c")
}
func testEqualsOrContainsPathAbs(t *testing.T, expected bool, value string, path string) {
assert.Equal(t, expected, EqualsOrContainsPath(value, path, Absolute), fmt.Sprintf("%s %s", value, path))
}
func TestMapHasEqualOrContainingPathAbs(t *testing.T) {
testMapHasEqualOrContainingPathAbs(t, true, "/a.proto", "/a.proto")
testMapHasEqualOrContainingPathAbs(t, false, "/", "/a.proto")
testMapHasEqualOrContainingPathAbs(t, true, "/a.proto", "/")
testMapHasEqualOrContainingPathAbs(t, true, "/a/b.proto", "/")
testMapHasEqualOrContainingPathAbs(t, true, "/a/b", "/")
testMapHasEqualOrContainingPathAbs(t, false, "/ab/c", "/a", "/b")
testMapHasEqualOrContainingPathAbs(t, true, "/a/b/c", "/a", "/b")
testMapHasEqualOrContainingPathAbs(t, false, "/a/b/c", "/b")
testMapHasEqualOrContainingPathAbs(t, true, "/b/b/c", "/b")
testMapHasEqualOrContainingPathAbs(t, true, "/b/a/c", "/b")
testMapHasEqualOrContainingPathAbs(t, true, "/b/b/c", "/b", "/")
}
func testMapHasEqualOrContainingPathAbs(t *testing.T, expected bool, path string, keys ...string) {
keyMap := stringutil.SliceToMap(keys)
assert.Equal(t, expected, MapHasEqualOrContainingPath(keyMap, path, Absolute), fmt.Sprintf("%s %v", path, keys))
}
func TestMapAllEqualOrContainingPathsAbs(t *testing.T) {
testMapAllEqualOrContainingPathsAbs(t, []string{"/a.proto"}, "/a.proto", "/a.proto")
testMapAllEqualOrContainingPathsAbs(t, nil, "/", "/a.proto")
testMapAllEqualOrContainingPathsAbs(t, []string{"/"}, "/a.proto", "/")
testMapAllEqualOrContainingPathsAbs(t, []string{"/"}, "/a/b.proto", "/")
testMapAllEqualOrContainingPathsAbs(t, []string{"/"}, "/a/b", "/")
testMapAllEqualOrContainingPathsAbs(t, nil, "/ab/c", "/a", "/b")
testMapAllEqualOrContainingPathsAbs(t, []string{"/a"}, "/a/b/c", "/a", "/b")
testMapAllEqualOrContainingPathsAbs(t, nil, "/a/b/c", "/b")
testMapAllEqualOrContainingPathsAbs(t, []string{"/b"}, "/b/b/c", "/b")
testMapAllEqualOrContainingPathsAbs(t, []string{"/b"}, "/b/a/c", "/b")
testMapAllEqualOrContainingPathsAbs(t, []string{"/b", "/"}, "/b/b/c", "/b", "/")
testMapAllEqualOrContainingPathsAbs(t, []string{"/b", "/b/b", "/"}, "/b/b/c", "/b", "/b/b", "/")
}
func testMapAllEqualOrContainingPathsAbs(t *testing.T, expected []string, path string, keys ...string) {
if expected == nil {
expected = make([]string, 0)
}
sort.Strings(expected)
keyMap := stringutil.SliceToMap(keys)
assert.Equal(t, expected, MapAllEqualOrContainingPaths(keyMap, path, Absolute), fmt.Sprintf("%s %v", path, keys))
}
| testBase |
__init__.py | from . import models
from . import controllers |
||
tests.rs | // Copyright 2019 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// spell-checker:ignore upup
#![allow(clippy::too_many_lines)]
use exonum_crypto::Hash;
use rand::{thread_rng, Rng};
use serde_json::{self, json};
use std::cmp;
use super::{key::ProofListKey, tree_height_by_length, ListProof, ListProofError, ProofListIndex};
use crate::{access::AccessExt, BinaryValue, Database, HashTag, ObjectHash, TemporaryDB};
const IDX_NAME: &str = "idx_name";
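// random_values generates `len` distinct random 10-byte values, rejecting duplicates
// via a HashSet so the generated list entries are guaranteed to be unique.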
fn random_values<R: Rng>(rng: &mut R, len: usize) -> Vec<Vec<u8>> {
use std::collections::HashSet;
let mut exists = HashSet::new();
let generator = |_| {
let mut new_val: Vec<u8> = vec![0; 10];
rng.fill_bytes(&mut new_val);
while exists.contains(&new_val) {
rng.fill_bytes(&mut new_val);
}
exists.insert(new_val.clone());
new_val
};
(0..len).map(generator).collect::<Vec<_>>()
}
#[test]
fn list_methods() {
let db = TemporaryDB::new();
let fork = db.fork();
let mut index = fork.get_proof_list(IDX_NAME);
assert!(index.is_empty());
assert_eq!(index.len(), 0);
index.push(vec![1]);
assert!(!index.is_empty());
assert_eq!(index.len(), 1);
index.push(vec![2]);
assert_eq!(index.len(), 2);
index.extend(vec![vec![3]]);
assert_eq!(index.len(), 3);
assert_eq!(index.get(0), Some(vec![1]));
assert_eq!(index.get(1), Some(vec![2]));
assert_eq!(index.get(2), Some(vec![3]));
}
#[test]
fn extend_is_equivalent_to_sequential_pushes() {
let db = TemporaryDB::new();
let fork = db.fork();
let mut index = fork.get_proof_list(IDX_NAME);
for _ in 0..10 {
index.clear();
let values: [u8; 32] = thread_rng().gen();
for &value in &values {
index.push(value);
}
let hash_after_pushes = index.object_hash();
index.clear();
index.extend(values.iter().cloned());
assert_eq!(index.object_hash(), hash_after_pushes);
}
// Try extending list in several calls.
for _ in 0..10 {
index.clear();
let values: [u8; 32] = thread_rng().gen();
for &value in &values {
index.push(value);
}
let hash_after_pushes = index.object_hash();
index.clear();
let mut iter = values.iter().cloned();
index.extend(iter.by_ref().take(5));
index.extend(iter.by_ref().take(8));
index.extend(iter.by_ref().take(3));
index.extend(iter);
assert_eq!(index.object_hash(), hash_after_pushes);
}
// Try mixing extensions and pushes
for _ in 0..10 {
index.clear();
let values: [u8; 32] = thread_rng().gen();
for &value in &values {
index.push(value);
}
let hash_after_pushes = index.object_hash();
index.clear();
let mut iter = values.iter().cloned();
index.extend(iter.by_ref().take(5));
for value in iter.by_ref().take(3) {
index.push(value);
}
index.extend(iter.by_ref().take(7));
index.push(iter.by_ref().next().unwrap());
index.extend(iter);
assert_eq!(index.object_hash(), hash_after_pushes);
}
}
#[test]
fn tree_height() |
#[test]
fn iter() {
let db = TemporaryDB::new();
let fork = db.fork();
let mut list_index = fork.get_proof_list(IDX_NAME);
list_index.extend(vec![1_u8, 2, 3]);
assert_eq!(list_index.iter().collect::<Vec<u8>>(), vec![1, 2, 3]);
assert_eq!(list_index.iter_from(0).collect::<Vec<u8>>(), vec![1, 2, 3]);
assert_eq!(list_index.iter_from(1).collect::<Vec<u8>>(), vec![2, 3]);
assert_eq!(
list_index.iter_from(3).collect::<Vec<u8>>(),
Vec::<u8>::new()
);
}
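// simple_proof builds the expected Merkle hashes for lists of one, two and three
// elements by hand (h0..h012) and checks that single-element and range proofs
// verify against the list hash after every push.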
#[test]
fn simple_proof() {
let db = TemporaryDB::new();
let fork = db.fork();
let mut index = fork.get_proof_list(IDX_NAME);
let h0 = HashTag::hash_leaf(&2_u64.to_bytes());
let h1 = HashTag::hash_leaf(&4_u64.to_bytes());
let h2 = HashTag::hash_leaf(&6_u64.to_bytes());
let h01 = HashTag::hash_node(&h0, &h1);
let h22 = HashTag::hash_single_node(&h2);
let h012 = HashTag::hash_node(&h01, &h22);
assert_eq!(index.object_hash(), HashTag::empty_list_hash());
index.push(2_u64);
assert_eq!(index.object_hash(), HashTag::hash_list_node(1, h0));
let proof = index.get_proof(0);
assert_eq!(proof, ListProof::new(vec![(0, 2_u64)], index.len()));
let proof = proof.check().unwrap();
assert_eq!(proof.index_hash(), index.object_hash());
assert_eq!(*proof.entries(), [(0, 2)]);
index.push(4_u64);
assert_eq!(index.object_hash(), HashTag::hash_list_node(2, h01));
let proof = index.get_proof(0);
assert_eq!(proof, {
let mut proof = ListProof::new(vec![(0, 2_u64)], index.len());
proof.push_hash(1, 1, h1);
proof
});
let proof = proof.check().unwrap();
assert_eq!(proof.index_hash(), index.object_hash());
assert_eq!(*proof.entries(), [(0, 2)]);
let proof = index.get_proof(1);
assert_eq!(proof, {
let mut proof = ListProof::new(vec![(1, 4_u64)], index.len());
proof.push_hash(1, 0, h0);
proof
});
let proof = proof.check().unwrap();
assert_eq!(proof.index_hash(), index.object_hash());
assert_eq!(*proof.entries(), [(1, 4)]);
let proof = index.get_range_proof(0..2);
assert_eq!(
proof,
ListProof::new(vec![(0, 2_u64), (1, 4_u64)], index.len())
);
assert_eq!(
*proof
.check_against_hash(index.object_hash())
.unwrap()
.entries(),
[(0, 2), (1, 4)]
);
index.push(6_u64);
assert_eq!(index.object_hash(), HashTag::hash_list_node(3, h012));
let proof = index.get_proof(0);
assert_eq!(proof, {
let mut proof = ListProof::new(vec![(0, 2_u64)], index.len());
proof.push_hash(1, 1, h1);
proof.push_hash(2, 1, h22);
proof
});
assert_eq!(
*proof
.check_against_hash(index.object_hash())
.unwrap()
.entries(),
[(0, 2)]
);
let proof = index.get_range_proof(1..3);
assert_eq!(proof, {
let mut proof = ListProof::new(vec![(1, 4_u64), (2, 6_u64)], index.len());
proof.push_hash(1, 0, h0);
proof
});
assert_eq!(
*proof
.check_against_hash(index.object_hash())
.unwrap()
.entries(),
[(1, 4_u64), (2, 6_u64)]
);
let proof = index.get_range_proof(0..2);
assert_eq!(proof, {
let mut proof = ListProof::new(vec![(0, 2_u64), (1, 4_u64)], index.len());
proof.push_hash(2, 1, h22);
proof
});
assert_eq!(
*proof
.check_against_hash(index.object_hash())
.unwrap()
.entries(),
[(0, 2_u64), (1, 4_u64)]
);
}
#[test]
fn proofs_in_empty_list() {
let db = TemporaryDB::new();
let fork = db.fork();
let index: ProofListIndex<_, u32> = fork.get_proof_list(IDX_NAME);
let proof = index.get_range_proof(..);
proof.check_against_hash(index.object_hash()).unwrap();
assert!(proof.check_against_hash(index.object_hash()).is_ok());
let proof = index.get_range_proof(..0);
assert!(proof.check_against_hash(index.object_hash()).is_ok());
}
#[test]
fn empty_proof_ranges() {
let db = TemporaryDB::new();
let fork = db.fork();
let mut index = fork.get_proof_list(IDX_NAME);
index.extend(vec![1_u32, 2, 3]);
let proof = index.get_range_proof(1..1);
assert!(proof
.check_against_hash(index.object_hash())
.unwrap()
.entries()
.is_empty());
let proof = index.get_range_proof(1..=0);
assert!(proof
.check_against_hash(index.object_hash())
.unwrap()
.entries()
.is_empty());
}
#[test]
fn random_proofs() {
const LIST_SIZE: usize = 1 << 10;
const MAX_RANGE_SIZE: u64 = 128;
let db = TemporaryDB::new();
let fork = db.fork();
let mut index = fork.get_proof_list(IDX_NAME);
let mut rng = thread_rng();
let values = random_values(&mut rng, LIST_SIZE);
for value in &values {
index.push(value.clone());
}
let index_hash = index.object_hash();
for _ in 0..10 {
let start = rng.gen_range(0, LIST_SIZE as u64);
let end = rng.gen_range(start + 1, LIST_SIZE as u64 + 1);
let end = cmp::min(end, start + MAX_RANGE_SIZE);
let range_proof = index.get_range_proof(start..end);
let checked_proof = range_proof.check_against_hash(index_hash).unwrap();
let expected_entries = (start..end).zip(&values[start as usize..end as usize]);
assert!(checked_proof
.entries()
.iter()
.map(|(i, value)| (*i, value))
.eq(expected_entries));
}
}
#[test]
fn index_and_proof_roots() {
let db = TemporaryDB::new();
let fork = db.fork();
let mut index = fork.get_proof_list(IDX_NAME);
assert_eq!(index.object_hash(), HashTag::empty_list_hash());
let h1 = HashTag::hash_leaf(&[1, 2]);
let h2 = HashTag::hash_leaf(&[2, 3]);
let h3 = HashTag::hash_leaf(&[3, 4]);
let h4 = HashTag::hash_leaf(&[4, 5]);
let h5 = HashTag::hash_leaf(&[5, 6]);
let h6 = HashTag::hash_leaf(&[6, 7]);
let h7 = HashTag::hash_leaf(&[7, 8]);
let h8 = HashTag::hash_leaf(&[8, 9]);
let h12 = HashTag::hash_node(&h1, &h2);
let h3up = HashTag::hash_single_node(&h3);
let h123 = HashTag::hash_node(&h12, &h3up);
let h34 = HashTag::hash_node(&h3, &h4);
let h1234 = HashTag::hash_node(&h12, &h34);
let h5up = HashTag::hash_single_node(&h5);
let h5upup = HashTag::hash_single_node(&h5up);
let h12345 = HashTag::hash_node(&h1234, &h5upup);
let h56 = HashTag::hash_node(&h5, &h6);
let h56up = HashTag::hash_single_node(&h56);
let h123456 = HashTag::hash_node(&h1234, &h56up);
let h7up = HashTag::hash_single_node(&h7);
let h567 = HashTag::hash_node(&h56, &h7up);
let h1234567 = HashTag::hash_node(&h1234, &h567);
let h78 = HashTag::hash_node(&h7, &h8);
let h5678 = HashTag::hash_node(&h56, &h78);
let h12345678 = HashTag::hash_node(&h1234, &h5678);
let expected_hash_comb = vec![
(vec![1, 2], HashTag::hash_list_node(1, h1), 0),
(vec![2, 3], HashTag::hash_list_node(2, h12), 1),
(vec![3, 4], HashTag::hash_list_node(3, h123), 2),
(vec![4, 5], HashTag::hash_list_node(4, h1234), 3),
(vec![5, 6], HashTag::hash_list_node(5, h12345), 4),
(vec![6, 7], HashTag::hash_list_node(6, h123456), 5),
(vec![7, 8], HashTag::hash_list_node(7, h1234567), 6),
(vec![8, 9], HashTag::hash_list_node(8, h12345678), 7),
];
for (inserted, exp_root, proof_ind) in expected_hash_comb {
index.push(inserted);
assert_eq!(index.object_hash(), exp_root);
let range_proof = index.get_range_proof(proof_ind..=proof_ind);
assert_eq!(
range_proof
.check_against_hash(index.object_hash())
.unwrap()
.entries()
.len(),
1
);
let js = serde_json::to_string(&range_proof).unwrap();
let deserialized_proof: ListProof<Vec<u8>> = serde_json::from_str(&js).unwrap();
assert_eq!(deserialized_proof, range_proof);
let range_proof = index.get_range_proof(..=proof_ind);
assert_eq!(
range_proof
.check_against_hash(index.object_hash())
.unwrap()
.entries()
.len(),
(proof_ind + 1) as usize
);
let js = serde_json::to_string(&range_proof).unwrap();
let deserialized_proof: ListProof<Vec<u8>> = serde_json::from_str(&js).unwrap();
assert_eq!(deserialized_proof, range_proof);
let range_proof = index.get_range_proof(0..1);
assert_eq!(
range_proof
.check_against_hash(index.object_hash())
.unwrap()
.entries()
.len(),
1
);
let js = serde_json::to_string(&range_proof).unwrap();
let deserialized_proof: ListProof<Vec<u8>> = serde_json::from_str(&js).unwrap();
assert_eq!(deserialized_proof, range_proof);
}
let range_proof = index.get_range_proof(..);
let entries = range_proof
.check_against_hash(index.object_hash())
.unwrap()
.entries();
let expected_entries = (0..8).zip(vec![
vec![1, 2],
vec![2, 3],
vec![3, 4],
vec![4, 5],
vec![5, 6],
vec![6, 7],
vec![7, 8],
vec![8, 9],
]);
assert!(entries
.iter()
.zip(expected_entries)
.all(|(actual, expected)| *actual == expected));
let mut range_proof = index.get_range_proof(3..5);
assert_eq!(
range_proof
.check_against_hash(index.object_hash())
.unwrap()
.entries()
.len(),
2
);
range_proof = index.get_range_proof(2..6);
assert_eq!(
range_proof
.check_against_hash(index.object_hash())
.unwrap()
.entries()
.len(),
4
);
assert_eq!(index.get(0), Some(vec![1, 2]));
}
#[test]
#[should_panic(expected = "the range start is 2, but the range end is 1")]
fn proof_illegal_range() {
let db = TemporaryDB::new();
let fork = db.fork();
let mut index = fork.get_proof_list(IDX_NAME);
for i in 0_u8..4 {
index.push(vec![i]);
}
index.get_range_proof(2..1);
}
#[test]
#[should_panic(expected = "the range start is 2, but the range end is 1")]
fn proof_illegal_inclusive_range() {
let db = TemporaryDB::new();
let fork = db.fork();
let mut index = fork.get_proof_list(IDX_NAME);
for i in 0_u8..4 {
index.push(vec![i]);
}
index.get_range_proof(2..=0); // `2..=1` is a legal empty range; cf. `Vec` slicing
}
#[test]
fn ranges_work_similar_to_vec_slicing() {
use std::{
fmt,
ops::RangeBounds,
panic::{self, UnwindSafe},
slice::SliceIndex,
};
#[allow(clippy::needless_pass_by_value)] // references to ranges look awkward
fn check_bounds(
v: &[u32],
slice_bounds: impl SliceIndex<[u32], Output = [u32]> + Clone + UnwindSafe + fmt::Debug,
index_bounds: impl RangeBounds<u64> + UnwindSafe,
) {
let panic_hook = panic::take_hook();
panic::set_hook(Box::new(|_| {}));
let slice_bounds_ = slice_bounds.clone();
let slicing_res = panic::catch_unwind(|| v[slice_bounds_].to_vec());
let proof_res = panic::catch_unwind(|| {
let fork = TemporaryDB::new().fork();
let mut index = fork.get_proof_list(IDX_NAME);
index.extend(v.to_vec());
(index.object_hash(), index.get_range_proof(index_bounds))
});
panic::set_hook(panic_hook);
if let Ok((index_hash, proof)) = proof_res {
let checked_proof = proof.check_against_hash(index_hash).unwrap();
match slicing_res {
Ok(slice) => {
assert_eq!(
checked_proof
.entries()
.iter()
.map(|(_, value)| *value)
.collect::<Vec<_>>(),
slice
);
}
Err(e) => {
// Slicing is more strict if the range does not completely fit
// in the slice indexes.
assert!(
e.downcast_ref::<String>().unwrap().contains("out of range"),
"{:?}",
slice_bounds
);
}
}
} else {
assert!(slicing_res.is_err(), "{:?}", slice_bounds);
}
}
let v = vec![1, 2, 3, 4, 5];
check_bounds(&v, .., ..);
for start in 0_usize..10 {
for end in 0_usize..10 {
check_bounds(&v, start..end, (start as u64)..(end as u64));
check_bounds(&v, start..=end, (start as u64)..=(end as u64));
if start == 0 {
check_bounds(&v, ..end, ..(end as u64));
check_bounds(&v, ..=end, ..=(end as u64));
}
if end == 0 && start <= v.len() {
check_bounds(&v, start.., (start as u64)..);
}
}
}
}
#[test]
fn proof_with_range_start_exceeding_list_size() {
let db = TemporaryDB::new();
let fork = db.fork();
let mut index = fork.get_proof_list(IDX_NAME);
for i in 0_u8..4 {
index.push(vec![i]);
}
let proof = index.get_range_proof(8..10_000_000);
assert!(proof
.check_against_hash(index.object_hash())
.unwrap()
.entries()
.is_empty());
let proof = index.get_range_proof(8..);
assert!(proof
.check_against_hash(index.object_hash())
.unwrap()
.entries()
.is_empty());
}
#[test]
fn proof_with_range_end_exceeding_list_size() {
let db = TemporaryDB::new();
let fork = db.fork();
let mut index = fork.get_proof_list(IDX_NAME);
for i in 0_u8..4 {
index.push(vec![i]);
}
let proof = index.get_range_proof(2..10);
assert_eq!(
proof
.check_against_hash(index.object_hash())
.unwrap()
.entries()
.len(),
2 // elements 2 and 3
);
}
#[test]
fn setting_elements_leads_to_correct_list_hash() {
let db = TemporaryDB::new();
let hash1 = {
let fork = db.fork();
let mut list = fork.get_proof_list(IDX_NAME);
list.push(vec![1]);
list.push(vec![2]);
list.push(vec![3]);
list.push(vec![4]);
list.set(0, vec![4]);
list.set(1, vec![7]);
list.set(2, vec![5]);
list.set(3, vec![1]);
list.object_hash()
};
let hash2 = {
let fork = db.fork();
let mut list = fork.get_proof_list(IDX_NAME);
list.push(vec![4]);
list.push(vec![7]);
list.push(vec![5]);
list.push(vec![1]);
list.object_hash()
};
assert_eq!(hash1, hash2);
}
#[test]
fn setting_elements_leads_to_correct_list_hash_randomized() {
const LIST_LEN: usize = 32;
let mut rng = thread_rng();
let db = TemporaryDB::new();
let fork = db.fork();
let mut list = fork.get_proof_list(IDX_NAME);
for _ in 0..10 {
// Prepare two copies of values with sufficient intersection.
let values: [u16; LIST_LEN] = rng.gen();
let mut new_values: [u16; LIST_LEN] = rng.gen();
for i in 0..LIST_LEN {
if rng.gen::<bool>() {
new_values[i] = values[i];
}
}
let proof_ranges: Vec<_> = (0..50)
.map(|_| {
let start = rng.gen_range(0, LIST_LEN as u64);
let end = rng.gen_range(start, LIST_LEN as u64) + 1;
start..end
})
.collect();
list.clear();
list.extend(new_values.iter().cloned());
let list_hash = list.object_hash();
let expected_proofs: Vec<_> = proof_ranges
.iter()
.map(|range| list.get_range_proof(range.clone()))
.collect();
list.clear();
list.extend(values.iter().cloned());
for i in 0..values.len() {
if values[i] != new_values[i] {
list.set(i as u64, new_values[i]);
}
}
assert_eq!(list.object_hash(), list_hash);
for (i, range) in proof_ranges.into_iter().enumerate() {
let proof = list.get_range_proof(range.clone());
assert_eq!(
proof, expected_proofs[i],
"Unexpected proof for range {:?}",
range
);
}
}
}
#[test]
fn truncating_list() {
let db = TemporaryDB::new();
let fork = db.fork();
let mut list = fork.get_proof_list(IDX_NAME);
list.extend(0_u32..30);
list.truncate(5);
assert_eq!(list.len(), 5);
assert_eq!(list.get(3), Some(3));
assert_eq!(list.get(7), None);
assert!(list.iter().eq(0_u32..5));
assert!(list.iter_from(3).eq(3_u32..5));
// Check that the branches are removed.
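// Node counts per height for the truncated 5-element tree: index 0 is a placeholder
// (heights start at 1), then 5 leaf hashes and 3, 2 and 1 branch hashes.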
let level_lengths = vec![5, 5, 3, 2, 1];
for height in 1..tree_height_by_length(30) {
let level_len = level_lengths
.get(height as usize)
.copied()
.unwrap_or_default();
if level_len > 0 {
assert!(list
.get_branch(ProofListKey::new(height, level_len - 1))
.is_some());
}
for index in level_len..(level_len + 30) {
let key = ProofListKey::new(height, index);
assert!(
list.get_branch(key).is_none(),
"Branch wasn't removed: {:?}",
key
);
}
}
}
#[test]
fn truncating_list_leads_to_expected_hash() {
let mut rng = thread_rng();
let db = TemporaryDB::new();
let fork = db.fork();
let mut list = fork.get_proof_list(IDX_NAME);
for _ in 0..10 {
let values: [u32; 32] = rng.gen();
let truncated_len = rng.gen_range(5, 25);
let proof_ranges: Vec<_> = (0..50)
.map(|_| {
let start = rng.gen_range(0, truncated_len as u64);
let end = rng.gen_range(start, truncated_len as u64) + 1;
start..end
})
.collect();
list.clear();
list.extend(values[..truncated_len].iter().copied());
let list_hash = list.object_hash();
let expected_proofs: Vec<_> = proof_ranges
.iter()
.map(|range| list.get_range_proof(range.clone()))
.collect();
list.clear();
list.extend(values.iter().copied());
list.truncate(truncated_len as u64);
assert_eq!(list.object_hash(), list_hash);
for (i, range) in proof_ranges.into_iter().enumerate() {
let proof = list.get_range_proof(range.clone());
assert_eq!(
proof, expected_proofs[i],
"Unexpected proof for range {:?}",
range
);
}
}
// Check different values of `truncated_len` (including extreme ones).
let values: [u32; 17] = rng.gen();
for truncated_len in 0..=values.len() {
list.clear();
list.extend(values[..truncated_len].iter().copied());
let list_hash = list.object_hash();
list.clear();
list.extend(values.iter().copied());
list.truncate(truncated_len as u64);
assert_eq!(list.object_hash(), list_hash);
}
}
#[test]
fn popping_element_from_list() {
let db = TemporaryDB::new();
let fork = db.fork();
let mut list = fork.get_proof_list(IDX_NAME);
list.extend(0_i32..10);
let mut count = 0;
while let Some(last) = list.pop() {
count += 1;
assert_eq!(last, 10 - count);
assert_eq!(list.len(), 10 - count as u64);
}
assert!(list.is_empty());
assert_eq!(list.object_hash(), HashTag::empty_list_hash());
}
#[test]
fn proof_json_serialization() {
let mut proof = ListProof::new(vec![(1, "foo".to_owned()), (2, "bar".to_owned())], 5);
proof.push_hash(1, 0, HashTag::hash_leaf(&[4]));
proof.push_hash(2, 1, HashTag::hash_leaf(&[2]));
proof.push_hash(3, 1, HashTag::hash_leaf(&[1]));
let json = serde_json::to_value(&proof).unwrap();
assert_eq!(
json,
json!({
"entries": [(1, "foo"), (2, "bar")],
"proof": [
{ "height": 1, "index": 0, "hash": HashTag::hash_leaf(&[4]) },
{ "height": 2, "index": 1, "hash": HashTag::hash_leaf(&[2]) },
{ "height": 3, "index": 1, "hash": HashTag::hash_leaf(&[1]) },
],
"length": 5,
})
);
let proof_from_json: ListProof<String> = serde_json::from_value(json).unwrap();
assert_eq!(proof_from_json, proof);
}
#[test]
fn unordered_proofs() {
let json = json!({
"entries": [(2, "foo"), (1, "bar")],
"proof": [],
"length": 3,
});
let proof: ListProof<String> = serde_json::from_value(json).unwrap();
assert_eq!(proof.check().unwrap_err(), ListProofError::Unordered);
let json = json!({
"entries": [(2, "foo")],
"proof": [
{ "height": 1, "index": 3, "hash": Hash::zero() },
{ "height": 1, "index": 1, "hash": Hash::zero() },
],
"length": 5,
});
let proof: ListProof<String> = serde_json::from_value(json).unwrap();
assert_eq!(proof.check().unwrap_err(), ListProofError::Unordered);
let json = json!({
"entries": [(2, "foo")],
"proof": [
{ "height": 2, "index": 1, "hash": Hash::zero() },
{ "height": 2, "index": 3, "hash": Hash::zero() },
{ "height": 1, "index": 2, "hash": Hash::zero() },
],
"length": 100,
});
let proof: ListProof<String> = serde_json::from_value(json).unwrap();
assert_eq!(proof.check().unwrap_err(), ListProofError::Unordered);
}
#[test]
fn non_empty_proof_for_empty_tree() {
let json = json!({
"entries": [(1, "bar")],
"proof": [],
"length": 0,
});
let proof: ListProof<String> = serde_json::from_value(json).unwrap();
assert_eq!(proof.check().unwrap_err(), ListProofError::NonEmptyProof);
let json = json!({
"entries": [],
"proof": [{ "height": 1, "index": 1, "hash": Hash::zero() }],
"length": 0,
});
let proof: ListProof<String> = serde_json::from_value(json).unwrap();
assert_eq!(proof.check().unwrap_err(), ListProofError::NonEmptyProof);
}
#[test]
fn proofs_with_unexpected_branches() {
let proof: ListProof<u64> = serde_json::from_value(json!({
"entries": [(2, 2)],
"proof": [
{ "height": 10, "index": 2, "hash": Hash::zero() },
],
"length": 10,
}))
.unwrap();
assert_eq!(proof.check().unwrap_err(), ListProofError::UnexpectedBranch);
let proof: ListProof<u64> = serde_json::from_value(json!({
"entries": [(2, 2)],
"proof": [
{ "height": 5, "index": 0, "hash": Hash::zero() },
],
"length": 10,
}))
.unwrap();
assert_eq!(proof.check().unwrap_err(), ListProofError::UnexpectedBranch);
let mut proof = ListProof::new(vec![(1, "foo".to_owned()), (2, "bar".to_owned())], 3);
proof.push_hash(2, 2, Hash::zero());
assert_eq!(proof.check().unwrap_err(), ListProofError::UnexpectedBranch);
let mut proof = ListProof::new(vec![(1, "foo".to_owned()), (2, "bar".to_owned())], 3);
proof.push_hash(1, 4, Hash::zero());
assert_eq!(proof.check().unwrap_err(), ListProofError::UnexpectedBranch);
let mut proof = ListProof::new(vec![(1, "foo".to_owned()), (2, "bar".to_owned())], 5);
proof.push_hash(1, 6, Hash::zero());
assert_eq!(proof.check().unwrap_err(), ListProofError::UnexpectedBranch);
}
#[test]
fn proofs_with_unexpected_leaf() {
let proof: ListProof<u64> = serde_json::from_value(json!({
"entries": [(2, 2)],
"proof": [
{ "height": 0, "index": 1, "hash": Hash::zero() },
{ "height": 1, "index": 1, "hash": Hash::zero() },
],
"length": 5,
}))
.unwrap();
assert_eq!(proof.check().unwrap_err(), ListProofError::UnexpectedLeaf);
}
#[test]
fn proofs_with_missing_entry() {
let proof = ListProof::new(vec![(1, 1_u64), (2, 2)], 3);
// (1, 0) is missing
assert_eq!(proof.check().unwrap_err(), ListProofError::MissingHash);
let mut proof = ListProof::new(vec![(1, 1_u64)], 7);
proof.push_hash(1, 0, Hash::zero());
// (2, 1) is missing
assert_eq!(proof.check().unwrap_err(), ListProofError::MissingHash);
let mut proof = ListProof::new(vec![(1, 1_u64), (2, 2)], 9);
proof.push_hash(1, 0, Hash::zero());
proof.push_hash(1, 3, Hash::zero());
// (3, 1) is missing
assert_eq!(proof.check().unwrap_err(), ListProofError::MissingHash);
let mut proof = ListProof::new(vec![(1, 1_u64), (2, 2), (4, 4)], 8);
proof.push_hash(1, 0, Hash::zero());
proof.push_hash(1, 3, Hash::zero());
proof.push_hash(2, 3, Hash::zero());
assert_eq!(proof.check().unwrap_err(), ListProofError::MissingHash);
}
#[test]
fn invalid_proofs_with_no_values() {
let proof: ListProof<u64> = serde_json::from_value(json!({
"entries": [],
"proof": [],
"length": 1,
}))
.unwrap();
assert_eq!(
proof.check().unwrap_err(),
ListProofError::MissingHash // we expected 1 hash
);
let proof: ListProof<u64> = serde_json::from_value(json!({
"entries": [],
"proof": [
{ "height": 0, "index": 1, "hash": Hash::zero() },
{ "height": 1, "index": 1, "hash": Hash::zero() },
],
"length": 5,
}))
.unwrap();
assert_eq!(
proof.check().unwrap_err(),
ListProofError::UnexpectedBranch // we expected 1 hash, got 2
);
let proof: ListProof<u64> = serde_json::from_value(json!({
"entries": [],
"proof": [
{ "height": 0, "index": 1, "hash": Hash::zero() },
],
"length": 5,
}))
.unwrap();
assert_eq!(
proof.check().unwrap_err(),
ListProofError::UnexpectedBranch // the hash is at an incorrect position
);
}
mod root_hash {
use crate::{access::AccessExt, hash::HashTag, BinaryValue, Database, ObjectHash, TemporaryDB};
use exonum_crypto::{self, Hash};
/// Cross-verifies `ProofListIndex::object_hash()` against the root hash computed directly with `HashTag::hash_list()`.
fn assert_object_hash_correct<V>(values: &[V])
where
V: BinaryValue + Clone,
{
let root_actual = HashTag::hash_list(values);
let root_index = proof_list_index_root(values);
assert_eq!(root_actual, root_index);
}
fn proof_list_index_root<V>(hashes: &[V]) -> Hash
where
V: BinaryValue + Clone,
{
let db = TemporaryDB::new();
let fork = db.fork();
let mut index = fork.get_proof_list("merkle_root");
index.extend(hashes.iter().cloned());
index.object_hash()
}
fn to_list_of_hashes(bytes: &[&[u8]]) -> Vec<Hash> {
bytes
.iter()
.map(|chunk| exonum_crypto::hash(chunk))
.collect()
}
#[test]
fn object_hash_single() {
assert_object_hash_correct(&to_list_of_hashes(&[b"1"]));
}
#[test]
fn object_hash_even() {
assert_object_hash_correct(&to_list_of_hashes(&[b"1", b"2", b"3", b"4"]));
}
#[test]
fn object_hash_odd() {
assert_object_hash_correct(&to_list_of_hashes(&[b"1", b"2", b"3", b"4", b"5"]));
}
#[test]
fn object_hash_with_integers() {
let numbers = [2_u32, 3, 5, 8, 13, 21, 34, 55];
for i in 1..numbers.len() {
assert_object_hash_correct(&numbers[..i]);
}
}
#[test]
fn object_hash_with_bytes() {
let bytes: Vec<_> = [b"foo" as &[_], b"bar", b"bark", b"lorem", b"ipsum"]
.iter()
.map(|slice| slice.to_vec())
.collect();
for i in 1..bytes.len() {
assert_object_hash_correct(&bytes[..i]);
}
}
#[test]
fn object_hash_with_strings() {
const STRING: &str =
"All human beings are born free and equal in dignity and rights. \
They are endowed with reason and conscience and should act towards one another \
in a spirit of brotherhood.";
let words: Vec<_> = STRING.split_whitespace().map(str::to_owned).collect();
for i in 1..words.len() {
assert_object_hash_correct(&words[..i]);
}
}
#[test]
fn object_hash_empty() {
assert_object_hash_correct(&to_list_of_hashes(&[]));
}
}
| {
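// `height()` is 0 for an empty list and ceil(log2(len)) + 1 otherwise,
// so it stays at 3 for both 3 and 4 elements.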
let db = TemporaryDB::new();
let fork = db.fork();
let mut index = fork.get_proof_list(IDX_NAME);
assert_eq!(index.height(), 0);
index.push(vec![1]);
assert_eq!(index.height(), 1);
index.push(vec![2]);
assert_eq!(index.height(), 2);
index.push(vec![3]);
assert_eq!(index.height(), 3);
index.push(vec![4]);
assert_eq!(index.height(), 3);
} |
server.rs | #![deny(warnings)]
extern crate hyper;
extern crate env_logger;
#[macro_use]
extern crate log;
use std::io::{self, Read, Write};
use hyper::{Get, Post};
use hyper::header::ContentLength;
use hyper::http::{Decoder, Encoder, Next};
use hyper::net::HttpStream;
use hyper::server::{Server, Handler, Request, Response};
use hyper::status::StatusCode;
use hyper::uri::RequestUri::AbsolutePath;
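// Per-connection state: `buf` stores the bytes being echoed, `read_pos` / `write_pos`
// track how much of the request body has been read and written back, `eof` marks the
// end of the request body, and `route` records which endpoint was requested.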
struct Echo {
buf: Vec<u8>,
read_pos: usize,
write_pos: usize,
eof: bool,
route: Route,
}
enum Route {
NotFound,
Index,
Echo(Body),
}
#[derive(Clone, Copy)]
enum Body {
Len(u64),
Chunked
}
static INDEX: &'static [u8] = b"Try POST /echo";
impl Echo {
fn new() -> Echo {
Echo {
buf: vec![0; 4096],
read_pos: 0,
write_pos: 0,
eof: false,
route: Route::NotFound,
}
}
}
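// hyper drives the handler through these callbacks; each returns a `Next` value
// telling the event loop whether to wait for readability, writability, both, or end.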
impl Handler<HttpStream> for Echo {
fn on_request(&mut self, req: Request) -> Next {
match *req.uri() {
AbsolutePath(ref path) => match (req.method(), &path[..]) {
(&Get, "/") | (&Get, "/echo") => {
info!("GET Index");
self.route = Route::Index;
Next::write()
}
(&Post, "/echo") => {
info!("POST Echo");
let mut is_more = true;
self.route = if let Some(len) = req.headers().get::<ContentLength>() {
is_more = **len > 0;
Route::Echo(Body::Len(**len))
} else {
Route::Echo(Body::Chunked)
};
if is_more {
Next::read_and_write()
} else {
Next::write()
}
}
_ => Next::write(),
},
_ => Next::write()
}
}
fn on_request_readable(&mut self, transport: &mut Decoder<HttpStream>) -> Next {
match self.route {
Route::Echo(ref body) => {
if self.read_pos < self.buf.len() {
match transport.read(&mut self.buf[self.read_pos..]) {
Ok(0) => {
debug!("Read 0, eof");
self.eof = true;
Next::write()
},
Ok(n) => {
self.read_pos += n;
match *body {
Body::Len(max) if max <= self.read_pos as u64 => {
self.eof = true;
Next::write()
},
_ => Next::read_and_write()
}
}
Err(e) => match e.kind() {
io::ErrorKind::WouldBlock => Next::read_and_write(),
_ => {
println!("read error {:?}", e);
Next::end()
}
}
}
} else {
Next::write()
}
}
_ => unreachable!()
}
}
fn on_response(&mut self, res: &mut Response) -> Next {
match self.route {
Route::NotFound => {
res.set_status(StatusCode::NotFound);
Next::end()
}
Route::Index => {
res.headers_mut().set(ContentLength(INDEX.len() as u64));
Next::write()
}
Route::Echo(body) => {
if let Body::Len(len) = body {
res.headers_mut().set(ContentLength(len));
}
Next::read_and_write()
}
}
}
fn on_response_writable(&mut self, transport: &mut Encoder<HttpStream>) -> Next {
match self.route {
Route::Index => {
transport.write(INDEX).unwrap();
Next::end()
}
Route::Echo(..) => {
if self.write_pos < self.read_pos { | Ok(n) => {
self.write_pos += n;
Next::write()
}
Err(e) => match e.kind() {
io::ErrorKind::WouldBlock => Next::write(),
_ => {
println!("write error {:?}", e);
Next::end()
}
}
}
} else if !self.eof {
Next::read()
} else {
Next::end()
}
}
_ => unreachable!()
}
}
}
fn main() {
env_logger::init().unwrap();
let server = Server::http("127.0.0.1:1337").unwrap();
let _guard = server.handle(|_| Echo::new());
println!("Listening on http://127.0.0.1:1337");
} | match transport.write(&self.buf[self.write_pos..self.read_pos]) {
Ok(0) => panic!("write ZERO"), |
storage_authentication.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rest
import (
authenticationv1 "k8s.io/api/authentication/v1"
authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
"k8s.io/apiserver/pkg/authentication/authenticator"
"k8s.io/apiserver/pkg/registry/generic"
"k8s.io/apiserver/pkg/registry/rest"
genericapiserver "k8s.io/apiserver/pkg/server"
serverstorage "k8s.io/apiserver/pkg/server/storage"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/authentication"
"k8s.io/kubernetes/pkg/registry/authentication/tokenreview"
)
type RESTStorageProvider struct {
Authenticator authenticator.Request
}
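// NewRESTStorage builds the API group info for authentication.k8s.io, registering
// tokenreviews storage for every enabled version, and returns true so the group is installed.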
func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, bool) {
// TODO figure out how to make the swagger generation stable, while allowing this endpoint to be disabled.
// if p.Authenticator == nil {
// return genericapiserver.APIGroupInfo{}, false
// }
apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(authentication.GroupName, api.Registry, api.Scheme, api.ParameterCodec, api.Codecs)
// If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go` with specific priorities.
// TODO refactor the plumbing to provide the information in the APIGroupInfo
if apiResourceConfigSource.AnyResourcesForVersionEnabled(authenticationv1beta1.SchemeGroupVersion) {
apiGroupInfo.VersionedResourcesStorageMap[authenticationv1beta1.SchemeGroupVersion.Version] = p.v1beta1Storage(apiResourceConfigSource, restOptionsGetter)
apiGroupInfo.GroupMeta.GroupVersion = authenticationv1beta1.SchemeGroupVersion
}
if apiResourceConfigSource.AnyResourcesForVersionEnabled(authenticationv1.SchemeGroupVersion) {
apiGroupInfo.VersionedResourcesStorageMap[authenticationv1.SchemeGroupVersion.Version] = p.v1Storage(apiResourceConfigSource, restOptionsGetter)
apiGroupInfo.GroupMeta.GroupVersion = authenticationv1.SchemeGroupVersion
}
return apiGroupInfo, true
}
func (p RESTStorageProvider) v1beta1Storage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) map[string]rest.Storage {
version := authenticationv1beta1.SchemeGroupVersion
storage := map[string]rest.Storage{}
if apiResourceConfigSource.AnyResourcesForVersionEnabled(authenticationv1beta1.SchemeGroupVersion) {
if apiResourceConfigSource.ResourceEnabled(version.WithResource("tokenreviews")) {
tokenReviewStorage := tokenreview.NewREST(p.Authenticator)
storage["tokenreviews"] = tokenReviewStorage
}
}
return storage
}
func (p RESTStorageProvider) v1Storage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) map[string]rest.Storage {
version := authenticationv1.SchemeGroupVersion
storage := map[string]rest.Storage{}
if apiResourceConfigSource.AnyResourcesForVersionEnabled(authenticationv1.SchemeGroupVersion) {
if apiResourceConfigSource.ResourceEnabled(version.WithResource("tokenreviews")) |
}
return storage
}
func (p RESTStorageProvider) GroupName() string {
return authentication.GroupName
}
| {
tokenReviewStorage := tokenreview.NewREST(p.Authenticator)
storage["tokenreviews"] = tokenReviewStorage
} |
loader.py | from mirage.libs import io
class Loader:
| '''
This class allows modules to be loaded dynamically.
'''
def __init__(self):
'''
This constructor generates the modules list.
'''
import mirage.modules as modules
self.modulesList = {}
for moduleName,module in modules.__modules__.items():
current = module  # __import__("modules." + module, fromlist=module)
moduleClass = getattr(current,moduleName)
self.modulesList[moduleName] = moduleClass
def getModulesNames(self):
'''
This method returns a list of existing modules' names.
:return: list of modules' names
:rtype: list of str
'''
return list(self.modulesList.keys())
def load(self,moduleName):
'''
This method returns an instance of the module whose name is provided as parameter.
:param moduleName: name of a module
:type moduleName: str
:return: an instance of the module
:rtype: core.module.Module
'''
if moduleName in self.modulesList:
return self.modulesList[moduleName]()
else:
return None
def list(self,pattern=""):
'''
Display the list of modules, filtered by the string provided as ``pattern``.
:param pattern: filter
:type pattern: str
'''
displayDict = {}
for module in self.modulesList:
info = self.modulesList[module]().info()
technology = (info["technology"]).upper()
if (
pattern in info["description"] or
pattern in info["name"] or
pattern in info["technology"] or
pattern in info["type"]
):
if technology not in displayDict:
displayDict[technology] = []
displayDict[technology].append([info["name"], info["type"], info["description"]])
for module in sorted(displayDict):
if displayDict[module]:
io.chart(["Name", "Type","Description"], sorted(displayDict[module]), "{} Modules".format(module)) |
|
operations.rs | #![doc = "generated by AutoRust"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use super::models;
#[derive(Clone)]
pub struct Client {
endpoint: String,
credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
scopes: Vec<String>,
pipeline: azure_core::Pipeline,
}
#[derive(Clone)]
pub struct ClientBuilder {
credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
endpoint: Option<String>,
scopes: Option<Vec<String>>,
}
pub const DEFAULT_ENDPOINT: &str = azure_core::resource_manager_endpoint::AZURE_PUBLIC_CLOUD;
impl ClientBuilder {
pub fn new(credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>) -> Self {
Self {
credential,
endpoint: None,
scopes: None,
}
}
pub fn endpoint(mut self, endpoint: impl Into<String>) -> Self {
self.endpoint = Some(endpoint.into());
self
}
pub fn scopes(mut self, scopes: &[&str]) -> Self {
self.scopes = Some(scopes.iter().map(|scope| (*scope).to_owned()).collect());
self
}
pub fn build(self) -> Client {
let endpoint = self.endpoint.unwrap_or_else(|| DEFAULT_ENDPOINT.to_owned());
let scopes = self.scopes.unwrap_or_else(|| vec![format!("{}/", endpoint)]);
Client::new(endpoint, self.credential, scopes)
}
}
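// Minimal usage sketch (the credential type is an assumption, e.g. one provided by
// the `azure_identity` crate; it is not part of this generated file):
//     let client = ClientBuilder::new(credential)
//         .endpoint("https://management.azure.com")
//         .build();
//     let environments = client.environments().list_by_subscription("subscription-id");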
impl Client {
pub(crate) fn endpoint(&self) -> &str {
self.endpoint.as_str()
}
pub(crate) fn token_credential(&self) -> &dyn azure_core::auth::TokenCredential {
self.credential.as_ref()
}
pub(crate) fn scopes(&self) -> Vec<&str> {
self.scopes.iter().map(String::as_str).collect()
}
pub(crate) async fn send(&self, request: impl Into<azure_core::Request>) -> azure_core::error::Result<azure_core::Response> {
let mut context = azure_core::Context::default();
let mut request = request.into();
self.pipeline.send(&mut context, &mut request).await
}
pub fn new(
endpoint: impl Into<String>,
credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
scopes: Vec<String>,
) -> Self {
let endpoint = endpoint.into();
let pipeline = azure_core::Pipeline::new(
option_env!("CARGO_PKG_NAME"),
option_env!("CARGO_PKG_VERSION"),
azure_core::ClientOptions::default(),
Vec::new(),
Vec::new(),
);
Self {
endpoint,
credential,
scopes,
pipeline,
}
}
pub fn access_policies(&self) -> access_policies::Client {
access_policies::Client(self.clone())
}
pub fn environments(&self) -> environments::Client {
environments::Client(self.clone())
}
pub fn event_sources(&self) -> event_sources::Client {
event_sources::Client(self.clone())
}
pub fn operations(&self) -> operations::Client {
operations::Client(self.clone())
}
pub fn reference_data_sets(&self) -> reference_data_sets::Client {
reference_data_sets::Client(self.clone())
}
}
#[non_exhaustive]
#[derive(Debug, thiserror :: Error)]
#[allow(non_camel_case_types)]
pub enum Error {
#[error(transparent)]
Operations_List(#[from] operations::list::Error),
#[error(transparent)]
Environments_Get(#[from] environments::get::Error),
#[error(transparent)]
Environments_CreateOrUpdate(#[from] environments::create_or_update::Error),
#[error(transparent)]
Environments_Update(#[from] environments::update::Error),
#[error(transparent)]
Environments_Delete(#[from] environments::delete::Error),
#[error(transparent)]
Environments_ListByResourceGroup(#[from] environments::list_by_resource_group::Error),
#[error(transparent)]
Environments_ListBySubscription(#[from] environments::list_by_subscription::Error),
#[error(transparent)]
EventSources_Get(#[from] event_sources::get::Error),
#[error(transparent)]
EventSources_CreateOrUpdate(#[from] event_sources::create_or_update::Error),
#[error(transparent)]
EventSources_Update(#[from] event_sources::update::Error),
#[error(transparent)]
EventSources_Delete(#[from] event_sources::delete::Error),
#[error(transparent)]
EventSources_ListByEnvironment(#[from] event_sources::list_by_environment::Error),
#[error(transparent)]
ReferenceDataSets_Get(#[from] reference_data_sets::get::Error),
#[error(transparent)]
ReferenceDataSets_CreateOrUpdate(#[from] reference_data_sets::create_or_update::Error),
#[error(transparent)]
ReferenceDataSets_Update(#[from] reference_data_sets::update::Error),
#[error(transparent)]
ReferenceDataSets_Delete(#[from] reference_data_sets::delete::Error),
#[error(transparent)]
ReferenceDataSets_ListByEnvironment(#[from] reference_data_sets::list_by_environment::Error),
#[error(transparent)]
AccessPolicies_Get(#[from] access_policies::get::Error),
#[error(transparent)]
AccessPolicies_CreateOrUpdate(#[from] access_policies::create_or_update::Error),
#[error(transparent)]
AccessPolicies_Update(#[from] access_policies::update::Error),
#[error(transparent)]
AccessPolicies_Delete(#[from] access_policies::delete::Error),
#[error(transparent)]
AccessPolicies_ListByEnvironment(#[from] access_policies::list_by_environment::Error),
}
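// Each operation module below follows the same pattern: the module's `Client` method
// returns a `Builder` capturing the request parameters, and `Builder::into_future`
// builds the HTTP request, attaches a bearer token and deserializes the response.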
pub mod operations {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(&self) -> list::Builder {
list::Builder { client: self.0.clone() }
}
}
pub mod list {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::OperationListResult, Error>> {
Box::pin(async move {
let url_str = &format!("{}/providers/Microsoft.TimeSeriesInsights/operations", self.client.endpoint(),);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-05-15");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::OperationListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
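// The resource modules (`environments`, `event_sources`, `reference_data_sets`,
// `access_policies`) expose get / create_or_update / update / delete / list builders
// that follow the same request flow as `operations::list` above.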
pub mod environments {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
environment_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
environment_name: environment_name.into(),
expand: None,
}
}
pub fn create_or_update(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
environment_name: impl Into<String>,
parameters: impl Into<models::EnvironmentCreateOrUpdateParameters>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
environment_name: environment_name.into(),
parameters: parameters.into(),
}
}
pub fn update(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
environment_name: impl Into<String>,
environment_update_parameters: impl Into<models::EnvironmentUpdateParameters>,
) -> update::Builder {
update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
environment_name: environment_name.into(),
environment_update_parameters: environment_update_parameters.into(),
}
}
pub fn delete(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
environment_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
environment_name: environment_name.into(),
}
}
pub fn list_by_resource_group(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
) -> list_by_resource_group::Builder {
list_by_resource_group::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
}
}
pub fn list_by_subscription(&self, subscription_id: impl Into<String>) -> list_by_subscription::Builder {
list_by_subscription::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
}
}
}
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) environment_name: String,
pub(crate) expand: Option<String>,
}
impl Builder {
pub fn expand(mut self, expand: impl Into<String>) -> Self {
self.expand = Some(expand.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::EnvironmentResource, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.TimeSeriesInsights/environments/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.environment_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-05-15");
if let Some(expand) = &self.expand {
url.query_pairs_mut().append_pair("$expand", expand);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::EnvironmentResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::EnvironmentResource),
Created201(models::EnvironmentResource),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Error response #response_type")]
NotFound404 {},
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) environment_name: String,
pub(crate) parameters: models::EnvironmentCreateOrUpdateParameters,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.TimeSeriesInsights/environments/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.environment_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-05-15");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::EnvironmentResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::EnvironmentResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
http::StatusCode::NOT_FOUND => Err(Error::NotFound404 {}),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod update {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) environment_name: String,
pub(crate) environment_update_parameters: models::EnvironmentUpdateParameters,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::EnvironmentResource, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.TimeSeriesInsights/environments/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.environment_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-05-15");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.environment_update_parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::EnvironmentResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) environment_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.TimeSeriesInsights/environments/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.environment_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-05-15");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list_by_resource_group {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::EnvironmentListResponse, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.TimeSeriesInsights/environments",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-05-15");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::EnvironmentListResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list_by_subscription {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::EnvironmentListResponse, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.TimeSeriesInsights/environments",
self.client.endpoint(),
&self.subscription_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-05-15");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::EnvironmentListResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod event_sources {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
environment_name: impl Into<String>,
event_source_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
environment_name: environment_name.into(),
event_source_name: event_source_name.into(),
}
}
pub fn | (
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
environment_name: impl Into<String>,
event_source_name: impl Into<String>,
parameters: impl Into<models::EventSourceCreateOrUpdateParameters>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
environment_name: environment_name.into(),
event_source_name: event_source_name.into(),
parameters: parameters.into(),
}
}
pub fn update(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
environment_name: impl Into<String>,
event_source_name: impl Into<String>,
event_source_update_parameters: impl Into<models::EventSourceUpdateParameters>,
) -> update::Builder {
update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
environment_name: environment_name.into(),
event_source_name: event_source_name.into(),
event_source_update_parameters: event_source_update_parameters.into(),
}
}
pub fn delete(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
environment_name: impl Into<String>,
event_source_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
environment_name: environment_name.into(),
event_source_name: event_source_name.into(),
}
}
pub fn list_by_environment(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
environment_name: impl Into<String>,
) -> list_by_environment::Builder {
list_by_environment::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
environment_name: environment_name.into(),
}
}
}
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) environment_name: String,
pub(crate) event_source_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::EventSourceResource, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.TimeSeriesInsights/environments/{}/eventSources/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.environment_name,
&self.event_source_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-05-15");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::EventSourceResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::EventSourceResource),
Created201(models::EventSourceResource),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) environment_name: String,
pub(crate) event_source_name: String,
pub(crate) parameters: models::EventSourceCreateOrUpdateParameters,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.TimeSeriesInsights/environments/{}/eventSources/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.environment_name,
&self.event_source_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-05-15");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::EventSourceResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::EventSourceResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod update {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) environment_name: String,
pub(crate) event_source_name: String,
pub(crate) event_source_update_parameters: models::EventSourceUpdateParameters,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::EventSourceResource, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.TimeSeriesInsights/environments/{}/eventSources/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.environment_name,
&self.event_source_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-05-15");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.event_source_update_parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::EventSourceResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) environment_name: String,
pub(crate) event_source_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.TimeSeriesInsights/environments/{}/eventSources/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.environment_name,
&self.event_source_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-05-15");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list_by_environment {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) environment_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::EventSourceListResponse, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.TimeSeriesInsights/environments/{}/eventSources",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.environment_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-05-15");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::EventSourceListResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
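// Client and request builders for Time Series Insights reference data set operations: get, create or update, update, delete, and list by environment.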
pub mod reference_data_sets {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
environment_name: impl Into<String>,
reference_data_set_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
environment_name: environment_name.into(),
reference_data_set_name: reference_data_set_name.into(),
}
}
pub fn create_or_update(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
environment_name: impl Into<String>,
reference_data_set_name: impl Into<String>,
parameters: impl Into<models::ReferenceDataSetCreateOrUpdateParameters>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
environment_name: environment_name.into(),
reference_data_set_name: reference_data_set_name.into(),
parameters: parameters.into(),
}
}
pub fn update(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
environment_name: impl Into<String>,
reference_data_set_name: impl Into<String>,
reference_data_set_update_parameters: impl Into<models::ReferenceDataSetUpdateParameters>,
) -> update::Builder {
update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
environment_name: environment_name.into(),
reference_data_set_name: reference_data_set_name.into(),
reference_data_set_update_parameters: reference_data_set_update_parameters.into(),
}
}
pub fn delete(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
environment_name: impl Into<String>,
reference_data_set_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
environment_name: environment_name.into(),
reference_data_set_name: reference_data_set_name.into(),
}
}
pub fn list_by_environment(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
environment_name: impl Into<String>,
) -> list_by_environment::Builder {
list_by_environment::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
environment_name: environment_name.into(),
}
}
}
pub mod get {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) environment_name: String,
pub(crate) reference_data_set_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ReferenceDataSetResource, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.TimeSeriesInsights/environments/{}/referenceDataSets/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.environment_name,
&self.reference_data_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-05-15");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ReferenceDataSetResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::ReferenceDataSetResource),
Created201(models::ReferenceDataSetResource),
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) environment_name: String,
pub(crate) reference_data_set_name: String,
pub(crate) parameters: models::ReferenceDataSetCreateOrUpdateParameters,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.TimeSeriesInsights/environments/{}/referenceDataSets/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.environment_name,
&self.reference_data_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-05-15");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ReferenceDataSetResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ReferenceDataSetResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod update {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) environment_name: String,
pub(crate) reference_data_set_name: String,
pub(crate) reference_data_set_update_parameters: models::ReferenceDataSetUpdateParameters,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ReferenceDataSetResource, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.TimeSeriesInsights/environments/{}/referenceDataSets/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.environment_name,
&self.reference_data_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-05-15");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.reference_data_set_update_parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ReferenceDataSetResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) environment_name: String,
pub(crate) reference_data_set_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.TimeSeriesInsights/environments/{}/referenceDataSets/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.environment_name,
&self.reference_data_set_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-05-15");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list_by_environment {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) environment_name: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::ReferenceDataSetListResponse, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.TimeSeriesInsights/environments/{}/referenceDataSets",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.environment_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-05-15");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ReferenceDataSetListResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
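// Client and request builders for Time Series Insights access policy operations: get, create or update, update, delete, and list by environment.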
pub mod access_policies {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
environment_name: impl Into<String>,
access_policy_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
environment_name: environment_name.into(),
access_policy_name: access_policy_name.into(),
}
}
pub fn create_or_update(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
environment_name: impl Into<String>,
access_policy_name: impl Into<String>,
parameters: impl Into<models::AccessPolicyCreateOrUpdateParameters>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
environment_name: environment_name.into(),
access_policy_name: access_policy_name.into(),
parameters: parameters.into(),
}
}
pub fn update(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
environment_name: impl Into<String>,
access_policy_name: impl Into<String>,
access_policy_update_parameters: impl Into<models::AccessPolicyUpdateParameters>,
) -> update::Builder {
update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
environment_name: environment_name.into(),
access_policy_name: access_policy_name.into(),
access_policy_update_parameters: access_policy_update_parameters.into(),
}
}
pub fn delete(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
environment_name: impl Into<String>,
access_policy_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
environment_name: environment_name.into(),
access_policy_name: access_policy_name.into(),
}
}
pub fn list_by_environment(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
environment_name: impl Into<String>,
) -> list_by_environment::Builder {
list_by_environment::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
environment_name: environment_name.into(),
}
}
}
pub mod get {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) environment_name: String,
pub(crate) access_policy_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::AccessPolicyResource, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.TimeSeriesInsights/environments/{}/accessPolicies/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.environment_name,
&self.access_policy_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-05-15");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::AccessPolicyResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200(models::AccessPolicyResource),
Created201(models::AccessPolicyResource),
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) environment_name: String,
pub(crate) access_policy_name: String,
pub(crate) parameters: models::AccessPolicyCreateOrUpdateParameters,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.TimeSeriesInsights/environments/{}/accessPolicies/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.environment_name,
&self.access_policy_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-05-15");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::AccessPolicyResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::AccessPolicyResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod update {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) environment_name: String,
pub(crate) access_policy_name: String,
pub(crate) access_policy_update_parameters: models::AccessPolicyUpdateParameters,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::AccessPolicyResource, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.TimeSeriesInsights/environments/{}/accessPolicies/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.environment_name,
&self.access_policy_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-05-15");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.access_policy_update_parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::AccessPolicyResource =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) environment_name: String,
pub(crate) access_policy_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.TimeSeriesInsights/environments/{}/accessPolicies/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.environment_name,
&self.access_policy_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-05-15");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list_by_environment {
use super::models;
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) environment_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::AccessPolicyListResponse, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.TimeSeriesInsights/environments/{}/accessPolicies",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.environment_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2020-05-15");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::AccessPolicyListResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
| create_or_update |
container_windows.go | // +build windows

package daemon
import (
"fmt"
"strings"
"github.com/docker/docker/daemon/execdriver"
)
// DefaultPathEnv is deliberately empty on Windows as the default path will be set by
// the container. Docker has no context of what the default path should be.
const DefaultPathEnv = ""
// Container holds fields specific to the Windows implementation. See
// CommonContainer for standard fields common to all containers.
type Container struct {
CommonContainer
// Fields below here are platform specific.
}
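// killProcessDirectly is a no-op on Windows.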
func killProcessDirectly(container *Container) error |
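// setupLinkedContainers is a no-op on Windows.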
func (container *Container) setupLinkedContainers() ([]string, error) {
return nil, nil
}
func (container *Container) createDaemonEnvironment(linkedEnv []string) []string {
// On Windows, nothing to link. Just return the container environment.
return container.Config.Env
}
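// initializeNetworking is a no-op on Windows.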
func (container *Container) initializeNetworking() error {
return nil
}
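// setupWorkingDirectory is a no-op on Windows.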
func (container *Container) setupWorkingDirectory() error {
return nil
}
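// populateCommand configures the execdriver.Command for the container, wiring up the network interface, process configuration and image layer paths used to start it on Windows.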
func populateCommand(c *Container, env []string) error {
en := &execdriver.Network{
Interface: nil,
}
parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
switch parts[0] {
case "none":
case "default", "": // empty string to support existing containers
if !c.Config.NetworkDisabled {
en.Interface = &execdriver.NetworkInterface{
MacAddress: c.Config.MacAddress,
Bridge: c.daemon.configStore.Bridge.VirtualSwitchName,
PortBindings: c.hostConfig.PortBindings,
// TODO Windows. Include IPAddress. There already is a
// property IPAddress on execDrive.CommonNetworkInterface,
// but there is no CLI option in docker to pass through
// an IPAddress on docker run.
}
}
default:
return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode)
}
pid := &execdriver.Pid{}
// TODO Windows. This can probably be factored out.
pid.HostPid = c.hostConfig.PidMode.IsHost()
// TODO Windows. Resource controls to be implemented later.
resources := &execdriver.Resources{}
// TODO Windows. Further refactoring required (privileged/user)
processConfig := execdriver.ProcessConfig{
Privileged: c.hostConfig.Privileged,
Entrypoint: c.Path,
Arguments: c.Args,
Tty: c.Config.Tty,
User: c.Config.User,
ConsoleSize: c.hostConfig.ConsoleSize,
}
processConfig.Env = env
var layerPaths []string
img, err := c.daemon.graph.Get(c.ImageID)
if err != nil {
return fmt.Errorf("Failed to graph.Get on ImageID %s - %s", c.ImageID, err)
}
for i := img; i != nil && err == nil; i, err = c.daemon.graph.GetParent(i) {
lp, err := c.daemon.driver.Get(i.ID, "")
if err != nil {
return fmt.Errorf("Failed to get layer path from graphdriver %s for ImageID %s - %s", c.daemon.driver.String(), i.ID, err)
}
layerPaths = append(layerPaths, lp)
err = c.daemon.driver.Put(i.ID)
if err != nil {
return fmt.Errorf("Failed to put layer path from graphdriver %s for ImageID %s - %s", c.daemon.driver.String(), i.ID, err)
}
}
m, err := c.daemon.driver.GetMetadata(c.ID)
if err != nil {
return fmt.Errorf("Failed to get layer metadata - %s", err)
}
layerFolder := m["dir"]
// TODO Windows: Factor out remainder of unused fields.
c.command = &execdriver.Command{
ID: c.ID,
Rootfs: c.rootfsPath(),
ReadonlyRootfs: c.hostConfig.ReadonlyRootfs,
InitPath: "/.dockerinit",
WorkingDir: c.Config.WorkingDir,
Network: en,
Pid: pid,
Resources: resources,
CapAdd: c.hostConfig.CapAdd.Slice(),
CapDrop: c.hostConfig.CapDrop.Slice(),
ProcessConfig: processConfig,
ProcessLabel: c.getProcessLabel(),
MountLabel: c.getMountLabel(),
FirstStart: !c.HasBeenStartedBefore,
LayerFolder: layerFolder,
LayerPaths: layerPaths,
}
return nil
}
// getSize returns the container's real size and virtual size.
func (container *Container) getSize() (int64, int64) {
// TODO Windows
return 0, 0
}
// allocateNetwork is a no-op on Windows.
func (container *Container) allocateNetwork() error {
return nil
}
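// updateNetwork is a no-op on Windows.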
func (container *Container) updateNetwork() error {
return nil
}
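// releaseNetwork is a no-op on Windows.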
func (container *Container) releaseNetwork() {
}
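// unmountVolumes is a no-op on Windows.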
func (container *Container) unmountVolumes(forceSyscall bool) error {
return nil
}
// prepareMountPoints is a no-op on Windows.
func (container *Container) prepareMountPoints() error {
return nil
}
// removeMountPoints is a no-op on Windows.
func (container *Container) removeMountPoints(_ bool) error {
return nil
}
| {
return nil
} |
babel-polyfill.7.6.0.js | !function r(c,a,f){function o(n,t){if(!a[n]){if(!c[n]){var e="function"==typeof require&&require;if(!t&&e)return e(n,!0);if(s)return s(n,!0);var i=new Error("Cannot find module '"+n+"'");throw i.code="MODULE_NOT_FOUND",i}var u=a[n]={exports:{}};c[n][0].call(u.exports,function(t){return o(c[n][1][t]||t)},u,u.exports,r,c,a,f)}return a[n].exports}for(var s="function"==typeof require&&require,t=0;t<f.length;t++)o(f[t]);return o}({1:[function(t,n,r){"use strict";t(2);var e=function _interopRequireDefault(t){return t&&t.__esModule?t:{default:t}}(t(15));e.default._babelPolyfill&&"undefined"!=typeof console&&console.warn&&console.warn("@babel/polyfill is loaded more than once on this page. This is probably not desirable/intended and may have consequences if different versions of the polyfills are applied sequentially. If you do need to load the polyfill more than once, use @babel/polyfill/noConflict instead to bypass the warning."),e.default._babelPolyfill=!0},{15:15,2:2}],2:[function(t,n,r){"use strict";t(3),t(5),t(4),t(11),t(10),t(13),t(12),t(14),t(7),t(8),t(6),t(9),t(306),t(307)},{10:10,11:11,12:12,13:13,14:14,3:3,306:306,307:307,4:4,5:5,6:6,7:7,8:8,9:9}],3:[function(t,n,r){t(278),t(214),t(216),t(215),t(218),t(220),t(225),t(219),t(217),t(227),t(226),t(222),t(223),t(221),t(213),t(224),t(228),t(229),t(180),t(182),t(181),t(231),t(230),t(201),t(211),t(212),t(202),t(203),t(204),t(205),t(206),t(207),t(208),t(209),t(210),t(184),t(185),t(186),t(187),t(188),t(189),t(190),t(191),t(192),t(193),t(194),t(195),t(196),t(197),t(198),t(199),t(200),t(265),t(270),t(277),t(268),t(260),t(261),t(266),t(271),t(273),t(256),t(257),t(258),t(259),t(262),t(263),t(264),t(267),t(269),t(272),t(274),t(275),t(276),t(175),t(177),t(176),t(179),t(178),t(163),t(161),t(168),t(165),t(171),t(173),t(160),t(167),t(157),t(172),t(155),t(170),t(169),t(162),t(166),t(154),t(156),t(159),t(158),t(174),t(164),t(247),t(248),t(254),t(249),t(250),t(251),t(252),t(253),t(232),t(183),t(255),t(290),t(291),t(279),t(280),t(285),t(288),t(289),t(283),t(286),t(284),t(287),t(281),t(282),t(233),t(234),t(235),t(236),t(237),t(240),t(238),t(239),t(241),t(242),t(243),t(244),t(246),t(245),n.exports=t(52)},{154:154,155:155,156:156,157:157,158:158,159:159,160:160,161:161,162:162,163:163,164:164,165:165,166:166,167:167,168:168,169:169,170:170,171:171,172:172,173:173,174:174,175:175,176:176,177:177,178:178,179:179,180:180,181:181,182:182,183:183,184:184,185:185,186:186,187:187,188:188,189:189,190:190,191:191,192:192,193:193,194:194,195:195,196:196,197:197,198:198,199:199,200:200,201:201,202:202,203:203,204:204,205:205,206:206,207:207,208:208,209:209,210:210,211:211,212:212,213:213,214:214,215:215,216:216,217:217,218:218,219:219,220:220,221:221,222:222,223:223,224:224,225:225,226:226,227:227,228:228,229:229,230:230,231:231,232:232,233:233,234:234,235:235,236:236,237:237,238:238,239:239,240:240,241:241,242:242,243:243,244:244,245:245,246:246,247:247,248:248,249:249,250:250,251:251,252:252,253:253,254:254,255:255,256:256,257:257,258:258,259:259,260:260,261:261,262:262,263:263,264:264,265:265,266:266,267:267,268:268,269:269,270:270,271:271,272:272,273:273,274:274,275:275,276:276,277:277,278:278,279:279,280:280,281:281,282:282,283:283,284:284,285:285,286:286,287:287,288:288,289:289,290:290,291:291,52:52}],4:[function(t,n,r){t(292),n.exports=t(52).Array.flatMap},{292:292,52:52}],5:[function(t,n,r){t(293),n.exports=t(52).Array.includes},{293:293,52:52}],6:[function(t,n,r){t(294),n.exports=t(52).Object.entries},{294:294,52:52}],7:[function(t,n,r){t(
295),n.exports=t(52).Object.getOwnPropertyDescriptors},{295:295,52:52}],8:[function(t,n,r){t(296),n.exports=t(52).Object.values},{296:296,52:52}],9:[function(t,n,r){"use strict";t(232),t(297),n.exports=t(52).Promise.finally},{232:232,297:297,52:52}],10:[function(t,n,r){t(298),n.exports=t(52).String.padEnd},{298:298,52:52}],11:[function(t,n,r){t(299),n.exports=t(52).String.padStart},{299:299,52:52}],12:[function(t,n,r){t(301),n.exports=t(52).String.trimRight},{301:301,52:52}],13:[function(t,n,r){t(300),n.exports=t(52).String.trimLeft},{300:300,52:52}],14:[function(t,n,r){t(302),n.exports=t(151).f("asyncIterator")},{151:151,302:302}],15:[function(t,n,r){t(32),n.exports=t(18).global},{18:18,32:32}],16:[function(t,n,r){n.exports=function(t){if("function"!=typeof t)throw TypeError(t+" is not a function!");return t}},{}],17:[function(t,n,r){var e=t(28);n.exports=function(t){if(!e(t))throw TypeError(t+" is not an object!");return t}},{28:28}],18:[function(t,n,r){var e=n.exports={version:"2.6.9"};"number"==typeof __e&&(__e=e)},{}],19:[function(t,n,r){var o=t(16);n.exports=function(e,i,t){if(o(e),void 0===i)return e;switch(t){case 1:return function(t){return e.call(i,t)};case 2:return function(t,n){return e.call(i,t,n)};case 3:return function(t,n,r){return e.call(i,t,n,r)}}return function(){return e.apply(i,arguments)}}},{16:16}],20:[function(t,n,r){n.exports=!t(23)(function(){return 7!=Object.defineProperty({},"a",{get:function(){return 7}}).a})},{23:23}],21:[function(t,n,r){var e=t(28),i=t(24).document,o=e(i)&&e(i.createElement);n.exports=function(t){return o?i.createElement(t):{}}},{24:24,28:28}],22:[function(t,n,r){var g=t(24),y=t(18),d=t(19),x=t(26),m=t(25),S="prototype",b=function(t,n,r){var e,i,o,u=t&b.F,c=t&b.G,a=t&b.S,f=t&b.P,s=t&b.B,l=t&b.W,h=c?y:y[n]||(y[n]={}),p=h[S],v=c?g:a?g[n]:(g[n]||{})[S];for(e in c&&(r=n),r)(i=!u&&v&&void 0!==v[e])&&m(h,e)||(o=i?v[e]:r[e],h[e]=c&&"function"!=typeof v[e]?r[e]:s&&i?d(o,g):l&&v[e]==o?function(e){function qb(t,n,r){if(this instanceof e){switch(arguments.length){case 0:return new e;case 1:return new e(t);case 2:return new e(t,n)}return new e(t,n,r)}return e.apply(this,arguments)}return qb[S]=e[S],qb}(o):f&&"function"==typeof o?d(Function.call,o):o,f&&((h.virtual||(h.virtual={}))[e]=o,t&b.R&&p&&!p[e]&&x(p,e,o)))};b.F=1,b.G=2,b.S=4,b.P=8,b.B=16,b.W=32,b.U=64,b.R=128,n.exports=b},{18:18,19:19,24:24,25:25,26:26}],23:[function(t,n,r){n.exports=function(t){try{return!!t()}catch(t){return!0}}},{}],24:[function(t,n,r){var e=n.exports="undefined"!=typeof window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();"number"==typeof __g&&(__g=e)},{}],25:[function(t,n,r){var e={}.hasOwnProperty;n.exports=function(t,n){return e.call(t,n)}},{}],26:[function(t,n,r){var e=t(29),i=t(30);n.exports=t(20)?function(t,n,r){return e.f(t,n,i(1,r))}:function(t,n,r){return t[n]=r,t}},{20:20,29:29,30:30}],27:[function(t,n,r){n.exports=!t(20)&&!t(23)(function(){return 7!=Object.defineProperty(t(21)("div"),"a",{get:function(){return 7}}).a})},{20:20,21:21,23:23}],28:[function(t,n,r){n.exports=function(t){return"object"==typeof t?null!==t:"function"==typeof t}},{}],29:[function(t,n,r){var e=t(17),i=t(27),o=t(31),u=Object.defineProperty;r.f=t(20)?Object.defineProperty:function defineProperty(t,n,r){if(e(t),n=o(n,!0),e(r),i)try{return u(t,n,r)}catch(t){}if("get"in r||"set"in r)throw TypeError("Accessors not supported!");return"value"in 
r&&(t[n]=r.value),t}},{17:17,20:20,27:27,31:31}],30:[function(t,n,r){n.exports=function(t,n){return{enumerable:!(1&t),configurable:!(2&t),writable:!(4&t),value:n}}},{}],31:[function(t,n,r){var i=t(28);n.exports=function(t,n){if(!i(t))return t;var r,e;if(n&&"function"==typeof(r=t.toString)&&!i(e=r.call(t)))return e;if("function"==typeof(r=t.valueOf)&&!i(e=r.call(t)))return e;if(!n&&"function"==typeof(r=t.toString)&&!i(e=r.call(t)))return e;throw TypeError("Can't convert object to primitive value")}},{28:28}],32:[function(t,n,r){var e=t(22);e(e.G,{global:t(24)})},{22:22,24:24}],33:[function(t,n,r){arguments[4][16][0].apply(r,arguments)},{16:16}],34:[function(t,n,r){var e=t(48);n.exports=function(t,n){if("number"!=typeof t&&"Number"!=e(t))throw TypeError(n);return+t}},{48:48}],35:[function(t,n,r){var e=t(152)("unscopables"),i=Array.prototype;null==i[e]&&t(72)(i,e,{}),n.exports=function(t){i[e][t]=!0}},{152:152,72:72}],36:[function(t,n,r){"use strict";var e=t(129)(!0);n.exports=function(t,n,r){return n+(r?e(t,n).length:1)}},{129:129}],37:[function(t,n,r){n.exports=function(t,n,r,e){if(!(t instanceof n)||void 0!==e&&e in t)throw TypeError(r+": incorrect invocation!");return t}},{}],38:[function(t,n,r){arguments[4][17][0].apply(r,arguments)},{17:17,81:81}],39:[function(t,n,r){"use strict";var f=t(142),s=t(137),l=t(141);n.exports=[].copyWithin||function copyWithin(t,n){var r=f(this),e=l(r.length),i=s(t,e),o=s(n,e),u=2<arguments.length?arguments[2]:void 0,c=Math.min((void 0===u?e:s(u,e))-o,e-i),a=1;for(o<i&&i<o+c&&(a=-1,o+=c-1,i+=c-1);0<c--;)o in r?r[i]=r[o]:delete r[i],i+=a,o+=a;return r}},{137:137,141:141,142:142}],40:[function(t,n,r){"use strict";var c=t(142),a=t(137),f=t(141);n.exports=function fill(t){for(var n=c(this),r=f(n.length),e=arguments.length,i=a(1<e?arguments[1]:void 0,r),o=2<e?arguments[2]:void 0,u=void 0===o?r:a(o,r);i<u;)n[i++]=t;return n}},{137:137,141:141,142:142}],41:[function(t,n,r){var a=t(140),f=t(141),s=t(137);n.exports=function(c){return function(t,n,r){var e,i=a(t),o=f(i.length),u=s(r,o);if(c&&n!=n){for(;u<o;)if((e=i[u++])!=e)return!0}else for(;u<o;u++)if((c||u in i)&&i[u]===n)return c||u||0;return!c&&-1}}},{137:137,140:140,141:141}],42:[function(t,n,r){var m=t(54),S=t(77),b=t(142),w=t(141),e=t(45);n.exports=function(l,t){var h=1==l,p=2==l,v=3==l,g=4==l,y=6==l,d=5==l||y,x=t||e;return function(t,n,r){for(var e,i,o=b(t),u=S(o),c=m(n,r,3),a=w(u.length),f=0,s=h?x(t,a):p?x(t,0):void 0;f<a;f++)if((d||f in u)&&(i=c(e=u[f],f,o),l))if(h)s[f]=i;else if(i)switch(l){case 3:return!0;case 5:return e;case 6:return f;case 2:s.push(e)}else if(g)return!1;return y?-1:v||g?g:s}}},{141:141,142:142,45:45,54:54,77:77}],43:[function(t,n,r){var s=t(33),l=t(142),h=t(77),p=t(141);n.exports=function(t,n,r,e,i){s(n);var o=l(t),u=h(o),c=p(o.length),a=i?c-1:0,f=i?-1:1;if(r<2)for(;;){if(a in u){e=u[a],a+=f;break}if(a+=f,i?a<0:c<=a)throw TypeError("Reduce of empty array with no initial value")}for(;i?0<=a:a<c;a+=f)a in u&&(e=n(e,u[a],a,o));return e}},{141:141,142:142,33:33,77:77}],44:[function(t,n,r){var e=t(81),i=t(79),o=t(152)("species");n.exports=function(t){var n;return i(t)&&("function"!=typeof(n=t.constructor)||n!==Array&&!i(n.prototype)||(n=void 0),e(n)&&null===(n=n[o])&&(n=void 0)),void 0===n?Array:n}},{152:152,79:79,81:81}],45:[function(t,n,r){var e=t(44);n.exports=function(t,n){return new(e(t))(n)}},{44:44}],46:[function(t,n,r){"use strict";var o=t(33),u=t(81),c=t(76),a=[].slice,f={};n.exports=Function.bind||function bind(n){var r=o(this),e=a.call(arguments,1),i=function(){var 
t=e.concat(a.call(arguments));return this instanceof i?function(t,n,r){if(!(n in f)){for(var e=[],i=0;i<n;i++)e[i]="a["+i+"]";f[n]=Function("F,a","return new F("+e.join(",")+")")}return f[n](t,r)}(r,t.length,t):c(r,t,n)};return u(r.prototype)&&(i.prototype=r.prototype),i}},{33:33,76:76,81:81}],47:[function(t,n,r){var i=t(48),o=t(152)("toStringTag"),u="Arguments"==i(function(){return arguments}());n.exports=function(t){var n,r,e;return void 0===t?"Undefined":null===t?"Null":"string"==typeof(r=function(t,n){try{return t[n]}catch(t){}}(n=Object(t),o))?r:u?i(n):"Object"==(e=i(n))&&"function"==typeof n.callee?"Arguments":e}},{152:152,48:48}],48:[function(t,n,r){var e={}.toString;n.exports=function(t){return e.call(t).slice(8,-1)}},{}],49:[function(t,n,r){"use strict";function ag(t,n){var r,e=p(n);if("F"!==e)return t._i[e];for(r=t._f;r;r=r.n)if(r.k==n)return r}var u=t(99).f,c=t(98),a=t(117),f=t(54),s=t(37),l=t(68),e=t(85),i=t(87),o=t(123),h=t(58),p=t(94).fastKey,v=t(149),g=h?"_s":"size";n.exports={getConstructor:function(t,o,r,e){var i=t(function(t,n){s(t,i,o,"_i"),t._t=o,t._i=c(null),t._f=void 0,t._l=void 0,t[g]=0,null!=n&&l(n,r,t[e],t)});return a(i.prototype,{clear:function clear(){for(var t=v(this,o),n=t._i,r=t._f;r;r=r.n)r.r=!0,r.p&&(r.p=r.p.n=void 0),delete n[r.i];t._f=t._l=void 0,t[g]=0},delete:function(t){var n=v(this,o),r=ag(n,t);if(r){var e=r.n,i=r.p;delete n._i[r.i],r.r=!0,i&&(i.n=e),e&&(e.p=i),n._f==r&&(n._f=e),n._l==r&&(n._l=i),n[g]--}return!!r},forEach:function forEach(t){v(this,o);for(var n,r=f(t,1<arguments.length?arguments[1]:void 0,3);n=n?n.n:this._f;)for(r(n.v,n.k,this);n&&n.r;)n=n.p},has:function has(t){return!!ag(v(this,o),t)}}),h&&u(i.prototype,"size",{get:function(){return v(this,o)[g]}}),i},def:function(t,n,r){var e,i,o=ag(t,n);return o?o.v=r:(t._l=o={i:i=p(n,!0),k:n,v:r,p:e=t._l,n:void 0,r:!1},t._f||(t._f=o),e&&(e.n=o),t[g]++,"F"!==i&&(t._i[i]=o)),t},getEntry:ag,setStrong:function(t,r,n){e(t,r,function(t,n){this._t=v(t,r),this._k=n,this._l=void 0},function(){for(var t=this,n=t._k,r=t._l;r&&r.r;)r=r.p;return t._t&&(t._l=r=r?r.n:t._t._f)?i(0,"keys"==n?r.k:"values"==n?r.v:[r.k,r.v]):(t._t=void 0,i(1))},n?"entries":"values",!n,!0),o(r)}}},{117:117,123:123,149:149,37:37,54:54,58:58,68:68,85:85,87:87,94:94,98:98,99:99}],50:[function(t,n,r){"use strict";function _g(t){return t._l||(t._l=new g)}function bh(t,n){return o(t.a,function(t){return t[0]===n})}var u=t(117),c=t(94).getWeak,i=t(38),a=t(81),f=t(37),s=t(68),e=t(42),l=t(71),h=t(149),o=e(5),p=e(6),v=0,g=function(){this.a=[]};g.prototype={get:function(t){var n=bh(this,t);if(n)return n[1]},has:function(t){return!!bh(this,t)},set:function(t,n){var r=bh(this,t);r?r[1]=n:this.a.push([t,n])},delete:function(n){var t=p(this.a,function(t){return t[0]===n});return~t&&this.a.splice(t,1),!!~t}},n.exports={getConstructor:function(t,r,e,i){var o=t(function(t,n){f(t,o,r,"_i"),t._t=r,t._i=v++,t._l=void 0,null!=n&&s(n,e,t[i],t)});return u(o.prototype,{delete:function(t){if(!a(t))return!1;var n=c(t);return!0===n?_g(h(this,r)).delete(t):n&&l(n,this._i)&&delete n[this._i]},has:function has(t){if(!a(t))return!1;var n=c(t);return!0===n?_g(h(this,r)).has(t):n&&l(n,this._i)}}),o},def:function(t,n,r){var e=c(i(n),!0);return!0===e?_g(t).set(n,r):e[t._i]=r,t},ufstore:_g}},{117:117,149:149,37:37,38:38,42:42,68:68,71:71,81:81,94:94}],51:[function(t,n,r){"use strict";var y=t(70),d=t(62),x=t(118),m=t(117),S=t(94),b=t(68),w=t(37),_=t(81),E=t(64),O=t(86),F=t(124),I=t(75);n.exports=function(e,t,n,r,i,o){function ci(t){var 
r=f[t];x(f,t,"delete"==t?function(t){return!(o&&!_(t))&&r.call(this,0===t?0:t)}:"has"==t?function has(t){return!(o&&!_(t))&&r.call(this,0===t?0:t)}:"get"==t?function get(t){return o&&!_(t)?void 0:r.call(this,0===t?0:t)}:"add"==t?function add(t){return r.call(this,0===t?0:t),this}:function set(t,n){return r.call(this,0===t?0:t,n),this})}var u=y[e],c=u,a=i?"set":"add",f=c&&c.prototype,s={};if("function"==typeof c&&(o||f.forEach&&!E(function(){(new c).entries().next()}))){var l=new c,h=l[a](o?{}:-0,1)!=l,p=E(function(){l.has(1)}),v=O(function(t){new c(t)}),g=!o&&E(function(){for(var t=new c,n=5;n--;)t[a](n,n);return!t.has(-0)});v||(((c=t(function(t,n){w(t,c,e);var r=I(new u,t,c);return null!=n&&b(n,i,r[a],r),r})).prototype=f).constructor=c),(p||g)&&(ci("delete"),ci("has"),i&&ci("get")),(g||h)&&ci(a),o&&f.clear&&delete f.clear}else c=r.getConstructor(t,e,i,a),m(c.prototype,n),S.NEED=!0;return F(c,e),s[e]=c,d(d.G+d.W+d.F*(c!=u),s),o||r.setStrong(c,e,i),c}},{117:117,118:118,124:124,37:37,62:62,64:64,68:68,70:70,75:75,81:81,86:86,94:94}],52:[function(t,n,r){arguments[4][18][0].apply(r,arguments)},{18:18}],53:[function(t,n,r){"use strict";var e=t(99),i=t(116);n.exports=function(t,n,r){n in t?e.f(t,n,i(0,r)):t[n]=r}},{116:116,99:99}],54:[function(t,n,r){arguments[4][19][0].apply(r,arguments)},{19:19,33:33}],55:[function(t,n,r){"use strict";function Qi(t){return 9<t?t:"0"+t}var e=t(64),i=Date.prototype.getTime,o=Date.prototype.toISOString;n.exports=e(function(){return"0385-07-25T07:06:39.999Z"!=o.call(new Date(-5e13-1))})||!e(function(){o.call(new Date(NaN))})?function toISOString(){if(!isFinite(i.call(this)))throw RangeError("Invalid time value");var t=this,n=t.getUTCFullYear(),r=t.getUTCMilliseconds(),e=n<0?"-":9999<n?"+":"";return e+("00000"+Math.abs(n)).slice(e?-6:-4)+"-"+Qi(t.getUTCMonth()+1)+"-"+Qi(t.getUTCDate())+"T"+Qi(t.getUTCHours())+":"+Qi(t.getUTCMinutes())+":"+Qi(t.getUTCSeconds())+"."+(99<r?r:"0"+Qi(r))+"Z"}:o},{64:64}],56:[function(t,n,r){"use strict";var e=t(38),i=t(143);n.exports=function(t){if("string"!==t&&"number"!==t&&"default"!==t)throw TypeError("Incorrect hint");return i(e(this),"number"!=t)}},{143:143,38:38}],57:[function(t,n,r){n.exports=function(t){if(null==t)throw TypeError("Can't call method on "+t);return t}},{}],58:[function(t,n,r){arguments[4][20][0].apply(r,arguments)},{20:20,64:64}],59:[function(t,n,r){arguments[4][21][0].apply(r,arguments)},{21:21,70:70,81:81}],60:[function(t,n,r){n.exports="constructor,hasOwnProperty,isPrototypeOf,propertyIsEnumerable,toLocaleString,toString,valueOf".split(",")},{}],61:[function(t,n,r){var c=t(107),a=t(104),f=t(108);n.exports=function(t){var n=c(t),r=a.f;if(r)for(var e,i=r(t),o=f.f,u=0;i.length>u;)o.call(t,e=i[u++])&&n.push(e);return n}},{104:104,107:107,108:108}],62:[function(t,n,r){var g=t(70),y=t(52),d=t(72),x=t(118),m=t(54),S="prototype",b=function(t,n,r){var e,i,o,u,c=t&b.F,a=t&b.G,f=t&b.S,s=t&b.P,l=t&b.B,h=a?g:f?g[n]||(g[n]={}):(g[n]||{})[S],p=a?y:y[n]||(y[n]={}),v=p[S]||(p[S]={});for(e in a&&(r=n),r)o=((i=!c&&h&&void 0!==h[e])?h:r)[e],u=l&&i?m(o,g):s&&"function"==typeof o?m(Function.call,o):o,h&&x(h,e,o,t&b.U),p[e]!=o&&d(p,e,u),s&&v[e]!=o&&(v[e]=o)};g.core=y,b.F=1,b.G=2,b.S=4,b.P=8,b.B=16,b.W=32,b.U=64,b.R=128,n.exports=b},{118:118,52:52,54:54,70:70,72:72}],63:[function(t,n,r){var e=t(152)("match");n.exports=function(n){var r=/./;try{"/./"[n](r)}catch(t){try{return r[e]=!1,!"/./"[n](r)}catch(t){}}return!0}},{152:152}],64:[function(t,n,r){arguments[4][23][0].apply(r,arguments)},{23:23}],65:[function(t,n,r){"use 
strict";t(248);var s=t(118),l=t(72),h=t(64),p=t(57),v=t(152),g=t(120),y=v("species"),d=!h(function(){var t=/./;return t.exec=function(){var t=[];return t.groups={a:"7"},t},"7"!=="".replace(t,"$<a>")}),x=function(){var t=/(?:)/,n=t.exec;t.exec=function(){return n.apply(this,arguments)};var r="ab".split(t);return 2===r.length&&"a"===r[0]&&"b"===r[1]}();n.exports=function(r,t,n){var e=v(r),o=!h(function(){var t={};return t[e]=function(){return 7},7!=""[r](t)}),i=o?!h(function(){var t=!1,n=/a/;return n.exec=function(){return t=!0,null},"split"===r&&(n.constructor={},n.constructor[y]=function(){return n}),n[e](""),!t}):void 0;if(!o||!i||"replace"===r&&!d||"split"===r&&!x){var u=/./[e],c=n(p,e,""[r],function maybeCallNative(t,n,r,e,i){return n.exec===g?o&&!i?{done:!0,value:u.call(n,r,e)}:{done:!0,value:t.call(r,n,e)}:{done:!1}}),a=c[0],f=c[1];s(String.prototype,r,a),l(RegExp.prototype,e,2==t?function(t,n){return f.call(t,this,n)}:function(t){return f.call(t,this)})}}},{118:118,120:120,152:152,248:248,57:57,64:64,72:72}],66:[function(t,n,r){"use strict";var e=t(38);n.exports=function(){var t=e(this),n="";return t.global&&(n+="g"),t.ignoreCase&&(n+="i"),t.multiline&&(n+="m"),t.unicode&&(n+="u"),t.sticky&&(n+="y"),n}},{38:38}],67:[function(t,n,r){"use strict";var p=t(79),v=t(81),g=t(141),y=t(54),d=t(152)("isConcatSpreadable");n.exports=function flattenIntoArray(t,n,r,e,i,o,u,c){for(var a,f,s=i,l=0,h=!!u&&y(u,c,3);l<e;){if(l in r){if(a=h?h(r[l],l,n):r[l],f=!1,v(a)&&(f=void 0!==(f=a[d])?!!f:p(a)),f&&0<o)s=flattenIntoArray(t,n,a,g(a.length),s,o-1)-1;else{if(9007199254740991<=s)throw TypeError();t[s]=a}s++}l++}return s}},{141:141,152:152,54:54,79:79,81:81}],68:[function(t,n,r){var h=t(54),p=t(83),v=t(78),g=t(38),y=t(141),d=t(153),x={},m={};(r=n.exports=function(t,n,r,e,i){var o,u,c,a,f=i?function(){return t}:d(t),s=h(r,e,n?2:1),l=0;if("function"!=typeof f)throw TypeError(t+" is not iterable!");if(v(f)){for(o=y(t.length);l<o;l++)if((a=n?s(g(u=t[l])[0],u[1]):s(t[l]))===x||a===m)return a}else for(c=f.call(t);!(u=c.next()).done;)if((a=p(c,s,u.value,n))===x||a===m)return a}).BREAK=x,r.RETURN=m},{141:141,153:153,38:38,54:54,78:78,83:83}],69:[function(t,n,r){n.exports=t(126)("native-function-to-string",Function.toString)},{126:126}],70:[function(t,n,r){arguments[4][24][0].apply(r,arguments)},{24:24}],71:[function(t,n,r){arguments[4][25][0].apply(r,arguments)},{25:25}],72:[function(t,n,r){arguments[4][26][0].apply(r,arguments)},{116:116,26:26,58:58,99:99}],73:[function(t,n,r){var e=t(70).document;n.exports=e&&e.documentElement},{70:70}],74:[function(t,n,r){arguments[4][27][0].apply(r,arguments)},{27:27,58:58,59:59,64:64}],75:[function(t,n,r){var o=t(81),u=t(122).set;n.exports=function(t,n,r){var e,i=n.constructor;return i!==r&&"function"==typeof i&&(e=i.prototype)!==r.prototype&&o(e)&&u&&u(t,e),t}},{122:122,81:81}],76:[function(t,n,r){n.exports=function(t,n,r){var e=void 0===r;switch(n.length){case 0:return e?t():t.call(r);case 1:return e?t(n[0]):t.call(r,n[0]);case 2:return e?t(n[0],n[1]):t.call(r,n[0],n[1]);case 3:return e?t(n[0],n[1],n[2]):t.call(r,n[0],n[1],n[2]);case 4:return e?t(n[0],n[1],n[2],n[3]):t.call(r,n[0],n[1],n[2],n[3])}return t.apply(r,n)}},{}],77:[function(t,n,r){var e=t(48);n.exports=Object("z").propertyIsEnumerable(0)?Object:function(t){return"String"==e(t)?t.split(""):Object(t)}},{48:48}],78:[function(t,n,r){var e=t(88),i=t(152)("iterator"),o=Array.prototype;n.exports=function(t){return void 0!==t&&(e.Array===t||o[i]===t)}},{152:152,88:88}],79:[function(t,n,r){var 
e=t(48);n.exports=Array.isArray||function isArray(t){return"Array"==e(t)}},{48:48}],80:[function(t,n,r){var e=t(81),i=Math.floor;n.exports=function isInteger(t){return!e(t)&&isFinite(t)&&i(t)===t}},{81:81}],81:[function(t,n,r){arguments[4][28][0].apply(r,arguments)},{28:28}],82:[function(t,n,r){var e=t(81),i=t(48),o=t(152)("match");n.exports=function(t){var n;return e(t)&&(void 0!==(n=t[o])?!!n:"RegExp"==i(t))}},{152:152,48:48,81:81}],83:[function(t,n,r){var o=t(38);n.exports=function(n,t,r,e){try{return e?t(o(r)[0],r[1]):t(r)}catch(t){var i=n.return;throw void 0!==i&&o(i.call(n)),t}}},{38:38}],84:[function(t,n,r){"use strict";var e=t(98),i=t(116),o=t(124),u={};t(72)(u,t(152)("iterator"),function(){return this}),n.exports=function(t,n,r){t.prototype=e(u,{next:i(1,r)}),o(t,n+" Iterator")}},{116:116,124:124,152:152,72:72,98:98}],85:[function(t,n,r){"use strict";function Qn(){return this}var x=t(89),m=t(62),S=t(118),b=t(72),w=t(88),_=t(84),E=t(124),O=t(105),F=t(152)("iterator"),I=!([].keys&&"next"in[].keys()),P="values";n.exports=function(t,n,r,e,i,o,u){_(r,n,e);function Yn(t){if(!I&&t in p)return p[t];switch(t){case"keys":return function keys(){return new r(this,t)};case P:return function values(){return new r(this,t)}}return function entries(){return new r(this,t)}}var c,a,f,s=n+" Iterator",l=i==P,h=!1,p=t.prototype,v=p[F]||p["@@iterator"]||i&&p[i],g=v||Yn(i),y=i?l?Yn("entries"):g:void 0,d="Array"==n&&p.entries||v;if(d&&(f=O(d.call(new t)))!==Object.prototype&&f.next&&(E(f,s,!0),x||"function"==typeof f[F]||b(f,F,Qn)),l&&v&&v.name!==P&&(h=!0,g=function values(){return v.call(this)}),x&&!u||!I&&!h&&p[F]||b(p,F,g),w[n]=g,w[s]=Qn,i)if(c={values:l?g:Yn(P),keys:o?g:Yn("keys"),entries:y},u)for(a in c)a in p||S(p,a,c[a]);else m(m.P+m.F*(I||h),n,c);return c}},{105:105,118:118,124:124,152:152,62:62,72:72,84:84,88:88,89:89}],86:[function(t,n,r){var o=t(152)("iterator"),u=!1;try{var e=[7][o]();e.return=function(){u=!0},Array.from(e,function(){throw 2})}catch(t){}n.exports=function(t,n){if(!n&&!u)return!1;var r=!1;try{var e=[7],i=e[o]();i.next=function(){return{done:r=!0}},e[o]=function(){return i},t(e)}catch(t){}return r}},{152:152}],87:[function(t,n,r){n.exports=function(t,n){return{value:n,done:!!t}}},{}],88:[function(t,n,r){n.exports={}},{}],89:[function(t,n,r){n.exports=!1},{}],90:[function(t,n,r){var e=Math.expm1;n.exports=!e||22025.465794806718<e(10)||e(10)<22025.465794806718||-2e-17!=e(-2e-17)?function expm1(t){return 0==(t=+t)?t:-1e-6<t&&t<1e-6?t+t*t/2:Math.exp(t)-1}:e},{}],91:[function(t,n,r){var o=t(93),e=Math.pow,u=e(2,-52),c=e(2,-23),a=e(2,127)*(2-c),f=e(2,-126);n.exports=Math.fround||function fround(t){var n,r,e=Math.abs(t),i=o(t);return e<f?i*function(t){return t+1/u-1/u}(e/f/c)*f*c:a<(r=(n=(1+c/u)*e)-(n-e))||r!=r?i*(1/0):i*r}},{93:93}],92:[function(t,n,r){n.exports=Math.log1p||function log1p(t){return-1e-8<(t=+t)&&t<1e-8?t-t*t/2:Math.log(1+t)}},{}],93:[function(t,n,r){n.exports=Math.sign||function sign(t){return 0==(t=+t)||t!=t?t:t<0?-1:1}},{}],94:[function(t,n,r){function tp(t){u(t,e,{value:{i:"O"+ ++c,w:{}}})}var e=t(147)("meta"),i=t(81),o=t(71),u=t(99).f,c=0,a=Object.isExtensible||function(){return!0},f=!t(64)(function(){return a(Object.preventExtensions({}))}),s=n.exports={KEY:e,NEED:!1,fastKey:function(t,n){if(!i(t))return"symbol"==typeof t?t:("string"==typeof t?"S":"P")+t;if(!o(t,e)){if(!a(t))return"F";if(!n)return"E";tp(t)}return t[e].i},getWeak:function(t,n){if(!o(t,e)){if(!a(t))return!0;if(!n)return!1;tp(t)}return t[e].w},onFreeze:function(t){return 
f&&s.NEED&&a(t)&&!o(t,e)&&tp(t),t}}},{147:147,64:64,71:71,81:81,99:99}],95:[function(t,n,r){var u=t(70),c=t(136).set,a=u.MutationObserver||u.WebKitMutationObserver,f=u.process,s=u.Promise,l="process"==t(48)(f);n.exports=function(){function Qp(){var t,n;for(l&&(t=f.domain)&&t.exit();r;){n=r.fn,r=r.next;try{n()}catch(t){throw r?i():e=void 0,t}}e=void 0,t&&t.enter()}var r,e,i;if(l)i=function(){f.nextTick(Qp)};else if(!a||u.navigator&&u.navigator.standalone)if(s&&s.resolve){var t=s.resolve(void 0);i=function(){t.then(Qp)}}else i=function(){c.call(u,Qp)};else{var n=!0,o=document.createTextNode("");new a(Qp).observe(o,{characterData:!0}),i=function(){o.data=n=!n}}return function(t){var n={fn:t,next:void 0};e&&(e.next=n),r||(r=n,i()),e=n}}},{136:136,48:48,70:70}],96:[function(t,n,r){"use strict";var i=t(33);function PromiseCapability(t){var r,e;this.promise=new t(function(t,n){if(void 0!==r||void 0!==e)throw TypeError("Bad Promise constructor");r=t,e=n}),this.resolve=i(r),this.reject=i(e)}n.exports.f=function(t){return new PromiseCapability(t)}},{33:33}],97:[function(t,n,r){"use strict";var h=t(58),p=t(107),v=t(104),g=t(108),y=t(142),d=t(77),i=Object.assign;n.exports=!i||t(64)(function(){var t={},n={},r=Symbol(),e="abcdefghijklmnopqrst";return t[r]=7,e.split("").forEach(function(t){n[t]=t}),7!=i({},t)[r]||Object.keys(i({},n)).join("")!=e})?function assign(t,n){for(var r=y(t),e=arguments.length,i=1,o=v.f,u=g.f;i<e;)for(var c,a=d(arguments[i++]),f=o?p(a).concat(o(a)):p(a),s=f.length,l=0;l<s;)c=f[l++],h&&!u.call(a,c)||(r[c]=a[c]);return r}:i},{104:104,107:107,108:108,142:142,58:58,64:64,77:77}],98:[function(e,t,n){function Pq(){}var i=e(38),o=e(100),u=e(60),c=e(125)("IE_PROTO"),a="prototype",f=function(){var t,n=e(59)("iframe"),r=u.length;for(n.style.display="none",e(73).appendChild(n),n.src="javascript:",(t=n.contentWindow.document).open(),t.write("<script>document.F=Object<\/script>"),t.close(),f=t.F;r--;)delete f[a][u[r]];return f()};t.exports=Object.create||function create(t,n){var r;return null!==t?(Pq[a]=i(t),r=new Pq,Pq[a]=null,r[c]=t):r=f(),void 0===n?r:o(r,n)}},{100:100,125:125,38:38,59:59,60:60,73:73}],99:[function(t,n,r){arguments[4][29][0].apply(r,arguments)},{143:143,29:29,38:38,58:58,74:74}],100:[function(t,n,r){var u=t(99),c=t(38),a=t(107);n.exports=t(58)?Object.defineProperties:function defineProperties(t,n){c(t);for(var r,e=a(n),i=e.length,o=0;o<i;)u.f(t,r=e[o++],n[r]);return t}},{107:107,38:38,58:58,99:99}],101:[function(t,n,r){var e=t(108),i=t(116),o=t(140),u=t(143),c=t(71),a=t(74),f=Object.getOwnPropertyDescriptor;r.f=t(58)?f:function getOwnPropertyDescriptor(t,n){if(t=o(t),n=u(n,!0),a)try{return f(t,n)}catch(t){}if(c(t,n))return i(!e.f.call(t,n),t[n])}},{108:108,116:116,140:140,143:143,58:58,71:71,74:74}],102:[function(t,n,r){var e=t(140),i=t(103).f,o={}.toString,u="object"==typeof window&&window&&Object.getOwnPropertyNames?Object.getOwnPropertyNames(window):[];n.exports.f=function getOwnPropertyNames(t){return u&&"[object Window]"==o.call(t)?function(t){try{return i(t)}catch(t){return u.slice()}}(t):i(e(t))}},{103:103,140:140}],103:[function(t,n,r){var e=t(106),i=t(60).concat("length","prototype");r.f=Object.getOwnPropertyNames||function getOwnPropertyNames(t){return e(t,i)}},{106:106,60:60}],104:[function(t,n,r){r.f=Object.getOwnPropertySymbols},{}],105:[function(t,n,r){var e=t(71),i=t(142),o=t(125)("IE_PROTO"),u=Object.prototype;n.exports=Object.getPrototypeOf||function(t){return t=i(t),e(t,o)?t[o]:"function"==typeof t.constructor&&t instanceof 
t.constructor?t.constructor.prototype:t instanceof Object?u:null}},{125:125,142:142,71:71}],106:[function(t,n,r){var u=t(71),c=t(140),a=t(41)(!1),f=t(125)("IE_PROTO");n.exports=function(t,n){var r,e=c(t),i=0,o=[];for(r in e)r!=f&&u(e,r)&&o.push(r);for(;n.length>i;)u(e,r=n[i++])&&(~a(o,r)||o.push(r));return o}},{125:125,140:140,41:41,71:71}],107:[function(t,n,r){var e=t(106),i=t(60);n.exports=Object.keys||function keys(t){return e(t,i)}},{106:106,60:60}],108:[function(t,n,r){r.f={}.propertyIsEnumerable},{}],109:[function(t,n,r){var i=t(62),o=t(52),u=t(64);n.exports=function(t,n){var r=(o.Object||{})[t]||Object[t],e={};e[t]=n(r),i(i.S+i.F*u(function(){r(1)}),"Object",e)}},{52:52,62:62,64:64}],110:[function(t,n,r){var a=t(58),f=t(107),s=t(140),l=t(108).f;n.exports=function(c){return function(t){for(var n,r=s(t),e=f(r),i=e.length,o=0,u=[];o<i;)n=e[o++],a&&!l.call(r,n)||u.push(c?[n,r[n]]:r[n]);return u}}},{107:107,108:108,140:140,58:58}],111:[function(t,n,r){var e=t(103),i=t(104),o=t(38),u=t(70).Reflect;n.exports=u&&u.ownKeys||function ownKeys(t){var n=e.f(o(t)),r=i.f;return r?n.concat(r(t)):n}},{103:103,104:104,38:38,70:70}],112:[function(t,n,r){var e=t(70).parseFloat,i=t(134).trim;n.exports=1/e(t(135)+"-0")!=-1/0?function parseFloat(t){var n=i(String(t),3),r=e(n);return 0===r&&"-"==n.charAt(0)?-0:r}:e},{134:134,135:135,70:70}],113:[function(t,n,r){var e=t(70).parseInt,i=t(134).trim,o=t(135),u=/^[-+]?0[xX]/;n.exports=8!==e(o+"08")||22!==e(o+"0x16")?function parseInt(t,n){var r=i(String(t),3);return e(r,n>>>0||(u.test(r)?16:10))}:e},{134:134,135:135,70:70}],114:[function(t,n,r){n.exports=function(t){try{return{e:!1,v:t()}}catch(t){return{e:!0,v:t}}}},{}],115:[function(t,n,r){var e=t(38),i=t(81),o=t(96);n.exports=function(t,n){if(e(t),i(n)&&n.constructor===t)return n;var r=o.f(t);return(0,r.resolve)(n),r.promise}},{38:38,81:81,96:96}],116:[function(t,n,r){arguments[4][30][0].apply(r,arguments)},{30:30}],117:[function(t,n,r){var i=t(118);n.exports=function(t,n,r){for(var e in n)i(t,e,n[e],r);return t}},{118:118}],118:[function(t,n,r){var o=t(70),u=t(72),c=t(71),a=t(147)("src"),e=t(69),i="toString",f=(""+e).split(i);t(52).inspectSource=function(t){return e.call(t)},(n.exports=function(t,n,r,e){var i="function"==typeof r;i&&(c(r,"name")||u(r,"name",n)),t[n]!==r&&(i&&(c(r,a)||u(r,a,t[n]?""+t[n]:f.join(String(n)))),t===o?t[n]=r:e?t[n]?t[n]=r:u(t,n,r):(delete t[n],u(t,n,r)))})(Function.prototype,i,function toString(){return"function"==typeof this&&this[a]||e.call(this)})},{147:147,52:52,69:69,70:70,71:71,72:72}],119:[function(t,n,r){"use strict";var i=t(47),o=RegExp.prototype.exec;n.exports=function(t,n){var r=t.exec;if("function"==typeof r){var e=r.call(t,n);if("object"!=typeof e)throw new TypeError("RegExp exec method returned something other than an Object or null");return e}if("RegExp"!==i(t))throw new TypeError("RegExp#exec called on incompatible receiver");return o.call(t,n)}},{47:47}],120:[function(t,n,r){"use strict";var e,i,u=t(66),c=RegExp.prototype.exec,a=String.prototype.replace,o=c,f="lastIndex",s=(e=/a/,i=/b*/g,c.call(e,"a"),c.call(i,"a"),0!==e[f]||0!==i[f]),l=void 0!==/()??/.exec("")[1];(s||l)&&(o=function exec(t){var n,r,e,i,o=this;return l&&(r=new RegExp("^"+o.source+"$(?!\\s)",u.call(o))),s&&(n=o[f]),e=c.call(o,t),s&&e&&(o[f]=o.global?e.index+e[0].length:n),l&&e&&1<e.length&&a.call(e[0],r,function(){for(i=1;i<arguments.length-2;i++)void 0===arguments[i]&&(e[i]=void 0)}),e}),n.exports=o},{66:66}],121:[function(t,n,r){n.exports=Object.is||function is(t,n){return 
t===n?0!==t||1/t==1/n:t!=t&&n!=n}},{}],122:[function(n,t,r){function Wu(t,n){if(i(t),!e(n)&&null!==n)throw TypeError(n+": can't set as prototype!")}var e=n(81),i=n(38);t.exports={set:Object.setPrototypeOf||("__proto__"in{}?function(t,r,e){try{(e=n(54)(Function.call,n(101).f(Object.prototype,"__proto__").set,2))(t,[]),r=!(t instanceof Array)}catch(t){r=!0}return function setPrototypeOf(t,n){return Wu(t,n),r?t.__proto__=n:e(t,n),t}}({},!1):void 0),check:Wu}},{101:101,38:38,54:54,81:81}],123:[function(t,n,r){"use strict";var e=t(70),i=t(99),o=t(58),u=t(152)("species");n.exports=function(t){var n=e[t];o&&n&&!n[u]&&i.f(n,u,{configurable:!0,get:function(){return this}})}},{152:152,58:58,70:70,99:99}],124:[function(t,n,r){var e=t(99).f,i=t(71),o=t(152)("toStringTag");n.exports=function(t,n,r){t&&!i(t=r?t:t.prototype,o)&&e(t,o,{configurable:!0,value:n})}},{152:152,71:71,99:99}],125:[function(t,n,r){var e=t(126)("keys"),i=t(147);n.exports=function(t){return e[t]||(e[t]=i(t))}},{126:126,147:147}],126:[function(t,n,r){var e=t(52),i=t(70),o="__core-js_shared__",u=i[o]||(i[o]={});(n.exports=function(t,n){return u[t]||(u[t]=void 0!==n?n:{})})("versions",[]).push({version:e.version,mode:t(89)?"pure":"global",copyright:"© 2019 Denis Pushkarev (zloirock.ru)"})},{52:52,70:70,89:89}],127:[function(t,n,r){var i=t(38),o=t(33),u=t(152)("species");n.exports=function(t,n){var r,e=i(t).constructor;return void 0===e||null==(r=i(e)[u])?n:o(r)}},{152:152,33:33,38:38}],128:[function(t,n,r){"use strict";var e=t(64);n.exports=function(t,n){return!!t&&e(function(){n?t.call(null,function(){},1):t.call(null)})}},{64:64}],129:[function(t,n,r){var a=t(139),f=t(57);n.exports=function(c){return function(t,n){var r,e,i=String(f(t)),o=a(n),u=i.length;return o<0||u<=o?c?"":void 0:(r=i.charCodeAt(o))<55296||56319<r||o+1===u||(e=i.charCodeAt(o+1))<56320||57343<e?c?i.charAt(o):r:c?i.slice(o,o+2):e-56320+(r-55296<<10)+65536}}},{139:139,57:57}],130:[function(t,n,r){var e=t(82),i=t(57);n.exports=function(t,n,r){if(e(n))throw TypeError("String#"+r+" doesn't accept regex!");return String(i(t))}},{57:57,82:82}],131:[function(t,n,r){function Aw(t,n,r,e){var i=String(u(t)),o="<"+n;return""!==r&&(o+=" "+r+'="'+String(e).replace(c,"&quot;")+'"'),o+">"+i+"</"+n+">"}var e=t(62),i=t(64),u=t(57),c=/"/g;n.exports=function(n,t){var r={};r[n]=t(Aw),e(e.P+e.F*i(function(){var t=""[n]('"');return t!==t.toLowerCase()||3<t.split('"').length}),"String",r)}},{57:57,62:62,64:64}],132:[function(t,n,r){var s=t(141),l=t(133),h=t(57);n.exports=function(t,n,r,e){var i=String(h(t)),o=i.length,u=void 0===r?" ":String(r),c=s(n);if(c<=o||""==u)return i;var a=c-o,f=l.call(u,Math.ceil(a/u.length));return f.length>a&&(f=f.slice(0,a)),e?f+i:i+f}},{133:133,141:141,57:57}],133:[function(t,n,r){"use strict";var i=t(139),o=t(57);n.exports=function repeat(t){var n=String(o(this)),r="",e=i(t);if(e<0||e==1/0)throw RangeError("Count can't be negative");for(;0<e;(e>>>=1)&&(n+=n))1&e&&(r+=n);return r}},{139:139,57:57}],
134:[function(t,n,r){function tx(t,n,r){var e={},i=c(function(){return!!a[t]()||"\u200b\u0085"!="\u200b\u0085"[t]()}),o=e[t]=i?n(s):a[t];r&&(e[r]=o),u(u.P+u.F*i,"String",e)}var u=t(62),e=t(57),c=t(64),a=t(135),i="["+a+"]",o=RegExp("^"+i+i+"*"),f=RegExp(i+i+"*$"),s=tx.trim=function(t,n){return t=String(e(t)),1&n&&(t=t.replace(o,"")),2&n&&(t=t.replace(f,"")),t};n.exports=tx},{135:135,57:57,62:62,64:64}],135:[function(t,n,r){n.exports="\t\n\v\f\r \xa0\u1680\u180e\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u202f\u205f\u3000\u2028\u2029\ufeff"},{}],
136:[function(t,n,r){function Zx(){var t=+this;if(d.hasOwnProperty(t)){var n=d[t];delete d[t],n()}}function $x(t){Zx.call(t.data)}var e,i,o,u=t(54),c=t(76),a=t(73),f=t(59),s=t(70),l=s.process,h=s.setImmediate,p=s.clearImmediate,v=s.MessageChannel,g=s.Dispatch,y=0,d={},x="onreadystatechange";h&&p||(h=function setImmediate(t){for(var n=[],r=1;r<arguments.length;)n.push(arguments[r++]);return d[++y]=function(){c("function"==typeof t?t:Function(t),n)},e(y),y},p=function clearImmediate(t){delete d[t]},"process"==t(48)(l)?e=function(t){l.nextTick(u(Zx,t,1))}:g&&g.now?e=function(t){g.now(u(Zx,t,1))}:v?(o=(i=new v).port2,i.port1.onmessage=$x,e=u(o.postMessage,o,1)):s.addEventListener&&"function"==typeof postMessage&&!s.importScripts?(e=function(t){s.postMessage(t+"","*")},s.addEventListener("message",$x,!1)):e=x in f("script")?function(t){a.appendChild(f("script"))[x]=function(){a.removeChild(this),Zx.call(t)}}:function(t){setTimeout(u(Zx,t,1),0)}),n.exports={set:h,clear:p}},{48:48,54:54,59:59,70:70,73:73,76:76}],137:[function(t,n,r){var e=t(139),i=Math.max,o=Math.min;n.exports=function(t,n){return(t=e(t))<0?i(t+n,0):o(t,n)}},{139:139}],138:[function(t,n,r){var e=t(139),i=t(141);n.exports=function(t){if(void 0===t)return 0;var n=e(t),r=i(n);if(n!==r)throw RangeError("Wrong length!");return r}},{139:139,141:141}],139:[function(t,n,r){var e=Math.ceil,i=Math.floor;n.exports=function(t){return isNaN(t=+t)?0:(0<t?i:e)(t)}},{}],140:[function(t,n,r){var e=t(77),i=t(57);n.exports=function(t){return e(i(t))}},{57:57,77:77}],141:[function(t,n,r){var e=t(139),i=Math.min;n.exports=function(t){return 0<t?i(e(t),9007199254740991):0}},{139:139}],142:[function(t,n,r){var e=t(57);n.exports=function(t){return Object(e(t))}},{57:57}],143:[function(t,n,r){arguments[4][31][0].apply(r,arguments)},{31:31,81:81}],
144:[function(t,n,r){"use strict";if(t(58)){var y=t(89),d=t(70),x=t(64),m=t(62),S=t(146),e=t(145),h=t(54),b=t(37),i=t(116),w=t(72),o=t(117),u=t(139),_=t(141),E=t(138),c=t(137),a=t(143),f=t(71),O=t(47),F=t(81),p=t(142),v=t(78),I=t(98),P=t(105),A=t(103).f,g=t(153),s=t(147),l=t(152),M=t(42),k=t(41),N=t(127),j=t(164),R=t(88),T=t(86),L=t(123),C=t(40),G=t(39),D=t(99),U=t(101),W=D.f,V=U.f,B=d.RangeError,q=d.TypeError,Y=d.Uint8Array,z="ArrayBuffer",X="Shared"+z,$="BYTES_PER_ELEMENT",Q="prototype",Z=Array[Q],J=e.ArrayBuffer,H=e.DataView,K=M(0),tt=M(2),nt=M(3),rt=M(4),et=M(5),it=M(6),ot=k(!0),ut=k(!1),ct=j.values,at=j.keys,ft=j.entries,st=Z.lastIndexOf,lt=Z.reduce,ht=Z.reduceRight,pt=Z.join,vt=Z.sort,gt=Z.slice,yt=Z.toString,dt=Z.toLocaleString,xt=l("iterator"),mt=l("toStringTag"),St=s("typed_constructor"),bt=s("def_constructor"),wt=S.CONSTR,_t=S.TYPED,Et=S.VIEW,Ot="Wrong length!",Ft=M(1,function(t,n){return kt(N(t,t[bt]),n)}),It=x(function(){return 1===new Y(new Uint16Array([1]).buffer)[0]}),Pt=!!Y&&!!Y[Q].set&&x(function(){new Y(1).set({})}),At=function(t,n){var r=u(t);if(r<0||r%n)throw B("Wrong offset!");return r},Mt=function(t){if(F(t)&&_t in t)return t;throw q(t+" is not a typed array!")},kt=function(t,n){if(!(F(t)&&St in t))throw q("It is not a typed array constructor!");return new t(n)},Nt=function(t,n){return jt(N(t,t[bt]),n)},jt=function(t,n){for(var r=0,e=n.length,i=kt(t,e);r<e;)i[r]=n[r++];return i},Rt=function(t,n,r){W(t,n,{get:function(){return this._d[r]}})},Tt=function from(t){var n,r,e,i,o,u,c=p(t),a=arguments.length,f=1<a?arguments[1]:void 0,s=void 
0!==f,l=g(c);if(null!=l&&!v(l)){for(u=l.call(c),e=[],n=0;!(o=u.next()).done;n++)e.push(o.value);c=e}for(s&&2<a&&(f=h(f,arguments[2],2)),n=0,r=_(c.length),i=kt(this,r);n<r;n++)i[n]=s?f(c[n],n):c[n];return i},Lt=function of(){for(var t=0,n=arguments.length,r=kt(this,n);t<n;)r[t]=arguments[t++];return r},Ct=!!Y&&x(function(){dt.call(new Y(1))}),Gt=function toLocaleString(){return dt.apply(Ct?gt.call(Mt(this)):Mt(this),arguments)},Dt={copyWithin:function copyWithin(t,n){return G.call(Mt(this),t,n,2<arguments.length?arguments[2]:void 0)},every:function every(t){return rt(Mt(this),t,1<arguments.length?arguments[1]:void 0)},fill:function fill(t){return C.apply(Mt(this),arguments)},filter:function filter(t){return Nt(this,tt(Mt(this),t,1<arguments.length?arguments[1]:void 0))},find:function find(t){return et(Mt(this),t,1<arguments.length?arguments[1]:void 0)},findIndex:function findIndex(t){return it(Mt(this),t,1<arguments.length?arguments[1]:void 0)},forEach:function forEach(t){K(Mt(this),t,1<arguments.length?arguments[1]:void 0)},indexOf:function indexOf(t){return ut(Mt(this),t,1<arguments.length?arguments[1]:void 0)},includes:function includes(t){return ot(Mt(this),t,1<arguments.length?arguments[1]:void 0)},join:function join(t){return pt.apply(Mt(this),arguments)},lastIndexOf:function lastIndexOf(t){return st.apply(Mt(this),arguments)},map:function map(t){return Ft(Mt(this),t,1<arguments.length?arguments[1]:void 0)},reduce:function reduce(t){return lt.apply(Mt(this),arguments)},reduceRight:function reduceRight(t){return ht.apply(Mt(this),arguments)},reverse:function reverse(){for(var t,n=this,r=Mt(n).length,e=Math.floor(r/2),i=0;i<e;)t=n[i],n[i++]=n[--r],n[r]=t;return n},some:function some(t){return nt(Mt(this),t,1<arguments.length?arguments[1]:void 0)},sort:function sort(t){return vt.call(Mt(this),t)},subarray:function subarray(t,n){var r=Mt(this),e=r.length,i=c(t,e);return new(N(r,r[bt]))(r.buffer,r.byteOffset+i*r.BYTES_PER_ELEMENT,_((void 0===n?e:c(n,e))-i))}},Ut=function slice(t,n){return Nt(this,gt.call(Mt(this),t,n))},Wt=function set(t){Mt(this);var n=At(arguments[1],1),r=this.length,e=p(t),i=_(e.length),o=0;if(r<i+n)throw B(Ot);for(;o<i;)this[n+o]=e[o++]},Vt={entries:function entries(){return ft.call(Mt(this))},keys:function keys(){return at.call(Mt(this))},values:function values(){return ct.call(Mt(this))}},Bt=function(t,n){return F(t)&&t[_t]&&"symbol"!=typeof n&&n in t&&String(+n)==String(n)},qt=function getOwnPropertyDescriptor(t,n){return Bt(t,n=a(n,!0))?i(2,t[n]):V(t,n)},Yt=function defineProperty(t,n,r){return!(Bt(t,n=a(n,!0))&&F(r)&&f(r,"value"))||f(r,"get")||f(r,"set")||r.configurable||f(r,"writable")&&!r.writable||f(r,"enumerable")&&!r.enumerable?W(t,n,r):(t[n]=r.value,t)};wt||(U.f=qt,D.f=Yt),m(m.S+m.F*!wt,"Object",{getOwnPropertyDescriptor:qt,defineProperty:Yt}),x(function(){yt.call({})})&&(yt=dt=function toString(){return pt.call(this)});var zt=o({},Dt);o(zt,Vt),w(zt,xt,Vt.values),o(zt,{slice:Ut,set:Wt,constructor:function(){},toString:yt,toLocaleString:Gt}),Rt(zt,"buffer","b"),Rt(zt,"byteOffset","o"),Rt(zt,"byteLength","l"),Rt(zt,"length","e"),W(zt,mt,{get:function(){return this[_t]}}),n.exports=function(t,l,n,i){function CC(t,n){W(t,n,{get:function(){return function(t,n){var r=t._d;return r.v[e](n*l+r.o,It)}(this,n)},set:function(t){return function(t,n,r){var e=t._d;i&&(r=(r=Math.round(r))<0?0:255<r?255:255&r),e.v[o](n*l+e.o,r,It)}(this,n,t)},enumerable:!0})}var 
h=t+((i=!!i)?"Clamped":"")+"Array",e="get"+t,o="set"+t,p=d[h],u=p||{},r=p&&P(p),c=!p||!S.ABV,a={},f=p&&p[Q];c?(p=n(function(t,n,r,e){b(t,p,h,"_d");var i,o,u,c,a=0,f=0;if(F(n)){if(!(n instanceof J||(c=O(n))==z||c==X))return _t in n?jt(p,n):Tt.call(p,n);i=n,f=At(r,l);var s=n.byteLength;if(void 0===e){if(s%l)throw B(Ot);if((o=s-f)<0)throw B(Ot)}else if(s<(o=_(e)*l)+f)throw B(Ot);u=o/l}else u=E(n),i=new J(o=u*l);for(w(t,"_d",{b:i,o:f,l:o,e:u,v:new H(i)});a<u;)CC(t,a++)}),f=p[Q]=I(zt),w(f,"constructor",p)):x(function(){p(1)})&&x(function(){new p(-1)})&&T(function(t){new p,new p(null),new p(1.5),new p(t)},!0)||(p=n(function(t,n,r,e){var i;return b(t,p,h),F(n)?n instanceof J||(i=O(n))==z||i==X?void 0!==e?new u(n,At(r,l),e):void 0!==r?new u(n,At(r,l)):new u(n):_t in n?jt(p,n):Tt.call(p,n):new u(E(n))}),K(r!==Function.prototype?A(u).concat(A(r)):A(u),function(t){t in p||w(p,t,u[t])}),p[Q]=f,y||(f.constructor=p));var s=f[xt],v=!!s&&("values"==s.name||null==s.name),g=Vt.values;w(p,St,!0),w(f,_t,h),w(f,Et,!0),w(f,bt,p),(i?new p(1)[mt]==h:mt in f)||W(f,mt,{get:function(){return h}}),a[h]=p,m(m.G+m.W+m.F*(p!=u),a),m(m.S,h,{BYTES_PER_ELEMENT:l}),m(m.S+m.F*x(function(){u.of.call(p,1)}),h,{from:Tt,of:Lt}),$ in f||w(f,$,l),m(m.P,h,Dt),L(h),m(m.P+m.F*Pt,h,{set:Wt}),m(m.P+m.F*!v,h,Vt),y||f.toString==yt||(f.toString=yt),m(m.P+m.F*x(function(){new p(1).slice()}),h,{slice:Ut}),m(m.P+m.F*(x(function(){return[1,2].toLocaleString()!=new p([1,2]).toLocaleString()})||!x(function(){f.toLocaleString.call([1,2])})),h,{toLocaleString:Gt}),R[h]=v?s:g,y||v||w(f,xt,g)}}else n.exports=function(){}},{101:101,103:103,105:105,116:116,117:117,123:123,127:127,137:137,138:138,139:139,141:141,142:142,143:143,145:145,146:146,147:147,152:152,153:153,164:164,37:37,39:39,40:40,41:41,42:42,47:47,54:54,58:58,62:62,64:64,70:70,71:71,72:72,78:78,81:81,86:86,88:88,89:89,98:98,99:99}],145:[function(t,n,r){"use strict";var e=t(70),i=t(58),o=t(89),u=t(146),c=t(72),a=t(117),f=t(64),s=t(37),l=t(139),h=t(141),p=t(138),v=t(103).f,g=t(99).f,y=t(40),d=t(124),x="ArrayBuffer",m="DataView",S="prototype",b="Wrong index!",w=e[x],_=e[m],E=e.Math,O=e.RangeError,F=e.Infinity,I=w,P=E.abs,A=E.pow,M=E.floor,k=E.log,N=E.LN2,j="byteLength",R="byteOffset",T=i?"_b":"buffer",L=i?"_l":j,C=i?"_o":R;function packIEEE754(t,n,r){var e,i,o,u=new Array(r),c=8*r-n-1,a=(1<<c)-1,f=a>>1,s=23===n?A(2,-24)-A(2,-77):0,l=0,h=t<0||0===t&&1/t<0?1:0;for((t=P(t))!=t||t===F?(i=t!=t?1:0,e=a):(e=M(k(t)/N),t*(o=A(2,-e))<1&&(e--,o*=2),2<=(t+=1<=e+f?s/o:s*A(2,1-f))*o&&(e++,o/=2),a<=e+f?(i=0,e=a):1<=e+f?(i=(t*o-1)*A(2,n),e+=f):(i=t*A(2,f-1)*A(2,n),e=0));8<=n;u[l++]=255&i,i/=256,n-=8);for(e=e<<n|i,c+=n;0<c;u[l++]=255&e,e/=256,c-=8);return u[--l]|=128*h,u}function unpackIEEE754(t,n,r){var e,i=8*r-n-1,o=(1<<i)-1,u=o>>1,c=i-7,a=r-1,f=t[a--],s=127&f;for(f>>=7;0<c;s=256*s+t[a],a--,c-=8);for(e=s&(1<<-c)-1,s>>=-c,c+=n;0<c;e=256*e+t[a],a--,c-=8);if(0===s)s=1-u;else{if(s===o)return e?NaN:f?-F:F;e+=A(2,n),s-=u}return(f?-1:1)*e*A(2,s-n)}function unpackI32(t){return t[3]<<24|t[2]<<16|t[1]<<8|t[0]}function packI8(t){return[255&t]}function packI16(t){return[255&t,t>>8&255]}function packI32(t){return[255&t,t>>8&255,t>>16&255,t>>24&255]}function packF64(t){return packIEEE754(t,52,8)}function packF32(t){return packIEEE754(t,23,4)}function addGetter(t,n,r){g(t[S],n,{get:function(){return this[r]}})}function get(t,n,r,e){var i=p(+r);if(i+n>t[L])throw O(b);var o=t[T]._b,u=i+t[C],c=o.slice(u,u+n);return e?c:c.reverse()}function set(t,n,r,e,i,o){var u=p(+r);if(u+n>t[L])throw O(b);for(var 
c=t[T]._b,a=u+t[C],f=e(+i),s=0;s<n;s++)c[a+s]=f[o?s:n-s-1]}if(u.ABV){if(!f(function(){w(1)})||!f(function(){new w(-1)})||f(function(){return new w,new w(1.5),new w(NaN),w.name!=x})){for(var G,D=(w=function ArrayBuffer(t){return s(this,w),new I(p(t))})[S]=I[S],U=v(I),W=0;U.length>W;)(G=U[W++])in w||c(w,G,I[G]);o||(D.constructor=w)}var V=new _(new w(2)),B=_[S].setInt8;V.setInt8(0,2147483648),V.setInt8(1,2147483649),!V.getInt8(0)&&V.getInt8(1)||a(_[S],{setInt8:function setInt8(t,n){B.call(this,t,n<<24>>24)},setUint8:function setUint8(t,n){B.call(this,t,n<<24>>24)}},!0)}else w=function ArrayBuffer(t){s(this,w,x);var n=p(t);this._b=y.call(new Array(n),0),this[L]=n},_=function DataView(t,n,r){s(this,_,m),s(t,w,m);var e=t[L],i=l(n);if(i<0||e<i)throw O("Wrong offset!");if(e<i+(r=void 0===r?e-i:h(r)))throw O("Wrong length!");this[T]=t,this[C]=i,this[L]=r},i&&(addGetter(w,j,"_l"),addGetter(_,"buffer","_b"),addGetter(_,j,"_l"),addGetter(_,R,"_o")),a(_[S],{getInt8:function getInt8(t){return get(this,1,t)[0]<<24>>24},getUint8:function getUint8(t){return get(this,1,t)[0]},getInt16:function getInt16(t){var n=get(this,2,t,arguments[1]);return(n[1]<<8|n[0])<<16>>16},getUint16:function getUint16(t){var n=get(this,2,t,arguments[1]);return n[1]<<8|n[0]},getInt32:function getInt32(t){return unpackI32(get(this,4,t,arguments[1]))},getUint32:function getUint32(t){return unpackI32(get(this,4,t,arguments[1]))>>>0},getFloat32:function getFloat32(t){return unpackIEEE754(get(this,4,t,arguments[1]),23,4)},getFloat64:function getFloat64(t){return unpackIEEE754(get(this,8,t,arguments[1]),52,8)},setInt8:function setInt8(t,n){set(this,1,t,packI8,n)},setUint8:function setUint8(t,n){set(this,1,t,packI8,n)},setInt16:function setInt16(t,n){set(this,2,t,packI16,n,arguments[2])},setUint16:function setUint16(t,n){set(this,2,t,packI16,n,arguments[2])},setInt32:function setInt32(t,n){set(this,4,t,packI32,n,arguments[2])},setUint32:function setUint32(t,n){set(this,4,t,packI32,n,arguments[2])},setFloat32:function setFloat32(t,n){set(this,4,t,packF32,n,arguments[2])},setFloat64:function setFloat64(t,n){set(this,8,t,packF64,n,arguments[2])}});d(w,x),d(_,m),c(_[S],u.VIEW,!0),r[x]=w,r[m]=_},{103:103,117:117,124:124,138:138,139:139,141:141,146:146,37:37,40:40,58:58,64:64,70:70,72:72,89:89,99:99}],146:[function(t,n,r){for(var e,i=t(70),o=t(72),u=t(147),c=u("typed_array"),a=u("view"),f=!(!i.ArrayBuffer||!i.DataView),s=f,l=0,h="Int8Array,Uint8Array,Uint8ClampedArray,Int16Array,Uint16Array,Int32Array,Uint32Array,Float32Array,Float64Array".split(",");l<9;)(e=i[h[l++]])?(o(e.prototype,c,!0),o(e.prototype,a,!0)):s=!1;n.exports={ABV:f,CONSTR:s,TYPED:c,VIEW:a}},{147:147,70:70,72:72}],147:[function(t,n,r){var e=0,i=Math.random();n.exports=function(t){return"Symbol(".concat(void 0===t?"":t,")_",(++e+i).toString(36))}},{}],148:[function(t,n,r){var e=t(70).navigator;n.exports=e&&e.userAgent||""},{70:70}],149:[function(t,n,r){var e=t(81);n.exports=function(t,n){if(!e(t)||t._t!==n)throw TypeError("Incompatible receiver, "+n+" required!");return t}},{81:81}],150:[function(t,n,r){var e=t(70),i=t(52),o=t(89),u=t(151),c=t(99).f;n.exports=function(t){var n=i.Symbol||(i.Symbol=o?{}:e.Symbol||{});"_"==t.charAt(0)||t in n||c(n,t,{value:u.f(t)})}},{151:151,52:52,70:70,89:89,99:99}],151:[function(t,n,r){r.f=t(152)},{152:152}],152:[function(t,n,r){var e=t(126)("wks"),i=t(147),o=t(70).Symbol,u="function"==typeof o;(n.exports=function(t){return e[t]||(e[t]=u&&o[t]||(u?o:i)("Symbol."+t))}).store=e},{126:126,147:147,70:70}],153:[function(t,n,r){var 
e=t(47),i=t(152)("iterator"),o=t(88);n.exports=t(52).getIteratorMethod=function(t){if(null!=t)return t[i]||t["@@iterator"]||o[e(t)]}},{152:152,47:47,52:52,88:88}],154:[function(t,n,r){var e=t(62);e(e.P,"Array",{copyWithin:t(39)}),t(35)("copyWithin")},{35:35,39:39,62:62}],155:[function(t,n,r){"use strict";var e=t(62),i=t(42)(4);e(e.P+e.F*!t(128)([].every,!0),"Array",{every:function every(t){return i(this,t,arguments[1])}})},{128:128,42:42,62:62}],156:[function(t,n,r){var e=t(62);e(e.P,"Array",{fill:t(40)}),t(35)("fill")},{35:35,40:40,62:62}],157:[function(t,n,r){"use strict";var e=t(62),i=t(42)(2);e(e.P+e.F*!t(128)([].filter,!0),"Array",{filter:function filter(t){return i(this,t,arguments[1])}})},{128:128,42:42,62:62}],158:[function(t,n,r){"use strict";var e=t(62),i=t(42)(6),o="findIndex",u=!0;o in[]&&Array(1)[o](function(){u=!1}),e(e.P+e.F*u,"Array",{findIndex:function findIndex(t){return i(this,t,1<arguments.length?arguments[1]:void 0)}}),t(35)(o)},{35:35,42:42,62:62}],159:[function(t,n,r){"use strict";var e=t(62),i=t(42)(5),o="find",u=!0;o in[]&&Array(1)[o](function(){u=!1}),e(e.P+e.F*u,"Array",{find:function find(t){return i(this,t,1<arguments.length?arguments[1]:void 0)}}),t(35)(o)},{35:35,42:42,62:62}],160:[function(t,n,r){"use strict";var e=t(62),i=t(42)(0),o=t(128)([].forEach,!0);e(e.P+e.F*!o,"Array",{forEach:function forEach(t){return i(this,t,arguments[1])}})},{128:128,42:42,62:62}],161:[function(t,n,r){"use strict";var h=t(54),e=t(62),p=t(142),v=t(83),g=t(78),y=t(141),d=t(53),x=t(153);e(e.S+e.F*!t(86)(function(t){Array.from(t)}),"Array",{from:function from(t){var n,r,e,i,o=p(t),u="function"==typeof this?this:Array,c=arguments.length,a=1<c?arguments[1]:void 0,f=void 0!==a,s=0,l=x(o);if(f&&(a=h(a,2<c?arguments[2]:void 0,2)),null==l||u==Array&&g(l))for(r=new u(n=y(o.length));s<n;s++)d(r,s,f?a(o[s],s):o[s]);else for(i=l.call(o),r=new u;!(e=i.next()).done;s++)d(r,s,f?v(i,a,[e.value,s],!0):e.value);return r.length=s,r}})},{141:141,142:142,153:153,53:53,54:54,62:62,78:78,83:83,86:86}],162:[function(t,n,r){"use strict";var e=t(62),i=t(41)(!1),o=[].indexOf,u=!!o&&1/[1].indexOf(1,-0)<0;e(e.P+e.F*(u||!t(128)(o)),"Array",{indexOf:function indexOf(t){return u?o.apply(this,arguments)||0:i(this,t,arguments[1])}})},{128:128,41:41,62:62}],163:[function(t,n,r){var e=t(62);e(e.S,"Array",{isArray:t(79)})},{62:62,79:79}],164:[function(t,n,r){"use strict";var e=t(35),i=t(87),o=t(88),u=t(140);n.exports=t(85)(Array,"Array",function(t,n){this._t=u(t),this._i=0,this._k=n},function(){var t=this._t,n=this._k,r=this._i++;return!t||r>=t.length?(this._t=void 0,i(1)):i(0,"keys"==n?r:"values"==n?t[r]:[r,t[r]])},"values"),o.Arguments=o.Array,e("keys"),e("values"),e("entries")},{140:140,35:35,85:85,87:87,88:88}],165:[function(t,n,r){"use strict";var e=t(62),i=t(140),o=[].join;e(e.P+e.F*(t(77)!=Object||!t(128)(o)),"Array",{join:function join(t){return o.call(i(this),void 0===t?",":t)}})},{128:128,140:140,62:62,77:77}],166:[function(t,n,r){"use strict";var e=t(62),i=t(140),o=t(139),u=t(141),c=[].lastIndexOf,a=!!c&&1/[1].lastIndexOf(1,-0)<0;e(e.P+e.F*(a||!t(128)(c)),"Array",{lastIndexOf:function lastIndexOf(t){if(a)return c.apply(this,arguments)||0;var n=i(this),r=u(n.length),e=r-1;for(1<arguments.length&&(e=Math.min(e,o(arguments[1]))),e<0&&(e=r+e);0<=e;e--)if(e in n&&n[e]===t)return e||0;return-1}})},{128:128,139:139,140:140,141:141,62:62}],167:[function(t,n,r){"use strict";var e=t(62),i=t(42)(1);e(e.P+e.F*!t(128)([].map,!0),"Array",{map:function map(t){return 
i(this,t,arguments[1])}})},{128:128,42:42,62:62}],168:[function(t,n,r){"use strict";var e=t(62),i=t(53);e(e.S+e.F*t(64)(function(){function F(){}return!(Array.of.call(F)instanceof F)}),"Array",{of:function of(){for(var t=0,n=arguments.length,r=new("function"==typeof this?this:Array)(n);t<n;)i(r,t,arguments[t++]);return r.length=n,r}})},{53:53,62:62,64:64}],169:[function(t,n,r){"use strict";var e=t(62),i=t(43);e(e.P+e.F*!t(128)([].reduceRight,!0),"Array",{reduceRight:function reduceRight(t){return i(this,t,arguments.length,arguments[1],!0)}})},{128:128,43:43,62:62}],170:[function(t,n,r){"use strict";var e=t(62),i=t(43);e(e.P+e.F*!t(128)([].reduce,!0),"Array",{reduce:function reduce(t){return i(this,t,arguments.length,arguments[1],!1)}})},{128:128,43:43,62:62}],171:[function(t,n,r){"use strict";var e=t(62),i=t(73),f=t(48),s=t(137),l=t(141),h=[].slice;e(e.P+e.F*t(64)(function(){i&&h.call(i)}),"Array",{slice:function slice(t,n){var r=l(this.length),e=f(this);if(n=void 0===n?r:n,"Array"==e)return h.call(this,t,n);for(var i=s(t,r),o=s(n,r),u=l(o-i),c=new Array(u),a=0;a<u;a++)c[a]="String"==e?this.charAt(i+a):this[i+a];return c}})},{137:137,141:141,48:48,62:62,64:64,73:73}],172:[function(t,n,r){"use strict";var e=t(62),i=t(42)(3);e(e.P+e.F*!t(128)([].some,!0),"Array",{some:function some(t){return i(this,t,arguments[1])}})},{128:128,42:42,62:62}],173:[function(t,n,r){"use strict";var e=t(62),i=t(33),o=t(142),u=t(64),c=[].sort,a=[1,2,3];e(e.P+e.F*(u(function(){a.sort(void 0)})||!u(function(){a.sort(null)})||!t(128)(c)),"Array",{sort:function sort(t){return void 0===t?c.call(o(this)):c.call(o(this),i(t))}})},{128:128,142:142,33:33,62:62,64:64}],174:[function(t,n,r){t(123)("Array")},{123:123}],175:[function(t,n,r){var e=t(62);e(e.S,"Date",{now:function(){return(new Date).getTime()}})},{62:62}],176:[function(t,n,r){var e=t(62),i=t(55);e(e.P+e.F*(Date.prototype.toISOString!==i),"Date",{toISOString:i})},{55:55,62:62}],177:[function(t,n,r){"use strict";var e=t(62),i=t(142),o=t(143);e(e.P+e.F*t(64)(function(){return null!==new Date(NaN).toJSON()||1!==Date.prototype.toJSON.call({toISOString:function(){return 1}})}),"Date",{toJSON:function toJSON(t){var n=i(this),r=o(n);return"number"!=typeof r||isFinite(r)?n.toISOString():null}})},{142:142,143:143,62:62,64:64}],178:[function(t,n,r){var e=t(152)("toPrimitive"),i=Date.prototype;e in i||t(72)(i,e,t(56))},{152:152,56:56,72:72}],179:[function(t,n,r){var e=Date.prototype,i="Invalid Date",o="toString",u=e[o],c=e.getTime;new Date(NaN)+""!=i&&t(118)(e,o,function toString(){var t=c.call(this);return t==t?u.call(this):i})},{118:118}],180:[function(t,n,r){var e=t(62);e(e.P,"Function",{bind:t(46)})},{46:46,62:62}],181:[function(t,n,r){"use strict";var e=t(81),i=t(105),o=t(152)("hasInstance"),u=Function.prototype;o in u||t(99).f(u,o,{value:function(t){if("function"!=typeof this||!e(t))return!1;if(!e(this.prototype))return t instanceof this;for(;t=i(t);)if(this.prototype===t)return!0;return!1}})},{105:105,152:152,81:81,99:99}],182:[function(t,n,r){var e=t(99).f,i=Function.prototype,o=/^\s*function ([^ (]*)/;"name"in i||t(58)&&e(i,"name",{configurable:!0,get:function(){try{return(""+this).match(o)[1]}catch(t){return""}}})},{58:58,99:99}],183:[function(t,n,r){"use strict";var e=t(49),i=t(149);n.exports=t(51)("Map",function(t){return function Map(){return t(this,0<arguments.length?arguments[0]:void 0)}},{get:function get(t){var n=e.getEntry(i(this,"Map"),t);return n&&n.v},set:function set(t,n){return 
e.def(i(this,"Map"),0===t?0:t,n)}},e,!0)},{149:149,49:49,51:51}],184:[function(t,n,r){var e=t(62),i=t(92),o=Math.sqrt,u=Math.acosh;e(e.S+e.F*!(u&&710==Math.floor(u(Number.MAX_VALUE))&&u(1/0)==1/0),"Math",{acosh:function acosh(t){return(t=+t)<1?NaN:94906265.62425156<t?Math.log(t)+Math.LN2:i(t-1+o(t-1)*o(t+1))}})},{62:62,92:92}],185:[function(t,n,r){var e=t(62),i=Math.asinh;e(e.S+e.F*!(i&&0<1/i(0)),"Math",{asinh:function asinh(t){return isFinite(t=+t)&&0!=t?t<0?-asinh(-t):Math.log(t+Math.sqrt(t*t+1)):t}})},{62:62}],186:[function(t,n,r){var e=t(62),i=Math.atanh;e(e.S+e.F*!(i&&1/i(-0)<0),"Math",{atanh:function atanh(t){return 0==(t=+t)?t:Math.log((1+t)/(1-t))/2}})},{62:62}],187:[function(t,n,r){var e=t(62),i=t(93);e(e.S,"Math",{cbrt:function cbrt(t){return i(t=+t)*Math.pow(Math.abs(t),1/3)}})},{62:62,93:93}],188:[function(t,n,r){var e=t(62);e(e.S,"Math",{clz32:function clz32(t){return(t>>>=0)?31-Math.floor(Math.log(t+.5)*Math.LOG2E):32}})},{62:62}],189:[function(t,n,r){var e=t(62),i=Math.exp;e(e.S,"Math",{cosh:function cosh(t){return(i(t=+t)+i(-t))/2}})},{62:62}],190:[function(t,n,r){var e=t(62),i=t(90);e(e.S+e.F*(i!=Math.expm1),"Math",{expm1:i})},{62:62,90:90}],191:[function(t,n,r){var e=t(62);e(e.S,"Math",{fround:t(91)})},{62:62,91:91}],192:[function(t,n,r){var e=t(62),a=Math.abs;e(e.S,"Math",{hypot:function hypot(t,n){for(var r,e,i=0,o=0,u=arguments.length,c=0;o<u;)c<(r=a(arguments[o++]))?(i=i*(e=c/r)*e+1,c=r):i+=0<r?(e=r/c)*e:r;return c===1/0?1/0:c*Math.sqrt(i)}})},{62:62}],193:[function(t,n,r){var e=t(62),i=Math.imul;e(e.S+e.F*t(64)(function(){return-5!=i(4294967295,5)||2!=i.length}),"Math",{imul:function imul(t,n){var r=65535,e=+t,i=+n,o=r&e,u=r&i;return 0|o*u+((r&e>>>16)*u+o*(r&i>>>16)<<16>>>0)}})},{62:62,64:64}],194:[function(t,n,r){var e=t(62);e(e.S,"Math",{log10:function log10(t){return Math.log(t)*Math.LOG10E}})},{62:62}],195:[function(t,n,r){var e=t(62);e(e.S,"Math",{log1p:t(92)})},{62:62,92:92}],196:[function(t,n,r){var e=t(62);e(e.S,"Math",{log2:function log2(t){return Math.log(t)/Math.LN2}})},{62:62}],197:[function(t,n,r){var e=t(62);e(e.S,"Math",{sign:t(93)})},{62:62,93:93}],198:[function(t,n,r){var e=t(62),i=t(90),o=Math.exp;e(e.S+e.F*t(64)(function(){return-2e-17!=!Math.sinh(-2e-17)}),"Math",{sinh:function sinh(t){return Math.abs(t=+t)<1?(i(t)-i(-t))/2:(o(t-1)-o(-t-1))*(Math.E/2)}})},{62:62,64:64,90:90}],199:[function(t,n,r){var e=t(62),i=t(90),o=Math.exp;e(e.S,"Math",{tanh:function tanh(t){var n=i(t=+t),r=i(-t);return n==1/0?1:r==1/0?-1:(n-r)/(o(t)+o(-t))}})},{62:62,90:90}],200:[function(t,n,r){var e=t(62);e(e.S,"Math",{trunc:function trunc(t){return(0<t?Math.floor:Math.ceil)(t)}})},{62:62}],201:[function(t,n,r){"use strict";function EN(t){var n=s(t,!1);if("string"==typeof n&&2<n.length){var r,e,i,o=(n=x?n.trim():h(n,3)).charCodeAt(0);if(43===o||45===o){if(88===(r=n.charCodeAt(2))||120===r)return NaN}else if(48===o){switch(n.charCodeAt(1)){case 66:case 98:e=2,i=49;break;case 79:case 111:e=8,i=55;break;default:return+n}for(var u,c=n.slice(2),a=0,f=c.length;a<f;a++)if((u=c.charCodeAt(a))<48||i<u)return NaN;return parseInt(c,e)}}return+n}var e=t(70),i=t(71),o=t(48),u=t(75),s=t(143),c=t(64),a=t(103).f,f=t(101).f,l=t(99).f,h=t(134).trim,p="Number",v=e[p],g=v,y=v.prototype,d=o(t(98)(y))==p,x="trim"in String.prototype;if(!v(" 0o1")||!v("0b1")||v("+0x1")){v=function Number(t){var n=arguments.length<1?0:t,r=this;return r instanceof v&&(d?c(function(){y.valueOf.call(r)}):o(r)!=p)?u(new g(EN(n)),r,v):EN(n)};for(var 
m,S=t(58)?a(g):"MAX_VALUE,MIN_VALUE,NaN,NEGATIVE_INFINITY,POSITIVE_INFINITY,EPSILON,isFinite,isInteger,isNaN,isSafeInteger,MAX_SAFE_INTEGER,MIN_SAFE_INTEGER,parseFloat,parseInt,isInteger".split(","),b=0;S.length>b;b++)i(g,m=S[b])&&!i(v,m)&&l(v,m,f(g,m));(v.prototype=y).constructor=v,t(118)(e,p,v)}},{101:101,103:103,118:118,134:134,143:143,48:48,58:58,64:64,70:70,71:71,75:75,98:98,99:99}],202:[function(t,n,r){var e=t(62);e(e.S,"Number",{EPSILON:Math.pow(2,-52)})},{62:62}],203:[function(t,n,r){var e=t(62),i=t(70).isFinite;e(e.S,"Number",{isFinite:function isFinite(t){return"number"==typeof t&&i(t)}})},{62:62,70:70}],204:[function(t,n,r){var e=t(62);e(e.S,"Number",{isInteger:t(80)})},{62:62,80:80}],205:[function(t,n,r){var e=t(62);e(e.S,"Number",{isNaN:function isNaN(t){return t!=t}})},{62:62}],206:[function(t,n,r){var e=t(62),i=t(80),o=Math.abs;e(e.S,"Number",{isSafeInteger:function isSafeInteger(t){return i(t)&&o(t)<=9007199254740991}})},{62:62,80:80}],207:[function(t,n,r){var e=t(62);e(e.S,"Number",{MAX_SAFE_INTEGER:9007199254740991})},{62:62}],208:[function(t,n,r){var e=t(62);e(e.S,"Number",{MIN_SAFE_INTEGER:-9007199254740991})},{62:62}],209:[function(t,n,r){var e=t(62),i=t(112);e(e.S+e.F*(Number.parseFloat!=i),"Number",{parseFloat:i})},{112:112,62:62}],210:[function(t,n,r){var e=t(62),i=t(113);e(e.S+e.F*(Number.parseInt!=i),"Number",{parseInt:i})},{113:113,62:62}],211:[function(t,n,r){"use strict";function XO(t,n){for(var r=-1,e=n;++r<6;)e+=t*u[r],u[r]=e%1e7,e=o(e/1e7)}function YO(t){for(var n=6,r=0;0<=--n;)r+=u[n],u[n]=o(r/t),r=r%t*1e7}function ZO(){for(var t=6,n="";0<=--t;)if(""!==n||0===t||0!==u[t]){var r=String(u[t]);n=""===n?r:n+l.call("0",7-r.length)+r}return n}var e=t(62),f=t(139),s=t(34),l=t(133),i=1..toFixed,o=Math.floor,u=[0,0,0,0,0,0],h="Number.toFixed: incorrect invocation!",p=function(t,n,r){return 0===n?r:n%2==1?p(t,n-1,r*t):p(t*t,n/2,r)};e(e.P+e.F*(!!i&&("0.000"!==8e-5.toFixed(3)||"1"!==.9.toFixed(0)||"1.25"!==1.255.toFixed(2)||"1000000000000000128"!==(0xde0b6b3a7640080).toFixed(0))||!t(64)(function(){i.call({})})),"Number",{toFixed:function toFixed(t){var n,r,e,i,o=s(this,h),u=f(t),c="",a="0";if(u<0||20<u)throw RangeError(h);if(o!=o)return"NaN";if(o<=-1e21||1e21<=o)return String(o);if(o<0&&(c="-",o=-o),1e-21<o)if(r=(n=function(t){for(var n=0,r=t;4096<=r;)n+=12,r/=4096;for(;2<=r;)n+=1,r/=2;return n}(o*p(2,69,1))-69)<0?o*p(2,-n,1):o/p(2,n,1),r*=4503599627370496,0<(n=52-n)){for(XO(0,r),e=u;7<=e;)XO(1e7,0),e-=7;for(XO(p(10,e,1),0),e=n-1;23<=e;)YO(1<<23),e-=23;YO(1<<e),XO(1,1),YO(2),a=ZO()}else XO(0,r),XO(1<<-n,0),a=ZO()+l.call("0",u);return a=0<u?c+((i=a.length)<=u?"0."+l.call("0",u-i)+a:a.slice(0,i-u)+"."+a.slice(i-u)):c+a}})},{133:133,139:139,34:34,62:62,64:64}],212:[function(t,n,r){"use strict";var e=t(62),i=t(64),o=t(34),u=1..toPrecision;e(e.P+e.F*(i(function(){return"1"!==u.call(1,void 0)})||!i(function(){u.call({})})),"Number",{toPrecision:function toPrecision(t){var n=o(this,"Number#toPrecision: incorrect invocation!");return void 0===t?u.call(n):u.call(n,t)}})},{34:34,62:62,64:64}],213:[function(t,n,r){var e=t(62);e(e.S+e.F,"Object",{assign:t(97)})},{62:62,97:97}],214:[function(t,n,r){var e=t(62);e(e.S,"Object",{create:t(98)})},{62:62,98:98}],215:[function(t,n,r){var e=t(62);e(e.S+e.F*!t(58),"Object",{defineProperties:t(100)})},{100:100,58:58,62:62}],216:[function(t,n,r){var e=t(62);e(e.S+e.F*!t(58),"Object",{defineProperty:t(99).f})},{58:58,62:62,99:99}],217:[function(t,n,r){var e=t(81),i=t(94).onFreeze;t(109)("freeze",function(n){return function freeze(t){return 
n&&e(t)?n(i(t)):t}})},{109:109,81:81,94:94}],218:[function(t,n,r){var e=t(140),i=t(101).f;t(109)("getOwnPropertyDescriptor",function(){return function getOwnPropertyDescriptor(t,n){return i(e(t),n)}})},{101:101,109:109,140:140}],219:[function(t,n,r){t(109)("getOwnPropertyNames",function(){return t(102).f})},{102:102,109:109}],220:[function(t,n,r){var e=t(142),i=t(105);t(109)("getPrototypeOf",function(){return function getPrototypeOf(t){return i(e(t))}})},{105:105,109:109,142:142}],221:[function(t,n,r){var e=t(81);t(109)("isExtensible",function(n){return function isExtensible(t){return!!e(t)&&(!n||n(t))}})},{109:109,81:81}],222:[function(t,n,r){var e=t(81);t(109)("isFrozen",function(n){return function isFrozen(t){return!e(t)||!!n&&n(t)}})},{109:109,81:81}],223:[function(t,n,r){var e=t(81);t(109)("isSealed",function(n){return function isSealed(t){return!e(t)||!!n&&n(t)}})},{109:109,81:81}],224:[function(t,n,r){var e=t(62);e(e.S,"Object",{is:t(121)})},{121:121,62:62}],225:[function(t,n,r){var e=t(142),i=t(107);t(109)("keys",function(){return function keys(t){return i(e(t))}})},{107:107,109:109,142:142}],226:[function(t,n,r){var e=t(81),i=t(94).onFreeze;t(109)("preventExtensions",function(n){return function preventExtensions(t){return n&&e(t)?n(i(t)):t}})},{109:109,81:81,94:94}],227:[function(t,n,r){var e=t(81),i=t(94).onFreeze;t(109)("seal",function(n){return function seal(t){return n&&e(t)?n(i(t)):t}})},{109:109,81:81,94:94}],228:[function(t,n,r){var e=t(62);e(e.S,"Object",{setPrototypeOf:t(122).set})},{122:122,62:62}],229:[function(t,n,r){"use strict";var e=t(47),i={};i[t(152)("toStringTag")]="z",i+""!="[object z]"&&t(118)(Object.prototype,"toString",function toString(){return"[object "+e(this)+"]"},!0)},{118:118,152:152,47:47}],230:[function(t,n,r){var e=t(62),i=t(112);e(e.G+e.F*(parseFloat!=i),{parseFloat:i})},{112:112,62:62}],231:[function(t,n,r){var e=t(62),i=t(113);e(e.G+e.F*(parseInt!=i),{parseInt:i})},{113:113,62:62}],232:[function(r,t,n){"use strict";function $R(){}function fS(t){var n;return!(!h(t)||"function"!=typeof(n=t.then))&&n}function gS(s,r){if(!s._n){s._n=!0;var e=s._c;x(function(){for(var a=s._v,f=1==s._s,t=0,n=function(t){var n,r,e,i=f?t.ok:t.fail,o=t.resolve,u=t.reject,c=t.domain;try{i?(f||(2==s._h&&R(s),s._h=1),!0===i?n=a:(c&&c.enter(),n=i(a),c&&(c.exit(),e=!0)),n===t.promise?u(E("Promise-chain cycle")):(r=fS(n))?r.call(n,o,u):o(n)):u(a)}catch(t){c&&!e&&c.exit(),u(t)}};e.length>t;)n(e[t++]);s._c=[],s._n=!1,r&&!s._h&&N(s)})}}function kS(t){var n=this;n._d||(n._d=!0,(n=n._w||n)._v=t,n._s=2,n._a||(n._a=n._c.slice()),gS(n,!0))}var e,i,o,u,c=r(89),a=r(70),f=r(54),s=r(47),l=r(62),h=r(81),p=r(33),v=r(37),g=r(68),y=r(127),d=r(136).set,x=r(95)(),m=r(96),S=r(114),b=r(148),w=r(115),_="Promise",E=a.TypeError,O=a.process,F=O&&O.versions,I=F&&F.v8||"",P=a[_],A="process"==s(O),M=i=m.f,k=!!function(){try{var t=P.resolve(1),n=(t.constructor={})[r(152)("species")]=function(t){t($R,$R)};return(A||"function"==typeof PromiseRejectionEvent)&&t.then($R)instanceof n&&0!==I.indexOf("6.6")&&-1===b.indexOf("Chrome/66")}catch(t){}}(),N=function(o){d.call(a,function(){var t,n,r,e=o._v,i=j(o);if(i&&(t=S(function(){A?O.emit("unhandledRejection",e,o):(n=a.onunhandledrejection)?n({promise:o,reason:e}):(r=a.console)&&r.error&&r.error("Unhandled promise rejection",e)}),o._h=A||j(o)?2:1),o._a=void 0,i&&t.e)throw t.v})},j=function(t){return 1!==t._h&&0===(t._a||t._c).length},R=function(n){d.call(a,function(){var 
t;A?O.emit("rejectionHandled",n):(t=a.onrejectionhandled)&&t({promise:n,reason:n._v})})},T=function(t){var r,e=this;if(!e._d){e._d=!0,e=e._w||e;try{if(e===t)throw E("Promise can't be resolved itself");(r=fS(t))?x(function(){var n={_w:e,_d:!1};try{r.call(t,f(T,n,1),f(kS,n,1))}catch(t){kS.call(n,t)}}):(e._v=t,e._s=1,gS(e,!1))}catch(t){kS.call({_w:e,_d:!1},t)}}};k||(P=function Promise(t){v(this,P,_,"_h"),p(t),e.call(this);try{t(f(T,this,1),f(kS,this,1))}catch(t){kS.call(this,t)}},(e=function Promise(t){this._c=[],this._a=void 0,this._s=0,this._d=!1,this._v=void 0,this._h=0,this._n=!1}).prototype=r(117)(P.prototype,{then:function then(t,n){var r=M(y(this,P));return r.ok="function"!=typeof t||t,r.fail="function"==typeof n&&n,r.domain=A?O.domain:void 0,this._c.push(r),this._a&&this._a.push(r),this._s&&gS(this,!1),r.promise},catch:function(t){return this.then(void 0,t)}}),o=function(){var t=new e;this.promise=t,this.resolve=f(T,t,1),this.reject=f(kS,t,1)},m.f=M=function(t){return t===P||t===u?new o(t):i(t)}),l(l.G+l.W+l.F*!k,{Promise:P}),r(124)(P,_),r(123)(_),u=r(52)[_],l(l.S+l.F*!k,_,{reject:function reject(t){var n=M(this);return(0,n.reject)(t),n.promise}}),l(l.S+l.F*(c||!k),_,{resolve:function resolve(t){return w(c&&this===u?P:this,t)}}),l(l.S+l.F*!(k&&r(86)(function(t){P.all(t).catch($R)})),_,{all:function all(t){var u=this,n=M(u),c=n.resolve,a=n.reject,r=S(function(){var e=[],i=0,o=1;g(t,!1,function(t){var n=i++,r=!1;e.push(void 0),o++,u.resolve(t).then(function(t){r||(r=!0,e[n]=t,--o||c(e))},a)}),--o||c(e)});return r.e&&a(r.v),n.promise},race:function race(t){var n=this,r=M(n),e=r.reject,i=S(function(){g(t,!1,function(t){n.resolve(t).then(r.resolve,e)})});return i.e&&e(i.v),r.promise}})},{114:114,115:115,117:117,123:123,124:124,127:127,136:136,148:148,152:152,33:33,37:37,47:47,52:52,54:54,62:62,68:68,70:70,81:81,86:86,89:89,95:95,96:96}],233:[function(t,n,r){var e=t(62),o=t(33),u=t(38),c=(t(70).Reflect||{}).apply,a=Function.apply;e(e.S+e.F*!t(64)(function(){c(function(){})}),"Reflect",{apply:function apply(t,n,r){var e=o(t),i=u(r);return c?c(e,n,i):a.call(e,n,i)}})},{33:33,38:38,62:62,64:64,70:70}],234:[function(t,n,r){var e=t(62),c=t(98),a=t(33),f=t(38),s=t(81),i=t(64),l=t(46),h=(t(70).Reflect||{}).construct,p=i(function(){function F(){}return!(h(function(){},[],F)instanceof F)}),v=!i(function(){h(function(){})});e(e.S+e.F*(p||v),"Reflect",{construct:function construct(t,n){a(t),f(n);var r=arguments.length<3?t:a(arguments[2]);if(v&&!p)return h(t,n,r);if(t==r){switch(n.length){case 0:return new t;case 1:return new t(n[0]);case 2:return new t(n[0],n[1]);case 3:return new t(n[0],n[1],n[2]);case 4:return new t(n[0],n[1],n[2],n[3])}var e=[null];return e.push.apply(e,n),new(l.apply(t,e))}var i=r.prototype,o=c(s(i)?i:Object.prototype),u=Function.apply.call(t,o,n);return s(u)?u:o}})},{33:33,38:38,46:46,62:62,64:64,70:70,81:81,98:98}],235:[function(t,n,r){var e=t(99),i=t(62),o=t(38),u=t(143);i(i.S+i.F*t(64)(function(){Reflect.defineProperty(e.f({},1,{value:1}),1,{value:2})}),"Reflect",{defineProperty:function defineProperty(t,n,r){o(t),n=u(n,!0),o(r);try{return e.f(t,n,r),!0}catch(t){return!1}}})},{143:143,38:38,62:62,64:64,99:99}],236:[function(t,n,r){var e=t(62),i=t(101).f,o=t(38);e(e.S,"Reflect",{deleteProperty:function deleteProperty(t,n){var r=i(o(t),n);return!(r&&!r.configurable)&&delete t[n]}})},{101:101,38:38,62:62}],237:[function(t,n,r){"use strict";function IU(t){this._t=i(t),this._i=0;var n,r=this._k=[];for(n in t)r.push(n)}var e=t(62),i=t(38);t(84)(IU,"Object",function(){var 
t,n=this._k;do{if(this._i>=n.length)return{value:void 0,done:!0}}while(!((t=n[this._i++])in this._t));return{value:t,done:!1}}),e(e.S,"Reflect",{enumerate:function enumerate(t){return new IU(t)}})},{38:38,62:62,84:84}],238:[function(t,n,r){var e=t(101),i=t(62),o=t(38);i(i.S,"Reflect",{getOwnPropertyDescriptor:function getOwnPropertyDescriptor(t,n){return e.f(o(t),n)}})},{101:101,38:38,62:62}],239:[function(t,n,r){var e=t(62),i=t(105),o=t(38);e(e.S,"Reflect",{getPrototypeOf:function getPrototypeOf(t){return i(o(t))}})},{105:105,38:38,62:62}],240:[function(t,n,r){var o=t(101),u=t(105),c=t(71),e=t(62),a=t(81),f=t(38);e(e.S,"Reflect",{get:function get(t,n){var r,e,i=arguments.length<3?t:arguments[2];return f(t)===i?t[n]:(r=o.f(t,n))?c(r,"value")?r.value:void 0!==r.get?r.get.call(i):void 0:a(e=u(t))?get(e,n,i):void 0}})},{101:101,105:105,38:38,62:62,71:71,81:81}],241:[function(t,n,r){var e=t(62);e(e.S,"Reflect",{has:function has(t,n){return n in t}})},{62:62}],242:[function(t,n,r){var e=t(62),i=t(38),o=Object.isExtensible;e(e.S,"Reflect",{isExtensible:function isExtensible(t){return i(t),!o||o(t)}})},{38:38,62:62}],243:[function(t,n,r){var e=t(62);e(e.S,"Reflect",{ownKeys:t(111)})},{111:111,62:62}],244:[function(t,n,r){var e=t(62),i=t(38),o=Object.preventExtensions;e(e.S,"Reflect",{preventExtensions:function preventExtensions(t){i(t);try{return o&&o(t),!0}catch(t){return!1}}})},{38:38,62:62}],245:[function(t,n,r){var e=t(62),i=t(122);i&&e(e.S,"Reflect",{setPrototypeOf:function setPrototypeOf(t,n){i.check(t,n);try{return i.set(t,n),!0}catch(t){return!1}}})},{122:122,62:62}],246:[function(t,n,r){var c=t(99),a=t(101),f=t(105),s=t(71),e=t(62),l=t(116),h=t(38),p=t(81);e(e.S,"Reflect",{set:function set(t,n,r){var e,i,o=arguments.length<4?t:arguments[3],u=a.f(h(t),n);if(!u){if(p(i=f(t)))return set(i,n,r,o);u=l(0)}if(s(u,"value")){if(!1===u.writable||!p(o))return!1;if(e=a.f(o,n)){if(e.get||e.set||!1===e.writable)return!1;e.value=r,c.f(o,n,e)}else c.f(o,n,l(0,r));return!0}return void 0!==u.set&&(u.set.call(o,r),!0)}})},{101:101,105:105,116:116,38:38,62:62,71:71,81:81,99:99}],247:[function(t,n,r){var e=t(70),o=t(75),i=t(99).f,u=t(103).f,c=t(82),a=t(66),f=e.RegExp,s=f,l=f.prototype,h=/a/g,p=/a/g,v=new f(h)!==h;if(t(58)&&(!v||t(64)(function(){return p[t(152)("match")]=!1,f(h)!=h||f(p)==p||"/a/i"!=f(h,"i")}))){f=function RegExp(t,n){var r=this instanceof f,e=c(t),i=void 0===n;return!r&&e&&t.constructor===f&&i?t:o(v?new s(e&&!i?t.source:t,n):s((e=t instanceof f)?t.source:t,e&&i?a.call(t):n),r?this:l,f)};function DW(n){n in f||i(f,n,{configurable:!0,get:function(){return s[n]},set:function(t){s[n]=t}})}for(var g=u(s),y=0;g.length>y;)DW(g[y++]);(l.constructor=f).prototype=l,t(118)(e,"RegExp",f)}t(123)("RegExp")},{103:103,118:118,123:123,152:152,58:58,64:64,66:66,70:70,75:75,82:82,99:99}],248:[function(t,n,r){"use strict";var e=t(120);t(62)({target:"RegExp",proto:!0,forced:e!==/./.exec},{exec:e})},{120:120,62:62}],249:[function(t,n,r){t(58)&&"g"!=/./g.flags&&t(99).f(RegExp.prototype,"flags",{configurable:!0,get:t(66)})},{58:58,66:66,99:99}],250:[function(t,n,r){"use strict";var l=t(38),h=t(141),p=t(36),v=t(119);t(65)("match",1,function(e,i,f,s){return[function match(t){var n=e(this),r=null==t?void 0:t[i];return void 0!==r?r.call(t,n):new RegExp(t)[i](String(n))},function(t){var n=s(f,t,this);if(n.done)return n.value;var r=l(t),e=String(this);if(!r.global)return v(r,e);for(var i,o=r.unicode,u=[],c=r.lastIndex=0;null!==(i=v(r,e));){var a=String(i[0]);""===(u[c]=a)&&(r.lastIndex=p(e,h(r.lastIndex),o)),c++}return 
0===c?null:u}]})},{119:119,141:141,36:36,38:38,65:65}],251:[function(t,n,r){"use strict";var _=t(38),e=t(142),E=t(141),O=t(139),F=t(36),I=t(119),P=Math.max,A=Math.min,h=Math.floor,p=/\$([$&`']|\d\d?|<[^>]*>)/g,v=/\$([$&`']|\d\d?)/g;t(65)("replace",2,function(i,o,b,w){return[function replace(t,n){var r=i(this),e=null==t?void 0:t[o];return void 0!==e?e.call(t,r,n):b.call(String(r),t,n)},function(t,n){var r=w(b,t,this,n);if(r.done)return r.value;var e=_(t),i=String(this),o="function"==typeof n;o||(n=String(n));var u=e.global;if(u){var c=e.unicode;e.lastIndex=0}for(var a=[];;){var f=I(e,i);if(null===f)break;if(a.push(f),!u)break;""===String(f[0])&&(e.lastIndex=F(i,E(e.lastIndex),c))}for(var s,l="",h=0,p=0;p<a.length;p++){f=a[p];for(var v=String(f[0]),g=P(A(O(f.index),i.length),0),y=[],d=1;d<f.length;d++)y.push(void 0===(s=f[d])?s:String(s));var x=f.groups;if(o){var m=[v].concat(y,g,i);void 0!==x&&m.push(x);var S=String(n.apply(void 0,m))}else S=getSubstitution(v,i,g,y,x,n);h<=g&&(l+=i.slice(h,g)+S,h=g+v.length)}return l+i.slice(h)}];function getSubstitution(o,u,c,a,f,t){var s=c+o.length,l=a.length,n=v;return void 0!==f&&(f=e(f),n=p),b.call(t,n,function(t,n){var r;switch(n.charAt(0)){case"$":return"$";case"&":return o;case"`":return u.slice(0,c);case"'":return u.slice(s);case"<":r=f[n.slice(1,-1)];break;default:var e=+n;if(0==e)return t;if(l<e){var i=h(e/10);return 0===i?t:i<=l?void 0===a[i-1]?n.charAt(1):a[i-1]+n.charAt(1):t}r=a[e-1]}return void 0===r?"":r})}})},{119:119,139:139,141:141,142:142,36:36,38:38,65:65}],252:[function(t,n,r){"use strict";var a=t(38),f=t(121),s=t(119);t(65)("search",1,function(e,i,u,c){return[function search(t){var n=e(this),r=null==t?void 0:t[i];return void 0!==r?r.call(t,n):new RegExp(t)[i](String(n))},function(t){var n=c(u,t,this);if(n.done)return n.value;var r=a(t),e=String(this),i=r.lastIndex;f(i,0)||(r.lastIndex=0);var o=s(r,e);return f(r.lastIndex,i)||(r.lastIndex=i),null===o?-1:o.index}]})},{119:119,121:121,38:38,65:65}],253:[function(t,n,r){"use strict";var l=t(82),m=t(38),S=t(127),b=t(36),w=t(141),_=t(119),h=t(120),e=t(64),E=Math.min,p=[].push,u="split",v="length",g="lastIndex",O=4294967295,F=!e(function(){RegExp(O,"y")});t(65)("split",2,function(i,o,y,d){var x;return x="c"=="abbc"[u](/(b)*/)[1]||4!="test"[u](/(?:)/,-1)[v]||2!="ab"[u](/(?:ab)*/)[v]||4!="."[u](/(.?)(.?)/)[v]||1<"."[u](/()()/)[v]||""[u](/.?/)[v]?function(t,n){var r=String(this);if(void 0===t&&0===n)return[];if(!l(t))return y.call(r,t,n);for(var e,i,o,u=[],c=(t.ignoreCase?"i":"")+(t.multiline?"m":"")+(t.unicode?"u":"")+(t.sticky?"y":""),a=0,f=void 0===n?O:n>>>0,s=new RegExp(t.source,c+"g");(e=h.call(s,r))&&!(a<(i=s[g])&&(u.push(r.slice(a,e.index)),1<e[v]&&e.index<r[v]&&p.apply(u,e.slice(1)),o=e[0][v],a=i,u[v]>=f));)s[g]===e.index&&s[g]++;return a===r[v]?!o&&s.test("")||u.push(""):u.push(r.slice(a)),u[v]>f?u.slice(0,f):u}:"0"[u](void 0,0)[v]?function(t,n){return void 0===t&&0===n?[]:y.call(this,t,n)}:y,[function split(t,n){var r=i(this),e=null==t?void 0:t[o];return void 0!==e?e.call(t,r,n):x.call(String(r),t,n)},function(t,n){var r=d(x,t,this,n,x!==y);if(r.done)return r.value;var e=m(t),i=String(this),o=S(e,RegExp),u=e.unicode,c=(e.ignoreCase?"i":"")+(e.multiline?"m":"")+(e.unicode?"u":"")+(F?"y":"g"),a=new o(F?e:"^(?:"+e.source+")",c),f=void 0===n?O:n>>>0;if(0==f)return[];if(0===i.length)return null===_(a,i)?[i]:[];for(var s=0,l=0,h=[];l<i.length;){a.lastIndex=F?l:0;var 
p,v=_(a,F?i:i.slice(l));if(null===v||(p=E(w(a.lastIndex+(F?0:l)),i.length))===s)l=b(i,l,u);else{if(h.push(i.slice(s,l)),h.length===f)return h;for(var g=1;g<=v.length-1;g++)if(h.push(v[g]),h.length===f)return h;l=s=p}}return h.push(i.slice(s)),h}]})},{119:119,120:120,127:127,141:141,36:36,38:38,64:64,65:65,82:82}],254:[function(n,t,r){"use strict";n(249);function XZ(t){n(118)(RegExp.prototype,u,t,!0)}var e=n(38),i=n(66),o=n(58),u="toString",c=/./[u];n(64)(function(){return"/a/b"!=c.call({source:"a",flags:"b"})})?XZ(function toString(){var t=e(this);return"/".concat(t.source,"/","flags"in t?t.flags:!o&&t instanceof RegExp?i.call(t):void 0)}):c.name!=u&&XZ(function toString(){return c.call(this)})},{118:118,249:249,38:38,58:58,64:64,66:66}],255:[function(t,n,r){"use strict";var e=t(49),i=t(149);n.exports=t(51)("Set",function(t){return function Set(){return t(this,0<arguments.length?arguments[0]:void 0)}},{add:function add(t){return e.def(i(this,"Set"),t=0===t?0:t,t)}},e)},{149:149,49:49,51:51}],256:[function(t,n,r){"use strict";t(131)("anchor",function(n){return function anchor(t){return n(this,"a","name",t)}})},{131:131}],257:[function(t,n,r){"use strict";t(131)("big",function(t){return function big(){return t(this,"big","","")}})},{131:131}],258:[function(t,n,r){"use strict";t(131)("blink",function(t){return function blink(){return t(this,"blink","","")}})},{131:131}],259:[function(t,n,r){"use strict";t(131)("bold",function(t){return function bold(){return t(this,"b","","")}})},{131:131}],260:[function(t,n,r){"use strict";var e=t(62),i=t(129)(!1);e(e.P,"String",{codePointAt:function codePointAt(t){return i(this,t)}})},{129:129,62:62}],261:[function(t,n,r){"use strict";var e=t(62),u=t(141),c=t(130),a="endsWith",f=""[a];e(e.P+e.F*t(63)(a),"String",{endsWith:function endsWith(t){var n=c(this,t,a),r=1<arguments.length?arguments[1]:void 0,e=u(n.length),i=void 0===r?e:Math.min(u(r),e),o=String(t);return f?f.call(n,o,i):n.slice(i-o.length,i)===o}})},{130:130,141:141,62:62,63:63}],262:[function(t,n,r){"use strict";t(131)("fixed",function(t){return function fixed(){return t(this,"tt","","")}})},{131:131}],263:[function(t,n,r){"use strict";t(131)("fontcolor",function(n){return function fontcolor(t){return n(this,"font","color",t)}})},{131:131}],264:[function(t,n,r){"use strict";t(131)("fontsize",function(n){return function fontsize(t){return n(this,"font","size",t)}})},{131:131}],265:[function(t,n,r){var e=t(62),o=t(137),u=String.fromCharCode,i=String.fromCodePoint;e(e.S+e.F*(!!i&&1!=i.length),"String",{fromCodePoint:function fromCodePoint(t){for(var n,r=[],e=arguments.length,i=0;i<e;){if(n=+arguments[i++],o(n,1114111)!==n)throw RangeError(n+" is not a valid code point");r.push(n<65536?u(n):u(55296+((n-=65536)>>10),n%1024+56320))}return r.join("")}})},{137:137,62:62}],266:[function(t,n,r){"use strict";var e=t(62),i=t(130),o="includes";e(e.P+e.F*t(63)(o),"String",{includes:function includes(t){return!!~i(this,t,o).indexOf(t,1<arguments.length?arguments[1]:void 0)}})},{130:130,62:62,63:63}],267:[function(t,n,r){"use strict";t(131)("italics",function(t){return function italics(){return t(this,"i","","")}})},{131:131}],268:[function(t,n,r){"use strict";var e=t(129)(!0);t(85)(String,"String",function(t){this._t=String(t),this._i=0},function(){var t,n=this._t,r=this._i;return r>=n.length?{value:void 0,done:!0}:(t=e(n,r),this._i+=t.length,{value:t,done:!1})})},{129:129,85:85}],269:[function(t,n,r){"use strict";t(131)("link",function(n){return function link(t){return 
n(this,"a","href",t)}})},{131:131}],270:[function(t,n,r){var e=t(62),u=t(140),c=t(141);e(e.S,"String",{raw:function raw(t){for(var n=u(t.raw),r=c(n.length),e=arguments.length,i=[],o=0;o<r;)i.push(String(n[o++])),o<e&&i.push(String(arguments[o]));return i.join("")}})},{140:140,141:141,62:62}],271:[function(t,n,r){var e=t(62);e(e.P,"String",{repeat:t(133)})},{133:133,62:62}],272:[function(t,n,r){"use strict";t(131)("small",function(t){return function small(){return t(this,"small","","")}})},{131:131}],273:[function(t,n,r){"use strict";var e=t(62),i=t(141),o=t(130),u="startsWith",c=""[u];e(e.P+e.F*t(63)(u),"String",{startsWith:function startsWith(t){var n=o(this,t,u),r=i(Math.min(1<arguments.length?arguments[1]:void 0,n.length)),e=String(t);return c?c.call(n,e,r):n.slice(r,r+e.length)===e}})},{130:130,141:141,62:62,63:63}],274:[function(t,n,r){"use strict";t(131)("strike",function(t){return function strike(){return t(this,"strike","","")}})},{131:131}],275:[function(t,n,r){"use strict";t(131)("sub",function(t){return function sub(){return t(this,"sub","","")}})},{131:131}],276:[function(t,n,r){"use strict";t(131)("sup",function(t){return function sup(){return t(this,"sup","","")}})},{131:131}],277:[function(t,n,r){"use strict";t(134)("trim",function(t){return function trim(){return t(this,3)}})},{134:134}],278:[function(t,n,r){"use strict";function B1(t){var n=W[t]=E(j[L]);return n._k=t,n}function E1(t,n){x(t);for(var r,e=y(n=b(n)),i=0,o=e.length;i<o;)Q(t,r=e[i++],n[r]);return t}function G1(t){var n=D.call(this,t=w(t,!0));return!(this===B&&u(W,t)&&!u(V,t))&&(!(n||!u(this,t)||!u(W,t)||u(this,C)&&this[C][t])||n)}function H1(t,n){if(t=b(t),n=w(n,!0),t!==B||!u(W,n)||u(V,n)){var r=M(t,n);return!r||!u(W,n)||u(t,C)&&t[C][n]||(r.enumerable=!0),r}}function I1(t){for(var n,r=N(b(t)),e=[],i=0;r.length>i;)u(W,n=r[i++])||n==C||n==a||e.push(n);return e}function J1(t){for(var n,r=t===B,e=N(r?V:b(t)),i=[],o=0;e.length>o;)!u(W,n=e[o++])||r&&!u(B,n)||i.push(W[n]);return i}var e=t(70),u=t(71),i=t(58),o=t(62),c=t(118),a=t(94).KEY,f=t(64),s=t(126),l=t(124),h=t(147),p=t(152),v=t(151),g=t(150),y=t(61),d=t(79),x=t(38),m=t(81),S=t(142),b=t(140),w=t(143),_=t(116),E=t(98),O=t(102),F=t(101),I=t(104),P=t(99),A=t(107),M=F.f,k=P.f,N=O.f,j=e.Symbol,R=e.JSON,T=R&&R.stringify,L="prototype",C=p("_hidden"),G=p("toPrimitive"),D={}.propertyIsEnumerable,U=s("symbol-registry"),W=s("symbols"),V=s("op-symbols"),B=Object[L],q="function"==typeof j&&!!I.f,Y=e.QObject,z=!Y||!Y[L]||!Y[L].findChild,X=i&&f(function(){return 7!=E(k({},"a",{get:function(){return k(this,"a",{value:7}).a}})).a})?function(t,n,r){var e=M(B,n);e&&delete B[n],k(t,n,r),e&&t!==B&&k(B,n,e)}:k,$=q&&"symbol"==typeof j.iterator?function(t){return"symbol"==typeof t}:function(t){return t instanceof j},Q=function defineProperty(t,n,r){return t===B&&Q(V,n,r),x(t),n=w(n,!0),x(r),u(W,n)?(r.enumerable?(u(t,C)&&t[C][n]&&(t[C][n]=!1),r=E(r,{enumerable:_(0,!1)})):(u(t,C)||k(t,C,_(1,{})),t[C][n]=!0),X(t,n,r)):k(t,n,r)};q||(c((j=function Symbol(){if(this instanceof j)throw TypeError("Symbol is not a constructor!");var n=h(0<arguments.length?arguments[0]:void 0),r=function(t){this===B&&r.call(V,t),u(this,C)&&u(this[C],n)&&(this[C][n]=!1),X(this,n,_(1,t))};return i&&z&&X(B,n,{configurable:!0,set:r}),B1(n)})[L],"toString",function toString(){return this._k}),F.f=H1,P.f=Q,t(103).f=O.f=I1,t(108).f=G1,I.f=J1,i&&!t(89)&&c(B,"propertyIsEnumerable",G1,!0),v.f=function(t){return B1(p(t))}),o(o.G+o.W+o.F*!q,{Symbol:j});for(var 
Z="hasInstance,isConcatSpreadable,iterator,match,replace,search,species,split,toPrimitive,toStringTag,unscopables".split(","),J=0;Z.length>J;)p(Z[J++]);for(var H=A(p.store),K=0;H.length>K;)g(H[K++]);o(o.S+o.F*!q,"Symbol",{for:function(t){return u(U,t+="")?U[t]:U[t]=j(t)},keyFor:function keyFor(t){if(!$(t))throw TypeError(t+" is not a symbol!");for(var n in U)if(U[n]===t)return n},useSetter:function(){z=!0},useSimple:function(){z=!1}}),o(o.S+o.F*!q,"Object",{create:function create(t,n){return void 0===n?E(t):E1(E(t),n)},defineProperty:Q,defineProperties:E1,getOwnPropertyDescriptor:H1,getOwnPropertyNames:I1,getOwnPropertySymbols:J1});var tt=f(function(){I.f(1)});o(o.S+o.F*tt,"Object",{getOwnPropertySymbols:function getOwnPropertySymbols(t){return I.f(S(t))}}),R&&o(o.S+o.F*(!q||f(function(){var t=j();return"[null]"!=T([t])||"{}"!=T({a:t})||"{}"!=T(Object(t))})),"JSON",{stringify:function stringify(t){for(var n,r,e=[t],i=1;i<arguments.length;)e.push(arguments[i++]);if(r=n=e[1],(m(n)||void 0!==t)&&!$(t))return d(n)||(n=function(t,n){if("function"==typeof r&&(n=r.call(this,t,n)),!$(n))return n}),e[1]=n,T.apply(R,e)}}),j[L][G]||t(72)(j[L],G,j[L].valueOf),l(j,"Symbol"),l(Math,"Math",!0),l(e.JSON,"JSON",!0)},{101:101,102:102,103:103,104:104,107:107,108:108,116:116,118:118,124:124,126:126,140:140,142:142,143:143,147:147,150:150,151:151,152:152,38:38,58:58,61:61,62:62,64:64,70:70,71:71,72:72,79:79,81:81,89:89,94:94,98:98,99:99}],279:[function(t,n,r){"use strict";var e=t(62),i=t(146),o=t(145),f=t(38),s=t(137),l=t(141),u=t(81),c=t(70).ArrayBuffer,h=t(127),p=o.ArrayBuffer,v=o.DataView,a=i.ABV&&c.isView,g=p.prototype.slice,y=i.VIEW,d="ArrayBuffer";e(e.G+e.W+e.F*(c!==p),{ArrayBuffer:p}),e(e.S+e.F*!i.CONSTR,d,{isView:function isView(t){return a&&a(t)||u(t)&&y in t}}),e(e.P+e.U+e.F*t(64)(function(){return!new p(2).slice(1,void 0).byteLength}),d,{slice:function slice(t,n){if(void 0!==g&&void 0===n)return g.call(f(this),t);for(var r=f(this).byteLength,e=s(t,r),i=s(void 0===n?r:n,r),o=new(h(this,p))(l(i-e)),u=new v(this),c=new v(o),a=0;e<i;)c.setUint8(a++,u.getUint8(e++));return o}}),t(123)(d)},{123:123,127:127,137:137,141:141,145:145,146:146,38:38,62:62,64:64,70:70,81:81}],280:[function(t,n,r){var e=t(62);e(e.G+e.W+e.F*!t(146).ABV,{DataView:t(145).DataView})},{145:145,146:146,62:62}],281:[function(t,n,r){t(144)("Float32",4,function(e){return function Float32Array(t,n,r){return e(this,t,n,r)}})},{144:144}],282:[function(t,n,r){t(144)("Float64",8,function(e){return function Float64Array(t,n,r){return e(this,t,n,r)}})},{144:144}],283:[function(t,n,r){t(144)("Int16",2,function(e){return function Int16Array(t,n,r){return e(this,t,n,r)}})},{144:144}],284:[function(t,n,r){t(144)("Int32",4,function(e){return function Int32Array(t,n,r){return e(this,t,n,r)}})},{144:144}],285:[function(t,n,r){t(144)("Int8",1,function(e){return function Int8Array(t,n,r){return e(this,t,n,r)}})},{144:144}],286:[function(t,n,r){t(144)("Uint16",2,function(e){return function Uint16Array(t,n,r){return e(this,t,n,r)}})},{144:144}],287:[function(t,n,r){t(144)("Uint32",4,function(e){return function Uint32Array(t,n,r){return e(this,t,n,r)}})},{144:144}],288:[function(t,n,r){t(144)("Uint8",1,function(e){return function Uint8Array(t,n,r){return e(this,t,n,r)}})},{144:144}],289:[function(t,n,r){t(144)("Uint8",1,function(e){return function Uint8ClampedArray(t,n,r){return e(this,t,n,r)}},!0)},{144:144}],290:[function(t,n,r){"use strict";function R4(t){return function WeakMap(){return t(this,0<arguments.length?arguments[0]:void 0)}}var 
o,e=t(70),i=t(42)(0),u=t(118),c=t(94),a=t(97),f=t(50),s=t(81),l=t(149),h=t(149),p=!e.ActiveXObject&&"ActiveXObject"in e,v="WeakMap",g=c.getWeak,y=Object.isExtensible,d=f.ufstore,x={get:function get(t){if(s(t)){var n=g(t);return!0===n?d(l(this,v)).get(t):n?n[this._i]:void 0}},set:function set(t,n){return f.def(l(this,v),t,n)}},m=n.exports=t(51)(v,R4,x,f,!0,!0);h&&p&&(a((o=f.getConstructor(R4,v)).prototype,x),c.NEED=!0,i(["delete","has","get","set"],function(e){var t=m.prototype,i=t[e];u(t,e,function(t,n){if(!s(t)||y(t))return i.call(this,t,n);this._f||(this._f=new o);var r=this._f[e](t,n);return"set"==e?this:r})}))},{118:118,149:149,42:42,50:50,51:51,70:70,81:81,94:94,97:97}],291:[function(t,n,r){"use strict";var e=t(50),i=t(149),o="WeakSet";t(51)(o,function(t){return function WeakSet(){return t(this,0<arguments.length?arguments[0]:void 0)}},{add:function add(t){return e.def(i(this,o),t,!0)}},e,!1,!0)},{149:149,50:50,51:51}],292:[function(t,n,r){"use strict";var e=t(62),i=t(67),o=t(142),u=t(141),c=t(33),a=t(45);e(e.P,"Array",{flatMap:function flatMap(t){var n,r,e=o(this);return c(t),n=u(e.length),r=a(e,0),i(r,e,e,n,0,1,t,arguments[1]),r}}),t(35)("flatMap")},{141:141,142:142,33:33,35:35,45:45,62:62,67:67}],293:[function(t,n,r){"use strict";var e=t(62),i=t(41)(!0);e(e.P,"Array",{includes:function includes(t){return i(this,t,1<arguments.length?arguments[1]:void 0)}}),t(35)("includes")},{35:35,41:41,62:62}],294:[function(t,n,r){var e=t(62),i=t(110)(!0);e(e.S,"Object",{entries:function entries(t){return i(t)}})},{110:110,62:62}],295:[function(t,n,r){var e=t(62),a=t(111),f=t(140),s=t(101),l=t(53);e(e.S,"Object",{getOwnPropertyDescriptors:function getOwnPropertyDescriptors(t){for(var n,r,e=f(t),i=s.f,o=a(e),u={},c=0;o.length>c;)void 0!==(r=i(e,n=o[c++]))&&l(u,n,r);return u}})},{101:101,111:111,140:140,53:53,62:62}],296:[function(t,n,r){var e=t(62),i=t(110)(!1);e(e.S,"Object",{values:function values(t){return i(t)}})},{110:110,62:62}],297:[function(t,n,r){"use strict";var e=t(62),i=t(52),o=t(70),u=t(127),c=t(115);e(e.P+e.R,"Promise",{finally:function(n){var r=u(this,i.Promise||o.Promise),t="function"==typeof n;return this.then(t?function(t){return c(r,n()).then(function(){return t})}:n,t?function(t){return c(r,n()).then(function(){throw t})}:n)}})},{115:115,127:127,52:52,62:62,70:70}],298:[function(t,n,r){"use strict";var e=t(62),i=t(132),o=t(148),u=/Version\/10\.\d+(\.\d+)?( Mobile\/\w+)? Safari\//.test(o);e(e.P+e.F*u,"String",{padEnd:function padEnd(t){return i(this,t,1<arguments.length?arguments[1]:void 0,!1)}})},{132:132,148:148,62:62}],299:[function(t,n,r){"use strict";var e=t(62),i=t(132),o=t(148),u=/Version\/10\.\d+(\.\d+)?( Mobile\/\w+)? 
Safari\//.test(o);e(e.P+e.F*u,"String",{padStart:function padStart(t){return i(this,t,1<arguments.length?arguments[1]:void 0,!0)}})},{132:132,148:148,62:62}],300:[function(t,n,r){"use strict";t(134)("trimLeft",function(t){return function trimLeft(){return t(this,1)}},"trimStart")},{134:134}],301:[function(t,n,r){"use strict";t(134)("trimRight",function(t){return function trimRight(){return t(this,2)}},"trimEnd")},{134:134}],302:[function(t,n,r){t(150)("asyncIterator")},{150:150}],303:[function(t,n,r){for(var e=t(164),i=t(107),o=t(118),u=t(70),c=t(72),a=t(88),f=t(152),s=f("iterator"),l=f("toStringTag"),h=a.Array,p={CSSRuleList:!0,CSSStyleDeclaration:!1,CSSValueList:!1,ClientRectList:!1,DOMRectList:!1,DOMStringList:!1,DOMTokenList:!0,DataTransferItemList:!1,FileList:!1,HTMLAllCollection:!1,HTMLCollection:!1,HTMLFormElement:!1,HTMLSelectElement:!1,MediaList:!0,MimeTypeArray:!1,NamedNodeMap:!1,NodeList:!0,PaintRequestList:!1,Plugin:!1,PluginArray:!1,SVGLengthList:!1,SVGNumberList:!1,SVGPathSegList:!1,SVGPointList:!1,SVGStringList:!1,SVGTransformList:!1,SourceBufferList:!1,StyleSheetList:!0,TextTrackCueList:!1,TextTrackList:!1,TouchList:!1},v=i(p),g=0;g<v.length;g++){var y,d=v[g],x=p[d],m=u[d],S=m&&m.prototype;if(S&&(S[s]||c(S,s,h),S[l]||c(S,l,d),a[d]=h,x))for(y in e)S[y]||o(S,y,e[y],!0)}},{107:107,118:118,152:152,164:164,70:70,72:72,88:88}],304:[function(t,n,r){var e=t(62),i=t(136);e(e.G+e.B,{setImmediate:i.set,clearImmediate:i.clear})},{136:136,62:62}],305:[function(t,n,r){function y7(i){return function(t,n){var r=2<arguments.length,e=r&&u.call(arguments,2);return i(r?function(){("function"==typeof t?t:Function(t)).apply(this,e)}:t,n)}}var e=t(70),i=t(62),o=t(148),u=[].slice,c=/MSIE .\./.test(o);i(i.G+i.B+i.F*c,{setTimeout:y7(e.setTimeout),setInterval:y7(e.setInterval)})},{148:148,62:62,70:70}],306:[function(t,n,r){t(305),t(304),t(303),n.exports=t(52)},{303:303,304:304,305:305,52:52}],307:[function(t,n,r){var e=function(o){"use strict";var c,t=Object.prototype,a=t.hasOwnProperty,n="function"==typeof Symbol?Symbol:{},i=n.iterator||"@@iterator",r=n.asyncIterator||"@@asyncIterator",e=n.toStringTag||"@@toStringTag";function wrap(t,n,r,e){var i=n&&n.prototype instanceof Generator?n:Generator,o=Object.create(i.prototype),u=new Context(e||[]);return o._invoke=function makeInvokeMethod(o,u,c){var a=f;return function invoke(t,n){if(a===l)throw new Error("Generator is already running");if(a===h){if("throw"===t)throw n;return doneResult()}for(c.method=t,c.arg=n;;){var r=c.delegate;if(r){var e=maybeInvokeDelegate(r,c);if(e){if(e===p)continue;return e}}if("next"===c.method)c.sent=c._sent=c.arg;else if("throw"===c.method){if(a===f)throw a=h,c.arg;c.dispatchException(c.arg)}else"return"===c.method&&c.abrupt("return",c.arg);a=l;var i=tryCatch(o,u,c);if("normal"===i.type){if(a=c.done?h:s,i.arg===p)continue;return{value:i.arg,done:c.done}}"throw"===i.type&&(a=h,c.method="throw",c.arg=i.arg)}}}(t,r,u),o}function tryCatch(t,n,r){try{return{type:"normal",arg:t.call(n,r)}}catch(t){return{type:"throw",arg:t}}}o.wrap=wrap;var f="suspendedStart",s="suspendedYield",l="executing",h="completed",p={};function Generator(){}function GeneratorFunction(){}function GeneratorFunctionPrototype(){}var u={};u[i]=function(){return this};var v=Object.getPrototypeOf,g=v&&v(v(values([])));g&&g!==t&&a.call(g,i)&&(u=g);var y=GeneratorFunctionPrototype.prototype=Generator.prototype=Object.create(u);function defineIteratorMethods(t){["next","throw","return"].forEach(function(n){t[n]=function(t){return this._invoke(n,t)}})}function 
AsyncIterator(c){var t;this._invoke=function enqueue(r,e){function callInvokeWithMethodAndArg(){return new Promise(function(t,n){!function invoke(t,n,r,e){var i=tryCatch(c[t],c,n);if("throw"!==i.type){var o=i.arg,u=o.value;return u&&"object"==typeof u&&a.call(u,"__await")?Promise.resolve(u.__await).then(function(t){invoke("next",t,r,e)},function(t){invoke("throw",t,r,e)}):Promise.resolve(u).then(function(t){o.value=t,r(o)},function(t){return invoke("throw",t,r,e)})}e(i.arg)}(r,e,t,n)})}return t=t?t.then(callInvokeWithMethodAndArg,callInvokeWithMethodAndArg):callInvokeWithMethodAndArg()}}function maybeInvokeDelegate(t,n){var r=t.iterator[n.method];if(r===c){if(n.delegate=null,"throw"===n.method){if(t.iterator.return&&(n.method="return",n.arg=c,maybeInvokeDelegate(t,n),"throw"===n.method))return p;n.method="throw",n.arg=new TypeError("The iterator does not provide a 'throw' method")}return p}var e=tryCatch(r,t.iterator,n.arg);if("throw"===e.type)return n.method="throw",n.arg=e.arg,n.delegate=null,p;var i=e.arg;return i?i.done?(n[t.resultName]=i.value,n.next=t.nextLoc,"return"!==n.method&&(n.method="next",n.arg=c),n.delegate=null,p):i:(n.method="throw",n.arg=new TypeError("iterator result is not an object"),n.delegate=null,p)}function pushTryEntry(t){var n={tryLoc:t[0]};1 in t&&(n.catchLoc=t[1]),2 in t&&(n.finallyLoc=t[2],n.afterLoc=t[3]),this.tryEntries.push(n)}function resetTryEntry(t){var n=t.completion||{};n.type="normal",delete n.arg,t.completion=n}function Context(t){this.tryEntries=[{tryLoc:"root"}],t.forEach(pushTryEntry,this),this.reset(!0)}function values(t){if(t){var n=t[i];if(n)return n.call(t);if("function"==typeof t.next)return t;if(!isNaN(t.length)){var r=-1,e=function next(){for(;++r<t.length;)if(a.call(t,r))return next.value=t[r],next.done=!1,next;return next.value=c,next.done=!0,next};return e.next=e}}return{next:doneResult}}function doneResult(){return{value:c,done:!0}}return GeneratorFunction.prototype=y.constructor=GeneratorFunctionPrototype,GeneratorFunctionPrototype.constructor=GeneratorFunction,GeneratorFunctionPrototype[e]=GeneratorFunction.displayName="GeneratorFunction",o.isGeneratorFunction=function(t){var n="function"==typeof t&&t.constructor;return!!n&&(n===GeneratorFunction||"GeneratorFunction"===(n.displayName||n.name))},o.mark=function(t){return Object.setPrototypeOf?Object.setPrototypeOf(t,GeneratorFunctionPrototype):(t.__proto__=GeneratorFunctionPrototype,e in t||(t[e]="GeneratorFunction")),t.prototype=Object.create(y),t},o.awrap=function(t){return{__await:t}},defineIteratorMethods(AsyncIterator.prototype),AsyncIterator.prototype[r]=function(){return this},o.AsyncIterator=AsyncIterator,o.async=function(t,n,r,e){var i=new AsyncIterator(wrap(t,n,r,e));return o.isGeneratorFunction(n)?i:i.next().then(function(t){return t.done?t.value:i.next()})},defineIteratorMethods(y),y[e]="Generator",y[i]=function(){return this},y.toString=function(){return"[object Generator]"},o.keys=function(n){var r=[];for(var t in n)r.push(t);return r.reverse(),function next(){for(;r.length;){var t=r.pop();if(t in n)return next.value=t,next.done=!1,next}return next.done=!0,next}},o.values=values,Context.prototype={constructor:Context,reset:function(t){if(this.prev=0,this.next=0,this.sent=this._sent=c,this.done=!1,this.delegate=null,this.method="next",this.arg=c,this.tryEntries.forEach(resetTryEntry),!t)for(var n in this)"t"===n.charAt(0)&&a.call(this,n)&&!isNaN(+n.slice(1))&&(this[n]=c)},stop:function(){this.done=!0;var t=this.tryEntries[0].completion;if("throw"===t.type)throw 
t.arg;return this.rval},dispatchException:function(r){if(this.done)throw r;var e=this;function handle(t,n){return i.type="throw",i.arg=r,e.next=t,n&&(e.method="next",e.arg=c),!!n}for(var t=this.tryEntries.length-1;0<=t;--t){var n=this.tryEntries[t],i=n.completion;if("root"===n.tryLoc)return handle("end");if(n.tryLoc<=this.prev){var o=a.call(n,"catchLoc"),u=a.call(n,"finallyLoc");if(o&&u){if(this.prev<n.catchLoc)return handle(n.catchLoc,!0);if(this.prev<n.finallyLoc)return handle(n.finallyLoc)}else if(o){if(this.prev<n.catchLoc)return handle(n.catchLoc,!0)}else{if(!u)throw new Error("try statement without catch or finally");if(this.prev<n.finallyLoc)return handle(n.finallyLoc)}}}},abrupt:function(t,n){for(var r=this.tryEntries.length-1;0<=r;--r){var e=this.tryEntries[r];if(e.tryLoc<=this.prev&&a.call(e,"finallyLoc")&&this.prev<e.finallyLoc){var i=e;break}}i&&("break"===t||"continue"===t)&&i.tryLoc<=n&&n<=i.finallyLoc&&(i=null);var o=i?i.completion:{};return o.type=t,o.arg=n,i?(this.method="next",this.next=i.finallyLoc,p):this.complete(o)},complete:function(t,n){if("throw"===t.type)throw t.arg;return"break"===t.type||"continue"===t.type?this.next=t.arg:"return"===t.type?(this.rval=this.arg=t.arg,this.method="return",this.next="end"):"normal"===t.type&&n&&(this.next=n),p},finish:function(t){for(var n=this.tryEntries.length-1;0<=n;--n){var r=this.tryEntries[n];if(r.finallyLoc===t)return this.complete(r.completion,r.afterLoc),resetTryEntry(r),p}},catch:function(t){for(var n=this.tryEntries.length-1;0<=n;--n){var r=this.tryEntries[n];if(r.tryLoc===t){var e=r.completion;if("throw"===e.type){var i=e.arg;resetTryEntry(r)}return i}}throw new Error("illegal catch attempt")},delegateYield:function(t,n,r){return this.delegate={iterator:values(t),resultName:n,nextLoc:r},"next"===this.method&&(this.arg=c),p}},o}("object"==typeof n?n.exports:{});try{regeneratorRuntime=e}catch(t){Function("r","regeneratorRuntime = r")(e)}},{}]},{},[1]); |
||
trigger.go | package mapping
import (
"github.com/blevesearch/bleve"
"github.com/blevesearch/bleve/mapping"
"github.com/moira-alert/moira"
)
// TriggerField is used as an enum
type TriggerField int
// Constants used as enum values
const (
TriggerID TriggerField = iota
TriggerName
TriggerDesc
TriggerTags
TriggerLastCheckScore
)
var triggerFieldNames = []string{
"ID",
"Name",
"Desc",
"Tags",
"LastCheckScore"}
// Trigger represents the moira.Trigger type for the full-text search index. It includes only the indexed fields
type Trigger struct {
ID string
Name string
Desc string
Tags []string
LastCheckScore int64
}
// Type returns a string with the type name. It is used by bleve for search
func (Trigger) Type() string {
return "moira.indexed.trigger"
}
// String returns the TriggerField name, making the type usable as an enum
func (field TriggerField) String() string {
return triggerFieldNames[field]
}
// GetDocumentMapping returns a bleve mapping.DocumentMapping for the Trigger type
func (Trigger) GetDocumentMapping() *mapping.DocumentMapping {
triggerMapping := bleve.NewDocumentStaticMapping()
triggerMapping.AddFieldMappingsAt(TriggerName.String(), getStandardMapping())
triggerMapping.AddFieldMappingsAt(TriggerTags.String(), getKeywordMapping())
triggerMapping.AddFieldMappingsAt(TriggerDesc.String(), getStandardMapping())
triggerMapping.AddFieldMappingsAt(TriggerLastCheckScore.String(), getNumericMapping())
return triggerMapping
}
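// Sketch of how this mapping might be registered on a bleve index mapping
// (assumes an index mapping is built elsewhere; the names below are illustrative):
//
//	indexMapping := bleve.NewIndexMapping()
//	indexMapping.AddDocumentMapping(Trigger{}.Type(), Trigger{}.GetDocumentMapping())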
// CreateIndexedTrigger creates a mapping.Trigger object out of a moira.TriggerCheck
func CreateIndexedTrigger(triggerCheck *moira.TriggerCheck) Trigger | {
return Trigger{
ID: triggerCheck.ID,
Name: triggerCheck.Name,
Desc: moira.UseString(triggerCheck.Desc),
Tags: triggerCheck.Tags,
LastCheckScore: triggerCheck.LastCheck.Score,
}
} |
|
buffer.rs | use crate::Publisher;
impl<'a, T: 'a + Send> Publisher<'a, T> {
pub fn buffer(self) -> Publisher<'a, T> |
}
| {
unimplemented!()
} |
cleaner.py | #! /usr/bin/python
from flask import Flask, request, jsonify
import boto3
import os
from queue import Queue
from threading import Thread
import time
s3 = boto3.client('s3')
s3_raw = boto3.resource('s3').Bucket('isitanime-data-raw')
s3_dest = boto3.resource('s3').Bucket('isitanime-data-clean')
app = Flask(__name__)
@app.route('/')
def main():
with open('main.html', 'r') as in_f:
html = in_f.read()
return html
@app.route('/keys')
def | ():
prefix = request.args.get('prefix', 'safebooru')
keys = get_keys(prefix, 100)
return jsonify(keys)
classify_queue = Queue()
@app.route('/classify')
def classify():
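    """Queue a classification request; background worker threads apply it to S3."""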
key = request.args.get('key')
clss = request.args.get('class')
assert clss in {'anime', 'notanime', 'delete'}
classify_queue.put((key, clss))
return '', 200
def classify_thread():
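    # Worker loop: block on the classification queue and apply each entry.
    # Exceptions are swallowed so a single bad item cannot kill the worker.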
while True:
try:
key, clss = classify_queue.get()
classify_back(key, clss)
except Exception:
pass
def classify_back(name, clss):
copy_source = {
'Bucket': 'isitanime-data-raw',
'Key': name,
}
if clss != 'delete':
s3_dest.copy(copy_source, clss + '-' + name)
s3_raw.delete_objects(
Delete={
'Objects': [{
'Key': name,
}],
'Quiet': True,
}
)
print('S3 cleaned ' + name + ' == ' + clss)
s3_key_cache = {}
s3_marker_next = {}
def get_keys(prefix, count):
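    # Return up to `count` object keys with the given prefix from the raw
    # bucket. Listed keys are cached per prefix, and the last key listed is
    # remembered as the S3 `Marker` so later calls continue the pagination
    # where the previous listing stopped.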
if prefix not in s3_key_cache:
s3_key_cache[prefix] = []
if prefix not in s3_marker_next:
if s3_key_cache[prefix]:
s3_marker_next[prefix] = s3_key_cache[prefix][-1]
else:
s3_marker_next[prefix] = None
key_cache = s3_key_cache[prefix]
marker_next = s3_marker_next[prefix]
while count > len(key_cache):
if marker_next:
resp = s3.list_objects(
Bucket='isitanime-data-raw',
Prefix=prefix,
Marker=marker_next,
)
else:
resp = s3.list_objects(
Bucket='isitanime-data-raw',
Prefix=prefix,
)
if 'Contents' not in resp:
count = len(key_cache)
print(resp)
break
key_cache.extend([obj['Key'] for obj in resp['Contents']])
s3_marker_next[prefix] = key_cache[-1]
if not resp['IsTruncated']:
count = len(key_cache)
break
print(key_cache)
s3_key_cache[prefix] = key_cache[count:]
return key_cache[:count]
if __name__ == '__main__':
boto_threadpool = []
for _ in range(5):
t = Thread(target=classify_thread)
boto_threadpool.append(t)
t.start()
app.run('127.0.0.1', port=8080)
| keys |
project.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import logging
import os
import tempfile
import zipfile
from threading import Lock, Thread
from typing import List, Optional, Text
import six
import time
from builtins import object
from requests.exceptions import InvalidURL, RequestException
from rasa_nlu import utils
from rasa_nlu.classifiers.keyword_intent_classifier import \
KeywordIntentClassifier
from rasa_nlu.model import Metadata, Interpreter
from rasa_nlu.utils import is_url, EndpointConfig
if six.PY2:
from StringIO import StringIO as IOReader
else:
from io import BytesIO as IOReader
logger = logging.getLogger(__name__)
MODEL_NAME_PREFIX = "model_"
FALLBACK_MODEL_NAME = "fallback"
DEFAULT_REQUEST_TIMEOUT = 60 * 5 # 5 minutes
def load_from_server(component_builder=None, # type: Optional[Text]
project=None, # type: Optional[Text]
project_dir=None, # type: Optional[Text]
remote_storage=None, # type: Optional[Text]
model_server=None, # type: Optional[EndpointConfig]
wait_time_between_pulls=None, # type: Optional[int]
):
# type: (...) -> Project
"""Load a persisted model from a server."""
project = Project(component_builder=component_builder,
project=project,
project_dir=project_dir,
remote_storage=remote_storage)
_update_model_from_server(model_server, project)
if wait_time_between_pulls:
# continuously pull the model every `wait_time_between_pulls` seconds
start_model_pulling_in_worker(model_server,
wait_time_between_pulls,
project)
return project
def _update_model_from_server(model_server, project):
# type: (EndpointConfig, Project) -> None
"""Load a zipped Rasa NLU model from a URL and update the passed
project."""
if not is_url(model_server.url):
raise InvalidURL(model_server)
model_directory = tempfile.mkdtemp()
new_model_fingerprint, filename = _pull_model_and_fingerprint(
model_server, model_directory, project.fingerprint)
if new_model_fingerprint:
model_name = _get_remote_model_name(filename)
project.fingerprint = new_model_fingerprint
project.update_model_from_dir_and_unload_others(model_directory,
model_name)
else:
logger.debug("No new model found at URL {}".format(model_server.url))
def _get_remote_model_name(filename):
# type: (Optional[Text]) -> Text
"""Get the name to save a model under that was fetched from a
remote server."""
if filename is not None: # use the filename header if present
        # str.strip() removes characters, not a suffix, so cut off ".zip" explicitly
        if filename.endswith(".zip"):
            filename = filename[:-len(".zip")]
        return filename
else: # or else use a timestamp
timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
return MODEL_NAME_PREFIX + timestamp
def _pull_model_and_fingerprint(model_server, model_directory, fingerprint):
# type: (EndpointConfig, Text, Optional[Text]) -> (Optional[Text], Optional[Text])
"""Queries the model server and returns a tuple of containing the
response's <ETag> header which contains the model hash, and the
<filename> header containing the model name."""
header = {"If-None-Match": fingerprint}
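    # The server is expected to reply 204 (no new model) when the fingerprint
    # still matches, and 200 with a zipped model otherwise.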
try:
logger.debug("Requesting model from server {}..."
"".format(model_server.url))
response = model_server.request(method="GET",
headers=header,
timeout=DEFAULT_REQUEST_TIMEOUT)
except RequestException as e:
logger.warning("Tried to fetch model from server, but couldn't reach "
"server. We'll retry later... Error: {}."
"".format(e))
return None, None
if response.status_code == 204:
logger.debug("Model server returned 204 status code, indicating "
"that no new model is available. "
"Current fingerprint: {}".format(fingerprint))
return response.headers.get("ETag"), response.headers.get("filename")
elif response.status_code == 404:
logger.debug("Model server didn't find a model for our request. "
"Probably no one did train a model for the project "
"and tag combination yet.")
return None, None
elif response.status_code != 200:
logger.warn("Tried to fetch model from server, but server response "
"status code is {}. We'll retry later..."
"".format(response.status_code))
return None, None
zip_ref = zipfile.ZipFile(IOReader(response.content))
zip_ref.extractall(model_directory)
logger.debug("Unzipped model to {}"
"".format(os.path.abspath(model_directory)))
# get the new fingerprint and filename
return response.headers.get("ETag"), response.headers.get("filename")
def _run_model_pulling_worker(model_server, wait_time_between_pulls, project):
# type: (Text, int, Project) -> None
while True:
_update_model_from_server(model_server, project)
time.sleep(wait_time_between_pulls)
def start_model_pulling_in_worker(model_server, wait_time_between_pulls,
project):
# type: (Text, int, Project) -> None
worker = Thread(target=_run_model_pulling_worker,
args=(model_server, wait_time_between_pulls, project))
worker.setDaemon(True)
worker.start()
class Project(object):
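    """Holds the models of a single NLU project and manages loading,
    parsing and updating them, whether they live locally, in cloud
    storage or behind a model server."""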
def __init__(self,
component_builder=None,
project=None,
project_dir=None,
remote_storage=None,
fingerprint=None):
self._component_builder = component_builder
self._models = {}
self.status = 0
self.current_training_processes = 0
self._reader_lock = Lock()
self._loader_lock = Lock()
self._writer_lock = Lock()
self._readers_count = 0
self._path = None
self._project = project
self.remote_storage = remote_storage
self.fingerprint = fingerprint
if project and project_dir:
self._path = os.path.join(project_dir, project)
self._search_for_models()
def _begin_read(self):
# Readers-writer lock basic double mutex implementation
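        # The first reader acquires the writer lock (blocking writers) and
        # the last reader releases it again in _end_read().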
self._reader_lock.acquire()
self._readers_count += 1
if self._readers_count == 1:
self._writer_lock.acquire()
self._reader_lock.release()
def _end_read(self):
self._reader_lock.acquire()
self._readers_count -= 1
if self._readers_count == 0:
self._writer_lock.release()
self._reader_lock.release()
def _load_local_model(self, requested_model_name=None):
        if requested_model_name is None:  # the user wants the latest model
            # NOTE: for better parse performance we currently do not refresh
            # the model list from local storage and the cloud here, even
            # though passing requested_model_name=None means "use the latest
            # model"; refreshing is quite slow. As a workaround, the caller
            # can pass the latest model name explicitly so that the model is
            # cached. If a real refresh is wanted, `_latest_project_model()`
            # would be a good place to implement it.
logger.debug("No model specified. Using default")
return self._latest_project_model()
elif requested_model_name in self._models: # model exists in cache
return requested_model_name
return None # local model loading failed!
def _dynamic_load_model(self, requested_model_name=None):
# type: (Text) -> Text
        # first try to load the model from the local cache
local_model = self._load_local_model(requested_model_name)
if local_model:
return local_model
        # The model is not in the model list cache, so refresh the model
        # list from local storage and the cloud.
        # NOTE: a malicious user sending many requests for models that do
        # not exist can cause performance issues, because fetching anything
        # from the cloud is a time-consuming task.
        self._search_for_models()
        # retry after refreshing the model cache
local_model = self._load_local_model(requested_model_name)
if local_model:
return local_model
        # the user-specified model was still not found
logger.warn("Invalid model requested. Using default")
return self._latest_project_model()
def parse(self, text, time=None, requested_model_name=None):
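        """Parse `text` with the requested model (falling back to the latest
        available model) and annotate the result with the project and model
        names."""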
self._begin_read()
model_name = self._dynamic_load_model(requested_model_name)
self._loader_lock.acquire()
try:
if not self._models.get(model_name):
interpreter = self._interpreter_for_model(model_name)
self._models[model_name] = interpreter
finally:
self._loader_lock.release()
response = self._models[model_name].parse(text, time)
response['project'] = self._project
response['model'] = model_name
self._end_read()
return response
def load_model(self):
self._begin_read()
status = False
model_name = self._dynamic_load_model()
logger.debug('Loading model %s', model_name)
self._loader_lock.acquire()
try:
if not self._models.get(model_name):
interpreter = self._interpreter_for_model(model_name)
self._models[model_name] = interpreter
status = True
finally:
self._loader_lock.release()
self._end_read()
return status
def update_model_from_dir_and_unload_others(self,
model_dir, # type: Text
model_name # type: Text
):
# unload all loaded models
for model in self._list_loaded_models():
self.unload(model)
self._begin_read()
status = False
logger.debug('Loading model {} from directory {}'.format(
model_name, model_dir))
self._loader_lock.acquire()
try:
interpreter = self._interpreter_for_model(
model_name, model_dir)
self._models[model_name] = interpreter
status = True
finally:
self._loader_lock.release()
self._end_read()
return status
def update(self, model_name):
self._writer_lock.acquire()
self._models[model_name] = None
self._writer_lock.release()
def unload(self, model_name):
self._writer_lock.acquire()
try:
del self._models[model_name]
self._models[model_name] = None
return model_name
finally:
self._writer_lock.release()
def _latest_project_model(self):
"""Retrieves the latest trained model for an project"""
models = {model[len(MODEL_NAME_PREFIX):]: model
for model in self._models.keys()
if model.startswith(MODEL_NAME_PREFIX)}
if models:
time_list = [datetime.datetime.strptime(time, '%Y%m%d-%H%M%S')
for time, model in models.items()]
return models[max(time_list).strftime('%Y%m%d-%H%M%S')]
else:
return FALLBACK_MODEL_NAME
def _fallback_model(self):
meta = Metadata({"pipeline": [{
"name": "intent_classifier_keyword",
"class": utils.module_path_from_object(KeywordIntentClassifier())
}]}, "")
return Interpreter.create(meta, self._component_builder)
def _search_for_models(self):
model_names = (self._list_models_in_dir(self._path) +
self._list_models_in_cloud())
if not model_names:
if FALLBACK_MODEL_NAME not in self._models:
self._models[FALLBACK_MODEL_NAME] = self._fallback_model()
else:
for model in set(model_names):
if model not in self._models:
self._models[model] = None
def _interpreter_for_model(self, model_name, model_dir=None):
metadata = self._read_model_metadata(model_name, model_dir)
return Interpreter.create(metadata, self._component_builder)
def _read_model_metadata(self, model_name, model_dir):
if model_name is None:
data = Project._default_model_metadata()
return Metadata(data, model_name)
else:
if model_dir is not None:
path = model_dir
elif not os.path.isabs(model_name) and self._path:
path = os.path.join(self._path, model_name)
else:
path = model_name
# download model from cloud storage if needed and possible
if not os.path.isdir(path):
self._load_model_from_cloud(model_name, path)
return Metadata.load(path)
def as_dict(self):
return {'status': 'training' if self.status else 'ready',
'current_training_processes': self.current_training_processes,
'available_models': list(self._models.keys()),
'loaded_models': self._list_loaded_models()}
def _list_loaded_models(self):
models = []
for model, interpreter in self._models.items():
if interpreter is not None:
models.append(model)
return models
def _list_models_in_cloud(self):
# type: () -> List[Text]
try:
from rasa_nlu.persistor import get_persistor
p = get_persistor(self.remote_storage)
if p is not None:
return p.list_models(self._project)
else:
return []
except Exception as e:
logger.warn("Failed to list models of project {}. "
"{}".format(self._project, e))
return []
def _load_model_from_cloud(self, model_name, target_path):
try:
from rasa_nlu.persistor import get_persistor
p = get_persistor(self.remote_storage)
if p is not None:
p.retrieve(model_name, self._project, target_path)
else:
raise RuntimeError("Unable to initialize persistor")
except Exception as e:
logger.warn("Using default interpreter, couldn't fetch "
"model: {}".format(e))
            raise  # re-raise this exception because there is nothing we can do now
@staticmethod
def _default_model_metadata():
return {
"language": None,
}
@staticmethod
def _list_models_in_dir(path):
if not path or not os.path.isdir(path):
return []
else:
| return [os.path.relpath(model, path)
for model in utils.list_subdirectories(path)] |
|
test_gui.py | import os
from montreal_forced_aligner.corpus.acoustic_corpus import AcousticCorpus
| ):
output_directory = os.path.join(generated_dir, "gui_tests")
corpus = AcousticCorpus(
corpus_directory=basic_corpus_dir,
use_mp=True,
temporary_directory=output_directory,
)
corpus._load_corpus()
corpus.get_file(name="acoustic_corpus").save()
def test_file_properties(
stereo_corpus_dir,
generated_dir,
):
output_directory = os.path.join(generated_dir, "gui_tests")
corpus = AcousticCorpus(
corpus_directory=stereo_corpus_dir,
use_mp=True,
temporary_directory=output_directory,
)
corpus._load_corpus()
file = corpus.get_file(name="michaelandsickmichael")
assert file.sound_file.num_channels == 2
assert file.num_speakers == 2
assert file.num_utterances == 7
x, y = file.sound_file.normalized_waveform()
assert y.shape[0] == 2
def test_flac_tg(flac_tg_corpus_dir, generated_dir):
output_directory = os.path.join(generated_dir, "gui_tests")
corpus = AcousticCorpus(
corpus_directory=flac_tg_corpus_dir,
use_mp=True,
temporary_directory=output_directory,
)
corpus._load_corpus()
corpus.get_file(name="61-70968-0000").save() |
def test_save_text_lab(
basic_corpus_dir,
generated_dir, |
selection_model.rs | // Take a look at the license at the top of the repository in the LICENSE file.
use crate::{Bitset, SelectionModel};
use gio::subclass::prelude::*;
use glib::translate::*;
use glib::Cast;
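/// Interface implementation trait for `SelectionModel`: the default methods
/// simply chain up to the parent interface via the `parent_*` helpers below.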
pub trait SelectionModelImpl: ListModelImpl {
fn get_selection_in_range(&self, model: &Self::Type, position: u32, n_items: u32) -> Bitset {
self.parent_get_selection_in_range(model, position, n_items)
}
fn is_selected(&self, model: &Self::Type, position: u32) -> bool {
self.parent_is_selected(model, position)
}
fn select_all(&self, model: &Self::Type) -> bool {
self.parent_select_all(model)
}
fn select_item(&self, model: &Self::Type, position: u32, unselect_rest: bool) -> bool {
self.parent_select_item(model, position, unselect_rest)
}
fn select_range(
&self,
model: &Self::Type,
position: u32,
n_items: u32,
unselect_rest: bool,
) -> bool {
self.parent_select_range(model, position, n_items, unselect_rest)
}
fn set_selection(&self, model: &Self::Type, selected: &Bitset, mask: &Bitset) -> bool {
self.parent_set_selection(model, selected, mask)
}
fn unselect_all(&self, model: &Self::Type) -> bool {
self.parent_unselect_all(model)
}
fn unselect_item(&self, model: &Self::Type, position: u32) -> bool {
self.parent_unselect_item(model, position)
}
fn | (&self, model: &Self::Type, position: u32, n_items: u32) -> bool {
self.parent_unselect_range(model, position, n_items)
}
}
pub trait SelectionModelImplExt: ObjectSubclass {
fn parent_get_selection_in_range(
&self,
model: &Self::Type,
position: u32,
n_items: u32,
) -> Bitset;
fn parent_is_selected(&self, model: &Self::Type, position: u32) -> bool;
fn parent_select_all(&self, model: &Self::Type) -> bool;
fn parent_select_item(&self, model: &Self::Type, position: u32, unselect_rest: bool) -> bool;
fn parent_select_range(
&self,
model: &Self::Type,
position: u32,
n_items: u32,
unselect_rest: bool,
) -> bool;
fn parent_set_selection(&self, model: &Self::Type, selected: &Bitset, mask: &Bitset) -> bool;
fn parent_unselect_all(&self, model: &Self::Type) -> bool;
fn parent_unselect_item(&self, model: &Self::Type, position: u32) -> bool;
fn parent_unselect_range(&self, model: &Self::Type, position: u32, n_items: u32) -> bool;
}
impl<T: SelectionModelImpl> SelectionModelImplExt for T {
fn parent_get_selection_in_range(
&self,
model: &Self::Type,
position: u32,
n_items: u32,
) -> Bitset {
unsafe {
let type_data = Self::type_data();
let parent_iface = type_data.as_ref().get_parent_interface::<SelectionModel>()
as *const ffi::GtkSelectionModelInterface;
let func = (*parent_iface)
.get_selection_in_range
.expect("no parent \"get_selection_in_range\" implementation");
from_glib_full(func(
model.unsafe_cast_ref::<SelectionModel>().to_glib_none().0,
position,
n_items,
))
}
}
fn parent_is_selected(&self, model: &Self::Type, position: u32) -> bool {
unsafe {
let type_data = Self::type_data();
let parent_iface = type_data.as_ref().get_parent_interface::<SelectionModel>()
as *const ffi::GtkSelectionModelInterface;
let func = (*parent_iface)
.is_selected
.expect("no parent \"is_selected\" implementation");
from_glib(func(
model.unsafe_cast_ref::<SelectionModel>().to_glib_none().0,
position,
))
}
}
fn parent_select_all(&self, model: &Self::Type) -> bool {
unsafe {
let type_data = Self::type_data();
let parent_iface = type_data.as_ref().get_parent_interface::<SelectionModel>()
as *const ffi::GtkSelectionModelInterface;
let func = (*parent_iface)
.select_all
.expect("no parent \"select_all\" implementation");
from_glib(func(
model.unsafe_cast_ref::<SelectionModel>().to_glib_none().0,
))
}
}
fn parent_select_item(&self, model: &Self::Type, position: u32, unselect_rest: bool) -> bool {
unsafe {
let type_data = Self::type_data();
let parent_iface = type_data.as_ref().get_parent_interface::<SelectionModel>()
as *const ffi::GtkSelectionModelInterface;
let func = (*parent_iface)
.select_item
.expect("no parent \"select_item\" implementation");
from_glib(func(
model.unsafe_cast_ref::<SelectionModel>().to_glib_none().0,
position,
unselect_rest.to_glib(),
))
}
}
fn parent_select_range(
&self,
model: &Self::Type,
position: u32,
n_items: u32,
unselect_rest: bool,
) -> bool {
unsafe {
let type_data = Self::type_data();
let parent_iface = type_data.as_ref().get_parent_interface::<SelectionModel>()
as *const ffi::GtkSelectionModelInterface;
let func = (*parent_iface)
.select_range
.expect("no parent \"select_range\" implementation");
from_glib(func(
model.unsafe_cast_ref::<SelectionModel>().to_glib_none().0,
position,
n_items,
unselect_rest.to_glib(),
))
}
}
fn parent_set_selection(&self, model: &Self::Type, selected: &Bitset, mask: &Bitset) -> bool {
unsafe {
let type_data = Self::type_data();
let parent_iface = type_data.as_ref().get_parent_interface::<SelectionModel>()
as *const ffi::GtkSelectionModelInterface;
let func = (*parent_iface)
.set_selection
.expect("no parent \"set_selection\" implementation");
from_glib(func(
model.unsafe_cast_ref::<SelectionModel>().to_glib_none().0,
selected.to_glib_none().0,
mask.to_glib_none().0,
))
}
}
fn parent_unselect_all(&self, model: &Self::Type) -> bool {
unsafe {
let type_data = Self::type_data();
let parent_iface = type_data.as_ref().get_parent_interface::<SelectionModel>()
as *const ffi::GtkSelectionModelInterface;
let func = (*parent_iface)
.unselect_all
.expect("no parent \"unselect_all\" implementation");
from_glib(func(
model.unsafe_cast_ref::<SelectionModel>().to_glib_none().0,
))
}
}
fn parent_unselect_item(&self, model: &Self::Type, position: u32) -> bool {
unsafe {
let type_data = Self::type_data();
let parent_iface = type_data.as_ref().get_parent_interface::<SelectionModel>()
as *const ffi::GtkSelectionModelInterface;
let func = (*parent_iface)
.unselect_item
.expect("no parent \"unselect_item\" implementation");
from_glib(func(
model.unsafe_cast_ref::<SelectionModel>().to_glib_none().0,
position,
))
}
}
fn parent_unselect_range(&self, model: &Self::Type, position: u32, n_items: u32) -> bool {
unsafe {
let type_data = Self::type_data();
let parent_iface = type_data.as_ref().get_parent_interface::<SelectionModel>()
as *const ffi::GtkSelectionModelInterface;
let func = (*parent_iface)
.unselect_range
.expect("no parent \"unselect_range\" implementation");
from_glib(func(
model.unsafe_cast_ref::<SelectionModel>().to_glib_none().0,
position,
n_items,
))
}
}
}
unsafe impl<T: SelectionModelImpl> IsImplementable<T> for SelectionModel {
fn interface_init(iface: &mut glib::Interface<Self>) {
let iface = iface.as_mut();
iface.get_selection_in_range = Some(model_get_selection_in_range::<T>);
iface.is_selected = Some(model_is_selected::<T>);
iface.select_all = Some(model_select_all::<T>);
iface.select_item = Some(model_select_item::<T>);
iface.select_range = Some(model_select_range::<T>);
iface.set_selection = Some(model_set_selection::<T>);
iface.unselect_all = Some(model_unselect_all::<T>);
iface.unselect_item = Some(model_unselect_item::<T>);
iface.unselect_range = Some(model_unselect_range::<T>);
}
fn instance_init(_instance: &mut glib::subclass::InitializingObject<T>) {}
}
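// The `unsafe extern "C"` functions below are the trampolines that translate
// GTK's C interface calls into calls on the Rust `SelectionModelImpl` implementation.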
unsafe extern "C" fn model_get_selection_in_range<T: SelectionModelImpl>(
model: *mut ffi::GtkSelectionModel,
position: u32,
n_items: u32,
) -> *mut ffi::GtkBitset {
let instance = &*(model as *mut T::Instance);
let imp = instance.impl_();
imp.get_selection_in_range(
from_glib_borrow::<_, SelectionModel>(model).unsafe_cast_ref(),
position,
n_items,
)
.to_glib_full()
}
unsafe extern "C" fn model_is_selected<T: SelectionModelImpl>(
model: *mut ffi::GtkSelectionModel,
position: u32,
) -> glib::ffi::gboolean {
let instance = &*(model as *mut T::Instance);
let imp = instance.impl_();
imp.is_selected(
from_glib_borrow::<_, SelectionModel>(model).unsafe_cast_ref(),
position,
)
.to_glib()
}
unsafe extern "C" fn model_select_all<T: SelectionModelImpl>(
model: *mut ffi::GtkSelectionModel,
) -> glib::ffi::gboolean {
let instance = &*(model as *mut T::Instance);
let imp = instance.impl_();
imp.select_all(from_glib_borrow::<_, SelectionModel>(model).unsafe_cast_ref())
.to_glib()
}
unsafe extern "C" fn model_select_item<T: SelectionModelImpl>(
model: *mut ffi::GtkSelectionModel,
position: u32,
unselect_rest: glib::ffi::gboolean,
) -> glib::ffi::gboolean {
let instance = &*(model as *mut T::Instance);
let imp = instance.impl_();
imp.select_item(
from_glib_borrow::<_, SelectionModel>(model).unsafe_cast_ref(),
position,
from_glib(unselect_rest),
)
.to_glib()
}
unsafe extern "C" fn model_select_range<T: SelectionModelImpl>(
model: *mut ffi::GtkSelectionModel,
position: u32,
n_items: u32,
unselect_rest: glib::ffi::gboolean,
) -> glib::ffi::gboolean {
let instance = &*(model as *mut T::Instance);
let imp = instance.impl_();
imp.select_range(
from_glib_borrow::<_, SelectionModel>(model).unsafe_cast_ref(),
position,
n_items,
from_glib(unselect_rest),
)
.to_glib()
}
unsafe extern "C" fn model_set_selection<T: SelectionModelImpl>(
model: *mut ffi::GtkSelectionModel,
selected_ptr: *mut ffi::GtkBitset,
mask_ptr: *mut ffi::GtkBitset,
) -> glib::ffi::gboolean {
let instance = &*(model as *mut T::Instance);
let imp = instance.impl_();
let wrap: Borrowed<SelectionModel> = from_glib_borrow(model);
let selected = from_glib_borrow(selected_ptr);
let mask = from_glib_borrow(mask_ptr);
imp.set_selection(wrap.unsafe_cast_ref(), &selected, &mask)
.to_glib()
}
unsafe extern "C" fn model_unselect_all<T: SelectionModelImpl>(
model: *mut ffi::GtkSelectionModel,
) -> glib::ffi::gboolean {
let instance = &*(model as *mut T::Instance);
let imp = instance.impl_();
imp.unselect_all(from_glib_borrow::<_, SelectionModel>(model).unsafe_cast_ref())
.to_glib()
}
unsafe extern "C" fn model_unselect_item<T: SelectionModelImpl>(
model: *mut ffi::GtkSelectionModel,
position: u32,
) -> glib::ffi::gboolean {
let instance = &*(model as *mut T::Instance);
let imp = instance.impl_();
imp.unselect_item(
from_glib_borrow::<_, SelectionModel>(model).unsafe_cast_ref(),
position,
)
.to_glib()
}
unsafe extern "C" fn model_unselect_range<T: SelectionModelImpl>(
model: *mut ffi::GtkSelectionModel,
position: u32,
n_items: u32,
) -> glib::ffi::gboolean {
let instance = &*(model as *mut T::Instance);
let imp = instance.impl_();
imp.unselect_range(
from_glib_borrow::<_, SelectionModel>(model).unsafe_cast_ref(),
position,
n_items,
)
.to_glib()
}
| unselect_range |
mod.rs | //! The instruction architecture of the `wasmi` interpreter.
mod utils;
mod visitor;
#[cfg(test)]
mod tests;
pub use self::{
utils::{BrTable, DropKeep, FuncIdx, GlobalIdx, LocalIdx, Offset, SignatureIdx, Target},
visitor::VisitInstruction,
};
use super::value_stack::StackEntry;
/// The internal `wasmi` bytecode that is stored for Wasm functions.
///
/// # Note
///
/// This representation slightly differs from WebAssembly instructions.
///
/// For example, the `BrTable` instruction is unrolled into separate instructions
/// each representing either the `BrTable` head or one of its branching targets.
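/// For instance, a Wasm `br_table` with targets `t0`, `t1` and default target `td` is encoded
/// as a single `BrTable` head followed by one `BrTableTarget` per target (illustrative sketch
/// only; whether `len_targets` also counts the default target is left to the encoder and is
/// not asserted here):
///
/// ```ignore
/// Instruction::BrTable { len_targets },
/// Instruction::BrTableTarget(t0),
/// Instruction::BrTableTarget(t1),
/// Instruction::BrTableTarget(td),
/// ```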
#[derive(Copy, Debug, Clone, PartialEq)]
pub enum | {
GetLocal {
local_depth: LocalIdx,
},
SetLocal {
local_depth: LocalIdx,
},
TeeLocal {
local_depth: LocalIdx,
},
Br(Target),
BrIfEqz(Target),
BrIfNez(Target),
BrTable {
len_targets: usize,
},
BrTableTarget(Target),
Unreachable,
Return(DropKeep),
Call(FuncIdx),
CallIndirect(SignatureIdx),
Drop,
Select,
GetGlobal(GlobalIdx),
SetGlobal(GlobalIdx),
I32Load(Offset),
I64Load(Offset),
F32Load(Offset),
F64Load(Offset),
I32Load8S(Offset),
I32Load8U(Offset),
I32Load16S(Offset),
I32Load16U(Offset),
I64Load8S(Offset),
I64Load8U(Offset),
I64Load16S(Offset),
I64Load16U(Offset),
I64Load32S(Offset),
I64Load32U(Offset),
I32Store(Offset),
I64Store(Offset),
F32Store(Offset),
F64Store(Offset),
I32Store8(Offset),
I32Store16(Offset),
I64Store8(Offset),
I64Store16(Offset),
I64Store32(Offset),
CurrentMemory,
GrowMemory,
Const(StackEntry),
I32Eqz,
I32Eq,
I32Ne,
I32LtS,
I32LtU,
I32GtS,
I32GtU,
I32LeS,
I32LeU,
I32GeS,
I32GeU,
I64Eqz,
I64Eq,
I64Ne,
I64LtS,
I64LtU,
I64GtS,
I64GtU,
I64LeS,
I64LeU,
I64GeS,
I64GeU,
F32Eq,
F32Ne,
F32Lt,
F32Gt,
F32Le,
F32Ge,
F64Eq,
F64Ne,
F64Lt,
F64Gt,
F64Le,
F64Ge,
I32Clz,
I32Ctz,
I32Popcnt,
I32Add,
I32Sub,
I32Mul,
I32DivS,
I32DivU,
I32RemS,
I32RemU,
I32And,
I32Or,
I32Xor,
I32Shl,
I32ShrS,
I32ShrU,
I32Rotl,
I32Rotr,
I64Clz,
I64Ctz,
I64Popcnt,
I64Add,
I64Sub,
I64Mul,
I64DivS,
I64DivU,
I64RemS,
I64RemU,
I64And,
I64Or,
I64Xor,
I64Shl,
I64ShrS,
I64ShrU,
I64Rotl,
I64Rotr,
F32Abs,
F32Neg,
F32Ceil,
F32Floor,
F32Trunc,
F32Nearest,
F32Sqrt,
F32Add,
F32Sub,
F32Mul,
F32Div,
F32Min,
F32Max,
F32Copysign,
F64Abs,
F64Neg,
F64Ceil,
F64Floor,
F64Trunc,
F64Nearest,
F64Sqrt,
F64Add,
F64Sub,
F64Mul,
F64Div,
F64Min,
F64Max,
F64Copysign,
I32WrapI64,
I32TruncSF32,
I32TruncUF32,
I32TruncSF64,
I32TruncUF64,
I64ExtendSI32,
I64ExtendUI32,
I64TruncSF32,
I64TruncUF32,
I64TruncSF64,
I64TruncUF64,
F32ConvertSI32,
F32ConvertUI32,
F32ConvertSI64,
F32ConvertUI64,
F32DemoteF64,
F64ConvertSI32,
F64ConvertUI32,
F64ConvertSI64,
F64ConvertUI64,
F64PromoteF32,
I32ReinterpretF32,
I64ReinterpretF64,
F32ReinterpretI32,
F64ReinterpretI64,
/// The start of a Wasm function body.
///
/// - This stores the `wasmi` bytecode length of the function body as well
    ///   as the number of local variables.
/// - Note that the length of the `wasmi` bytecode might differ from the length
/// of the original WebAssembly bytecode.
/// - The types of the local variables do not matter since all stack values
    ///   are equally sized with 64 bits per value. Storing the number of local
/// variables eliminates one indirection when calling a Wasm function.
///
/// # Note
///
/// This is a non-WebAssembly instruction that is specific to how the `wasmi`
/// interpreter organizes its internal bytecode.
FuncBodyStart {
        /// This field represents the number of instructions in the function body.
///
/// Note: This does not include any meta instructions such as
/// [`Instruction::FuncBodyStart`] or [`Instruction::FuncBodyEnd`].
len_instructions: u32,
/// Represents the number of local variables of the function body.
///
/// Note: The types of the locals do not matter since all stack values
/// use 64-bit encoding in the `wasmi` bytecode interpreter.
        /// Note: Storing the number of locals inline with the rest of the
/// function body eliminates one indirection when calling a function.
len_locals: u32,
max_stack_height: u32,
},
/// The end of a Wasm function body.
///
/// # Note
///
/// This is a non-WebAssembly instruction that is specific to how the `wasmi`
/// interpreter organizes its internal bytecode.
FuncBodyEnd,
}
impl Instruction {
/// Creates a new `Const` instruction from the given value.
pub fn constant<T>(value: T) -> Self
where
T: Into<StackEntry>,
{
Self::Const(value.into())
}
}
| Instruction |
test_load_files.py | import os
from programy.config.file.factory import ConfigurationFactory
from programy.clients.events.console.config import ConsoleConfiguration
from programytest.config.file.base_file_tests import ConfigurationBaseFileTests
# Hint
# Created the appropriate yaml file, then converted it to json and xml using the following tool
# https://codebeautify.org/yaml-to-json-xml-csv
class LoadConfigurationDataTests(ConfigurationBaseFileTests):
def test_load_config_data_yaml(self):
config_data = ConfigurationFactory.load_configuration_from_file(ConsoleConfiguration(), os.path.dirname(__file__)+ os.sep + "test_yaml.yaml")
self.assert_configuration(config_data)
def test_load_config_data_json(self):
|
def test_load_config_data_xml(self):
config_data = ConfigurationFactory.load_configuration_from_file(ConsoleConfiguration(), os.path.dirname(__file__)+ os.sep + "test_xml.xml")
self.assert_configuration(config_data)
| config_data = ConfigurationFactory.load_configuration_from_file(ConsoleConfiguration(), os.path.dirname(__file__)+ os.sep + "test_json.json")
self.assert_configuration(config_data) |
mod.rs | //! Tools for controlling behavior in an ECS application.
//!
//! Systems define how an ECS based application behaves. They have to be registered to a
//! [`SystemStage`](crate::schedule::SystemStage) to be able to run. A system is usually
//! written as a normal function that will be automatically converted into a system.
//!
//! System functions can have parameters, through which one can query and mutate Bevy ECS state.
//! Only types that implement [`SystemParam`] can be used, automatically fetching data from
//! the [`World`](crate::world::World).
//!
//! System functions often look like this:
//!
//! ```
//! # use bevy_ecs::prelude::*;
//! #
//! # #[derive(Component)]
//! # struct Player { alive: bool }
//! # #[derive(Component)]
//! # struct Score(u32);
//! # struct Round(u32);
//! #
//! fn update_score_system(
//! mut query: Query<(&Player, &mut Score)>,
//! mut round: ResMut<Round>,
//! ) {
//! for (player, mut score) in query.iter_mut() {
//! if player.alive {
//! score.0 += round.0;
//! }
//! }
//! round.0 += 1;
//! }
//! # bevy_ecs::system::assert_is_system(update_score_system);
//! ```
//!
//! # System ordering
//!
//! While the execution of systems is usually parallel and not deterministic, there are two
//! ways to determine a certain degree of execution order:
//!
//! - **System Stages:** They determine hard execution synchronization boundaries inside of
//! which systems run in parallel by default.
//! - **Labeling:** First, systems are labeled upon creation by calling `.label()`. Then,
//! methods such as `.before()` and `.after()` are appended to systems to determine
//! execution order in respect to other systems.
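//!
//! For example, a minimal labeling sketch (not compiled here; the string-label shorthand and
//! the exact prelude contents are assumptions that can vary between `bevy_ecs` versions):
//!
//! ```ignore
//! # use bevy_ecs::prelude::*;
//! fn compute_forces() {}
//! fn apply_velocity() {}
//!
//! let mut stage = SystemStage::parallel();
//! stage.add_system(compute_forces.label("physics"));
//! // `apply_velocity` is ordered after every system labeled "physics".
//! stage.add_system(apply_velocity.after("physics"));
//! ```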
//!
//! # System parameter list
//! Following is the complete list of accepted types as system parameters:
//!
//! - [`Query`]
//! - [`Res`] and `Option<Res>`
//! - [`ResMut`] and `Option<ResMut>`
//! - [`Commands`]
//! - [`Local`]
//! - [`EventReader`](crate::event::EventReader)
//! - [`EventWriter`](crate::event::EventWriter)
//! - [`NonSend`] and `Option<NonSend>`
//! - [`NonSendMut`] and `Option<NonSendMut>`
//! - [`&World`](crate::world::World)
//! - [`RemovedComponents`]
//! - [`SystemChangeTick`]
//! - [`Archetypes`](crate::archetype::Archetypes) (Provides Archetype metadata)
//! - [`Bundles`](crate::bundle::Bundles) (Provides Bundles metadata)
//! - [`Components`](crate::component::Components) (Provides Components metadata)
//! - [`Entities`](crate::entity::Entities) (Provides Entities metadata)
//! - All tuples with 1 to 16 elements where each element implements [`SystemParam`]
//! - [`()` (unit primitive type)](https://doc.rust-lang.org/stable/std/primitive.unit.html)
mod commands;
mod exclusive_system;
mod function_system;
mod query;
#[allow(clippy::module_inception)]
mod system;
mod system_chaining;
mod system_param;
pub use commands::*;
pub use exclusive_system::*;
pub use function_system::*;
pub use query::*;
pub use system::*;
pub use system_chaining::*;
pub use system_param::*;
pub fn assert_is_system<In, Out, Params, S: IntoSystem<In, Out, Params>>(sys: S) {
if false {
// Check it can be converted into a system
IntoSystem::into_system(sys);
}
}
#[cfg(test)]
mod tests {
use std::any::TypeId;
use crate::{
self as bevy_ecs,
archetype::Archetypes,
bundle::Bundles,
component::{Component, Components},
entity::{Entities, Entity},
query::{Added, Changed, Or, QueryState, With, Without},
schedule::{Schedule, Stage, SystemStage},
system::{
IntoExclusiveSystem, IntoSystem, Local, NonSend, NonSendMut, Query, QuerySet,
RemovedComponents, Res, ResMut, System, SystemState,
},
world::{FromWorld, World},
};
#[derive(Component, Debug, Eq, PartialEq, Default)]
struct A;
#[derive(Component)]
struct B;
#[derive(Component)]
struct C;
#[derive(Component)]
struct D;
#[derive(Component)]
struct E;
#[derive(Component)]
struct F;
#[derive(Component)]
struct W<T>(T);
#[test]
fn simple_system() {
fn sys(query: Query<&A>) {
for a in query.iter() {
println!("{:?}", a);
}
}
let mut system = IntoSystem::into_system(sys);
let mut world = World::new();
world.spawn().insert(A);
system.initialize(&mut world);
for archetype in world.archetypes.iter() {
system.new_archetype(archetype);
}
system.run((), &mut world);
}
fn run_system<Param, S: IntoSystem<(), (), Param>>(world: &mut World, system: S) {
let mut schedule = Schedule::default();
let mut update = SystemStage::parallel();
update.add_system(system);
schedule.add_stage("update", update);
schedule.run(world);
}
#[test]
fn query_system_gets() {
fn query_system(
mut ran: ResMut<bool>,
entity_query: Query<Entity, With<A>>,
b_query: Query<&B>,
a_c_query: Query<(&A, &C)>,
d_query: Query<&D>,
) {
let entities = entity_query.iter().collect::<Vec<Entity>>();
assert!(
b_query.get_component::<B>(entities[0]).is_err(),
"entity 0 should not have B"
);
assert!(
b_query.get_component::<B>(entities[1]).is_ok(),
"entity 1 should have B"
);
assert!(
b_query.get_component::<A>(entities[1]).is_err(),
"entity 1 should have A, but b_query shouldn't have access to it"
);
assert!(
b_query.get_component::<D>(entities[3]).is_err(),
"entity 3 should have D, but it shouldn't be accessible from b_query"
);
assert!(
b_query.get_component::<C>(entities[2]).is_err(),
"entity 2 has C, but it shouldn't be accessible from b_query"
);
assert!(
a_c_query.get_component::<C>(entities[2]).is_ok(),
"entity 2 has C, and it should be accessible from a_c_query"
);
assert!(
a_c_query.get_component::<D>(entities[3]).is_err(),
"entity 3 should have D, but it shouldn't be accessible from b_query"
);
assert!(
d_query.get_component::<D>(entities[3]).is_ok(),
"entity 3 should have D"
);
*ran = true;
}
let mut world = World::default();
world.insert_resource(false);
world.spawn().insert_bundle((A,));
world.spawn().insert_bundle((A, B));
world.spawn().insert_bundle((A, C));
world.spawn().insert_bundle((A, D));
run_system(&mut world, query_system);
assert!(*world.resource::<bool>(), "system ran");
}
#[test]
fn or_query_set_system() {
// Regression test for issue #762
fn query_system(
mut ran: ResMut<bool>,
mut set: QuerySet<(
QueryState<(), Or<(Changed<A>, Changed<B>)>>,
QueryState<(), Or<(Added<A>, Added<B>)>>,
)>,
) {
let changed = set.q0().iter().count();
let added = set.q1().iter().count();
assert_eq!(changed, 1);
assert_eq!(added, 1);
*ran = true;
}
let mut world = World::default();
world.insert_resource(false);
world.spawn().insert_bundle((A, B));
run_system(&mut world, query_system);
assert!(*world.resource::<bool>(), "system ran");
}
#[test]
fn changed_resource_system() {
struct Added(usize);
struct Changed(usize);
fn incr_e_on_flip(
value: Res<bool>,
mut changed: ResMut<Changed>,
mut added: ResMut<Added>,
) {
if value.is_added() {
added.0 += 1;
}
if value.is_changed() {
changed.0 += 1;
}
}
let mut world = World::default();
world.insert_resource(false);
world.insert_resource(Added(0));
world.insert_resource(Changed(0));
let mut schedule = Schedule::default();
let mut update = SystemStage::parallel();
update.add_system(incr_e_on_flip);
schedule.add_stage("update", update);
schedule.add_stage(
"clear_trackers",
SystemStage::single(World::clear_trackers.exclusive_system()),
);
schedule.run(&mut world);
assert_eq!(world.resource::<Added>().0, 1);
assert_eq!(world.resource::<Changed>().0, 1);
schedule.run(&mut world);
assert_eq!(world.resource::<Added>().0, 1);
assert_eq!(world.resource::<Changed>().0, 1);
*world.resource_mut::<bool>() = true;
schedule.run(&mut world);
assert_eq!(world.resource::<Added>().0, 1);
assert_eq!(world.resource::<Changed>().0, 2);
}
#[test]
#[should_panic]
fn conflicting_query_mut_system() {
fn sys(_q1: Query<&mut A>, _q2: Query<&mut A>) {}
let mut world = World::default();
run_system(&mut world, sys);
}
#[test]
fn disjoint_query_mut_system() {
fn sys(_q1: Query<&mut A, With<B>>, _q2: Query<&mut A, Without<B>>) {}
let mut world = World::default();
run_system(&mut world, sys);
}
#[test]
fn disjoint_query_mut_read_component_system() {
fn sys(_q1: Query<(&mut A, &B)>, _q2: Query<&mut A, Without<B>>) {}
let mut world = World::default();
run_system(&mut world, sys);
}
#[test]
#[should_panic]
fn conflicting_query_immut_system() {
fn sys(_q1: Query<&A>, _q2: Query<&mut A>) {}
let mut world = World::default();
run_system(&mut world, sys);
}
#[test]
fn query_set_system() {
fn sys(mut _set: QuerySet<(QueryState<&mut A>, QueryState<&A>)>) {}
let mut world = World::default();
run_system(&mut world, sys);
}
#[test]
#[should_panic]
fn conflicting_query_with_query_set_system() {
fn sys(_query: Query<&mut A>, _set: QuerySet<(QueryState<&mut A>, QueryState<&B>)>) {}
let mut world = World::default();
run_system(&mut world, sys);
}
#[test]
#[should_panic]
fn conflicting_query_sets_system() {
fn sys(
_set_1: QuerySet<(QueryState<&mut A>,)>,
_set_2: QuerySet<(QueryState<&mut A>, QueryState<&B>)>,
) {
}
let mut world = World::default();
run_system(&mut world, sys);
}
#[derive(Default)]
struct BufferRes {
_buffer: Vec<u8>,
}
fn test_for_conflicting_resources<Param, S: IntoSystem<(), (), Param>>(sys: S) {
let mut world = World::default();
world.insert_resource(BufferRes::default());
world.insert_resource(A);
world.insert_resource(B);
run_system(&mut world, sys);
}
#[test]
#[should_panic]
fn conflicting_system_resources() {
fn sys(_: ResMut<BufferRes>, _: Res<BufferRes>) {}
test_for_conflicting_resources(sys);
}
#[test]
#[should_panic]
fn conflicting_system_resources_reverse_order() {
fn sys(_: Res<BufferRes>, _: ResMut<BufferRes>) {}
test_for_conflicting_resources(sys);
}
#[test]
#[should_panic]
fn conflicting_system_resources_multiple_mutable() {
fn sys(_: ResMut<BufferRes>, _: ResMut<BufferRes>) {}
test_for_conflicting_resources(sys);
}
#[test]
fn nonconflicting_system_resources() {
fn sys(_: Local<BufferRes>, _: ResMut<BufferRes>, _: Local<A>, _: ResMut<A>) {}
test_for_conflicting_resources(sys);
}
#[test]
fn local_system() {
let mut world = World::default();
world.insert_resource(1u32);
world.insert_resource(false);
struct Foo {
value: u32,
}
impl FromWorld for Foo {
fn from_world(world: &mut World) -> Self {
Foo {
value: *world.resource::<u32>() + 1,
}
}
}
fn sys(local: Local<Foo>, mut modified: ResMut<bool>) {
assert_eq!(local.value, 2);
*modified = true;
}
run_system(&mut world, sys);
// ensure the system actually ran
assert!(*world.resource::<bool>());
}
#[test]
fn non_send_option_system() {
let mut world = World::default();
world.insert_resource(false);
struct NotSend1(std::rc::Rc<i32>);
struct NotSend2(std::rc::Rc<i32>);
world.insert_non_send_resource(NotSend1(std::rc::Rc::new(0)));
fn sys(
op: Option<NonSend<NotSend1>>,
mut _op2: Option<NonSendMut<NotSend2>>,
mut run: ResMut<bool>,
) {
op.expect("NonSend should exist");
*run = true;
}
run_system(&mut world, sys);
// ensure the system actually ran
assert!(*world.resource::<bool>());
}
#[test]
fn non_send_system() {
let mut world = World::default();
world.insert_resource(false);
struct NotSend1(std::rc::Rc<i32>);
struct NotSend2(std::rc::Rc<i32>);
world.insert_non_send_resource(NotSend1(std::rc::Rc::new(1)));
world.insert_non_send_resource(NotSend2(std::rc::Rc::new(2)));
fn sys(_op: NonSend<NotSend1>, mut _op2: NonSendMut<NotSend2>, mut run: ResMut<bool>) {
*run = true;
}
run_system(&mut world, sys);
assert!(*world.resource::<bool>());
}
#[test]
fn removal_tracking() {
let mut world = World::new();
let entity_to_despawn = world.spawn().insert(W(1)).id();
let entity_to_remove_w_from = world.spawn().insert(W(2)).id();
let spurious_entity = world.spawn().id();
// Track which entities we want to operate on
struct Despawned(Entity);
world.insert_resource(Despawned(entity_to_despawn));
struct Removed(Entity);
world.insert_resource(Removed(entity_to_remove_w_from));
// Verify that all the systems actually ran
#[derive(Default)]
struct NSystems(usize);
world.insert_resource(NSystems::default());
// First, check that removal detection is triggered if and only if we despawn an entity with the correct component
world.entity_mut(entity_to_despawn).despawn();
world.entity_mut(spurious_entity).despawn();
fn validate_despawn(
removed_i32: RemovedComponents<W<i32>>,
despawned: Res<Despawned>,
mut n_systems: ResMut<NSystems>,
) {
assert_eq!(
removed_i32.iter().collect::<Vec<_>>(),
&[despawned.0],
"despawning causes the correct entity to show up in the 'RemovedComponent' system parameter."
);
n_systems.0 += 1;
}
run_system(&mut world, validate_despawn);
// Reset the trackers to clear the buffer of removed components
// Ordinarily, this is done in a system added by MinimalPlugins
world.clear_trackers();
// Then, try removing a component
world.spawn().insert(W(3));
world.spawn().insert(W(4));
world.entity_mut(entity_to_remove_w_from).remove::<W<i32>>();
fn validate_remove(
removed_i32: RemovedComponents<W<i32>>,
removed: Res<Removed>,
mut n_systems: ResMut<NSystems>,
) {
assert_eq!(
removed_i32.iter().collect::<Vec<_>>(),
&[removed.0],
"removing a component causes the correct entity to show up in the 'RemovedComponent' system parameter."
);
n_systems.0 += 1;
}
run_system(&mut world, validate_remove);
// Verify that both systems actually ran
assert_eq!(world.resource::<NSystems>().0, 2);
}
#[test]
fn world_collections_system() {
let mut world = World::default();
world.insert_resource(false);
world.spawn().insert_bundle((W(42), W(true)));
fn sys(
archetypes: &Archetypes,
components: &Components,
entities: &Entities,
bundles: &Bundles,
query: Query<Entity, With<W<i32>>>,
mut modified: ResMut<bool>,
) {
assert_eq!(query.iter().count(), 1, "entity exists");
for entity in query.iter() {
let location = entities.get(entity).unwrap();
let archetype = archetypes.get(location.archetype_id).unwrap();
let archetype_components = archetype.components().collect::<Vec<_>>();
let bundle_id = bundles
.get_id(std::any::TypeId::of::<(W<i32>, W<bool>)>())
.expect("Bundle used to spawn entity should exist");
let bundle_info = bundles.get(bundle_id).unwrap();
let mut bundle_components = bundle_info.components().to_vec();
bundle_components.sort();
for component_id in &bundle_components {
assert!(
components.get_info(*component_id).is_some(),
"every bundle component exists in Components"
);
}
assert_eq!(
bundle_components, archetype_components,
"entity's bundle components exactly match entity's archetype components"
);
}
*modified = true;
}
run_system(&mut world, sys);
// ensure the system actually ran
assert!(*world.resource::<bool>());
}
#[test]
fn get_system_conflicts() {
fn sys_x(_: Res<A>, _: Res<B>, _: Query<(&C, &D)>) {}
fn sys_y(_: Res<A>, _: ResMut<B>, _: Query<(&C, &mut D)>) {}
let mut world = World::default();
let mut x = IntoSystem::into_system(sys_x);
let mut y = IntoSystem::into_system(sys_y);
x.initialize(&mut world);
y.initialize(&mut world);
let conflicts = x.component_access().get_conflicts(y.component_access());
let b_id = world
.components()
.get_resource_id(TypeId::of::<B>())
.unwrap();
let d_id = world.components().get_id(TypeId::of::<D>()).unwrap();
assert_eq!(conflicts, vec![b_id, d_id]);
}
#[test]
fn query_is_empty() {
fn without_filter(not_empty: Query<&A>, empty: Query<&B>) {
assert!(!not_empty.is_empty());
assert!(empty.is_empty());
}
fn with_filter(not_empty: Query<&A, With<C>>, empty: Query<&A, With<D>>) {
assert!(!not_empty.is_empty());
assert!(empty.is_empty());
}
let mut world = World::default();
world.spawn().insert(A).insert(C);
let mut without_filter = IntoSystem::into_system(without_filter);
without_filter.initialize(&mut world);
without_filter.run((), &mut world);
let mut with_filter = IntoSystem::into_system(with_filter);
with_filter.initialize(&mut world);
with_filter.run((), &mut world);
}
#[test]
#[allow(clippy::too_many_arguments)]
fn can_have_16_parameters() {
fn sys_x(
_: Res<A>,
_: Res<B>,
_: Res<C>,
_: Res<D>,
_: Res<E>,
_: Res<F>,
_: Query<&A>,
_: Query<&B>,
_: Query<&C>,
_: Query<&D>,
_: Query<&E>,
_: Query<&F>,
_: Query<(&A, &B)>,
_: Query<(&C, &D)>,
_: Query<(&E, &F)>,
) {
}
fn sys_y(
_: (
Res<A>,
Res<B>,
Res<C>,
Res<D>,
Res<E>,
Res<F>,
Query<&A>,
Query<&B>,
Query<&C>,
Query<&D>,
Query<&E>,
Query<&F>,
Query<(&A, &B)>,
Query<(&C, &D)>,
Query<(&E, &F)>,
),
) {
}
let mut world = World::default();
let mut x = IntoSystem::into_system(sys_x);
let mut y = IntoSystem::into_system(sys_y);
x.initialize(&mut world);
y.initialize(&mut world);
}
#[test]
fn read_system_state() {
#[derive(Eq, PartialEq, Debug)]
struct A(usize);
#[derive(Component, Eq, PartialEq, Debug)]
struct B(usize);
let mut world = World::default();
world.insert_resource(A(42));
world.spawn().insert(B(7));
let mut system_state: SystemState<(
Res<A>,
Query<&B>,
QuerySet<(QueryState<&C>, QueryState<&D>)>,
)> = SystemState::new(&mut world);
let (a, query, _) = system_state.get(&world);
assert_eq!(*a, A(42), "returned resource matches initial value");
assert_eq!(
*query.single(),
B(7),
"returned component matches initial value"
);
}
#[test]
fn write_system_state() {
#[derive(Eq, PartialEq, Debug)]
struct A(usize);
#[derive(Component, Eq, PartialEq, Debug)]
struct B(usize);
let mut world = World::default();
world.insert_resource(A(42));
world.spawn().insert(B(7));
let mut system_state: SystemState<(ResMut<A>, Query<&mut B>)> =
SystemState::new(&mut world);
// The following line shouldn't compile because the parameters used are not ReadOnlySystemParam
// let (a, query) = system_state.get(&world);
let (a, mut query) = system_state.get_mut(&mut world);
assert_eq!(*a, A(42), "returned resource matches initial value");
assert_eq!(
*query.single_mut(),
B(7),
"returned component matches initial value"
);
}
#[test]
fn system_state_change_detection() {
#[derive(Component, Eq, PartialEq, Debug)]
struct A(usize);
let mut world = World::default();
let entity = world.spawn().insert(A(1)).id();
let mut system_state: SystemState<Query<&A, Changed<A>>> = SystemState::new(&mut world);
{
let query = system_state.get(&world);
assert_eq!(*query.single(), A(1));
}
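        // No component was mutated since the previous `get`, so the `Changed<A>` filter
        // matches nothing on this second run.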
{
let query = system_state.get(&world);
assert!(query.get_single().is_err());
}
world.entity_mut(entity).get_mut::<A>().unwrap().0 = 2;
{
let query = system_state.get(&world);
assert_eq!(*query.single(), A(2));
}
}
#[test]
#[should_panic]
fn system_state_invalid_world() {
let mut world = World::default();
let mut system_state = SystemState::<Query<&A>>::new(&mut world);
let mismatched_world = World::default();
system_state.get(&mismatched_world);
}
#[test]
fn system_state_archetype_update() {
#[derive(Component, Eq, PartialEq, Debug)]
struct A(usize);
#[derive(Component, Eq, PartialEq, Debug)]
struct B(usize);
let mut world = World::default();
world.spawn().insert(A(1));
let mut system_state = SystemState::<Query<&A>>::new(&mut world);
{
let query = system_state.get(&world);
assert_eq!(
query.iter().collect::<Vec<_>>(),
vec![&A(1)],
"exactly one component returned"
);
}
world.spawn().insert_bundle((A(2), B(2)));
{
let query = system_state.get(&world);
assert_eq!(
query.iter().collect::<Vec<_>>(),
vec![&A(1), &A(2)],
"components from both archetypes returned"
);
}
}
/// this test exists to show that read-only world-only queries can return data that lives as long as 'world
#[test]
#[allow(unused)]
fn long_life_test() {
struct Holder<'w> {
value: &'w A,
}
struct State {
state: SystemState<Res<'static, A>>,
state_q: SystemState<Query<'static, 'static, &'static A>>,
}
impl State {
fn hold_res<'w>(&mut self, world: &'w World) -> Holder<'w> |
fn hold_component<'w>(&mut self, world: &'w World, entity: Entity) -> Holder<'w> {
let q = self.state_q.get(world);
let a = q.get(entity).unwrap();
Holder { value: a }
}
fn hold_components<'w>(&mut self, world: &'w World) -> Vec<Holder<'w>> {
let mut components = Vec::new();
let q = self.state_q.get(world);
for a in q.iter() {
components.push(Holder { value: a });
}
components
}
}
}
#[test]
fn immutable_mut_test() {
#[derive(Component, Eq, PartialEq, Debug, Clone, Copy)]
struct A(usize);
let mut world = World::default();
world.spawn().insert(A(1));
world.spawn().insert(A(2));
let mut system_state = SystemState::<Query<&mut A>>::new(&mut world);
{
let mut query = system_state.get_mut(&mut world);
assert_eq!(
query.iter_mut().map(|m| *m).collect::<Vec<A>>(),
vec![A(1), A(2)],
"both components returned by iter_mut of &mut"
);
assert_eq!(
query.iter().collect::<Vec<&A>>(),
vec![&A(1), &A(2)],
"both components returned by iter of &mut"
);
}
}
}
| {
let a = self.state.get(world);
Holder {
value: a.into_inner(),
}
} |
wallet.go | package wallet
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"sort"
"strings"
"sync"
"time"
qu "github.com/l0k18/pod/pkg/util/quit"
"github.com/davecgh/go-spew/spew"
blockchain "github.com/l0k18/pod/pkg/chain"
"github.com/l0k18/pod/pkg/chain/config/netparams"
chainhash "github.com/l0k18/pod/pkg/chain/hash"
txauthor "github.com/l0k18/pod/pkg/chain/tx/author"
wtxmgr "github.com/l0k18/pod/pkg/chain/tx/mgr"
txrules "github.com/l0k18/pod/pkg/chain/tx/rules"
txscript "github.com/l0k18/pod/pkg/chain/tx/script"
"github.com/l0k18/pod/pkg/chain/wire"
ec "github.com/l0k18/pod/pkg/coding/elliptic"
"github.com/l0k18/pod/pkg/db/walletdb"
"github.com/l0k18/pod/pkg/pod"
"github.com/l0k18/pod/pkg/rpc/btcjson"
rpcclient "github.com/l0k18/pod/pkg/rpc/client"
"github.com/l0k18/pod/pkg/util"
"github.com/l0k18/pod/pkg/util/hdkeychain"
waddrmgr "github.com/l0k18/pod/pkg/wallet/addrmgr"
"github.com/l0k18/pod/pkg/wallet/chain"
)
const (
// InsecurePubPassphrase is the default outer encryption passphrase used for public data (everything but private
// keys). Using a non-default public passphrase can prevent an attacker without the public passphrase from
// discovering all past and future wallet addresses if they gain access to the wallet database.
//
// NOTE: at time of writing, public encryption only applies to public data in the waddrmgr namespace. Transactions
// are not yet encrypted.
InsecurePubPassphrase = ""
	// walletDbWatchingOnlyName = "wowallet.db"
	// recoveryBatchSize is the default number of blocks that will be scanned successively by the recovery manager, in
	// the event that the wallet is started in recovery mode.
recoveryBatchSize = 2000
)
// ErrNotSynced describes an error where an operation cannot complete due wallet being out of sync (and perhaps
// currently syncing with) the remote chain server.
var ErrNotSynced = errors.New("wallet is not synchronized with the chain server")
// Namespace bucket keys.
var (
waddrmgrNamespaceKey = []byte("waddrmgr")
wtxmgrNamespaceKey = []byte("wtxmgr")
)
// Wallet is a structure containing all the components for a complete wallet. It contains the Armory-style key store
// (addresses and keys).
type Wallet struct {
publicPassphrase []byte
// Data stores
db walletdb.DB
Manager *waddrmgr.Manager
TxStore *wtxmgr.Store
chainClient chain.Interface
chainClientLock sync.Mutex
chainClientSynced bool
chainClientSyncMtx sync.Mutex
lockedOutpoints map[wire.OutPoint]struct{}
recoveryWindow uint32
// Channels for rescan processing. Requests are added and merged with any waiting requests, before being sent to
// another goroutine to call the rescan RPC.
rescanAddJob chan *RescanJob
rescanBatch chan *rescanBatch
rescanNotifications chan interface{} // From chain server
rescanProgress chan *RescanProgressMsg
rescanFinished chan *RescanFinishedMsg
// Channel for transaction creation requests.
createTxRequests chan createTxRequest
// Channels for the manager locker.
unlockRequests chan unlockRequest
lockRequests qu.C
holdUnlockRequests chan chan heldUnlock
lockState chan bool
changePassphrase chan changePassphraseRequest
changePassphrases chan changePassphrasesRequest
// Information for reorganization handling.
// reorganizingLock sync.Mutex
// reorganizeToHash chainhash.Hash
// reorganizing bool
NtfnServer *NotificationServer
PodConfig *pod.Config
chainParams *netparams.Params
wg sync.WaitGroup
started bool
quit qu.C
quitMu sync.Mutex
Update qu.C
}
// Start starts the goroutines necessary to manage a wallet.
func (w *Wallet) Start() {
Trace("starting wallet")
w.quitMu.Lock()
select {
case <-w.quit:
Trace("waiting for wallet shutdown")
// Restart the wallet goroutines after shutdown finishes.
w.WaitForShutdown()
w.quit = qu.T()
default:
if w.started {
// Ignore when the wallet is still running.
Info("wallet already started")
w.quitMu.Unlock()
return
}
w.started = true
}
w.quitMu.Unlock()
w.wg.Add(2)
go w.txCreator()
go w.walletLocker()
}
// SynchronizeRPC associates the wallet with the consensus RPC client, synchronizes the wallet with the latest changes
// to the blockchain, and continuously updates the wallet through RPC notifications.
//
// This method is unstable and will be removed when all syncing logic is moved outside of the wallet package.
func (w *Wallet) SynchronizeRPC(chainClient chain.Interface) {
w.quitMu.Lock()
select {
case <-w.quit:
w.quitMu.Unlock()
return
default:
}
w.quitMu.Unlock()
// TODO: Ignoring the new client when one is already set breaks callers
// who are replacing the client, perhaps after a disconnect.
w.chainClientLock.Lock()
if w.chainClient != nil {
w.chainClientLock.Unlock()
return
}
w.chainClient = chainClient
// If the chain client is a NeutrinoClient instance, set a birthday so we don't download all the filters as we go.
switch cc := chainClient.(type) {
case *chain.NeutrinoClient:
cc.SetStartTime(w.Manager.Birthday())
case *chain.BitcoindClient:
cc.SetBirthday(w.Manager.Birthday())
}
w.chainClientLock.Unlock()
	// TODO: It would be preferable to run these goroutines separately from the wallet (use wallet mutator functions to
	// make changes from the RPC client) so they do not have to be stopped and restarted each time the client
	// disconnects and reconnects.
w.wg.Add(4)
go w.handleChainNotifications()
go w.rescanBatchHandler()
go w.rescanProgressHandler()
go w.rescanRPCHandler()
}
// requireChainClient marks that a wallet method can only be completed when the consensus RPC server is set. This
// function and all functions that call it are unstable and will need to be moved when the syncing code is moved out of
// the wallet.
func (w *Wallet) requireChainClient() (chain.Interface, error) {
w.chainClientLock.Lock()
chainClient := w.chainClient
w.chainClientLock.Unlock()
if chainClient == nil {
return nil, errors.New("blockchain RPC is inactive")
}
return chainClient, nil
}
// ChainClient returns the optional consensus RPC client associated with the wallet.
//
// This function is unstable and will be removed once sync logic is moved out of the wallet.
func (w *Wallet) ChainClient() chain.Interface {
w.chainClientLock.Lock()
chainClient := w.chainClient
w.chainClientLock.Unlock()
return chainClient
}
// quitChan atomically reads the quit channel.
func (w *Wallet) quitChan() qu.C {
w.quitMu.Lock()
c := w.quit
w.quitMu.Unlock()
return c
}
// Stop signals all wallet goroutines to shutdown.
func (w *Wallet) Stop() {
w.quitMu.Lock()
select {
case <-w.quit:
default:
w.chainClientLock.Lock()
if w.chainClient != nil {
w.chainClient.Stop()
w.chainClient = nil
}
w.chainClientLock.Unlock()
// w.quit.Q()
// return
}
w.quitMu.Unlock()
}
// ShuttingDown returns whether the wallet is currently in the process of shutting down or not.
func (w *Wallet) ShuttingDown() bool {
select {
case <-w.quitChan():
return true
default:
return false
}
}
// WaitForShutdown blocks until all wallet goroutines have finished executing.
func (w *Wallet) WaitForShutdown() {
Debug("waiting for shutdown")
w.chainClientLock.Lock()
Debug("locked", w.chainClient)
if w.chainClient != nil {
Debug("calling WaitForShutdown")
w.chainClient.WaitForShutdown()
}
Debug("unlocking")
w.chainClientLock.Unlock()
// Debug("waiting on waitgroup")
// w.wg.Wait()
}
// SynchronizingToNetwork returns whether the wallet is currently synchronizing with the Bitcoin network.
func (w *Wallet) SynchronizingToNetwork() bool {
// At the moment, RPC is the only synchronization method. In the future, when SPV is added, a separate check will
// also be needed, or SPV could always be enabled if RPC was not explicitly specified when creating the wallet.
w.chainClientSyncMtx.Lock()
syncing := w.chainClient != nil
w.chainClientSyncMtx.Unlock()
return syncing
}
// ChainSynced returns whether the wallet has been attached to a chain server and synced up to the best block on the
// main chain.
func (w *Wallet) ChainSynced() bool {
w.chainClientSyncMtx.Lock()
synced := w.chainClientSynced
w.chainClientSyncMtx.Unlock()
return synced
}
// SetChainSynced marks whether the wallet is connected to and currently in sync with the latest block notified by the
// chain server.
//
// NOTE: Due to an API limitation with rpcclient, this may return true after the client disconnected (and is attempting
// a reconnect). This will be unknown until the reconnect notification is received, at which point the wallet can be
// marked out of sync again until after the next rescan completes.
func (w *Wallet) SetChainSynced(synced bool) {
w.chainClientSyncMtx.Lock()
w.chainClientSynced = synced
w.chainClientSyncMtx.Unlock()
}
// activeData returns the currently-active receiving addresses and all unspent outputs. This is primarily intended to
// provide the parameters for a rescan request.
func (w *Wallet) activeData(dbtx walletdb.ReadTx) ([]util.Address, []wtxmgr.Credit, error) {
addrmgrNs := dbtx.ReadBucket(waddrmgrNamespaceKey)
txmgrNs := dbtx.ReadBucket(wtxmgrNamespaceKey)
var addrs []util.Address
err := w.Manager.ForEachActiveAddress(
addrmgrNs, func(addr util.Address) error {
addrs = append(addrs, addr)
return nil
},
)
if err != nil {
Error(err)
return nil, nil, err
}
unspent, err := w.TxStore.UnspentOutputs(txmgrNs)
return addrs, unspent, err
}
// syncWithChain brings the wallet up to date with the current chain server connection. It creates a rescan request and
// blocks until the rescan has finished.
func (w *Wallet) syncWithChain() error {
chainClient, err := w.requireChainClient()
if err != nil {
Error(err)
return err
}
// Request notifications for transactions sending to all wallet addresses.
var (
addrs []util.Address
unspent []wtxmgr.Credit
)
err = walletdb.View(
w.db, func(dbtx walletdb.ReadTx) error {
var err error
addrs, unspent, err = w.activeData(dbtx)
return err
},
)
if err != nil {
Warn("error starting sync", err)
return err
}
startHeight := w.Manager.SyncedTo().Height
// We'll mark this as our first sync if we don't have any unspent outputs as known by the wallet. This will allow us
// to skip a full rescan at this height, and instead wait for the backend to catch up.
isInitialSync := len(unspent) == 0
isRecovery := w.recoveryWindow > 0
birthday := w.Manager.Birthday()
// If an initial sync is attempted, we will try and find the block stamp of the first block past our birthday. This
// will be fed into the rescan to ensure we catch transactions that are sent while performing the initial sync.
var birthdayStamp *waddrmgr.BlockStamp
// TODO(jrick): How should this handle a synced height earlier than the chain server best block? When no addresses
// have been generated for the wallet, the rescan can be skipped.
//
// TODO: This is only correct because activeData above returns all addresses ever created, including those that
// don't need to be watched anymore. This code should be updated when this assumption is no longer true, but worst
// case would result in an unnecessary rescan.
if isInitialSync || isRecovery {
// Find the latest checkpoint's height. This lets us catch up to at least that checkpoint, since we're
// synchronizing from scratch, and lets us avoid a bunch of costly DB transactions in the case when we're using
// BDB for the walletdb backend and Neutrino for the chain.Interface backend, and the chain backend starts
// synchronizing at the same time as the wallet.
_, bestHeight, err := chainClient.GetBestBlock()
if err != nil {
Error(err)
return err
}
Debug("bestHeight", bestHeight)
checkHeight := bestHeight
if len(w.chainParams.Checkpoints) > 0 {
checkHeight = w.chainParams.Checkpoints[len(
w.chainParams.Checkpoints,
)-1].Height
}
logHeight := checkHeight
if bestHeight > logHeight {
logHeight = bestHeight
}
Infof(
"catching up block hashes to height %d, this will take a while",
logHeight,
)
// Initialize the first database transaction.
tx, err := w.db.BeginReadWriteTx()
if err != nil {
Error(err)
return err
}
ns := tx.ReadWriteBucket(waddrmgrNamespaceKey)
// Only allocate the recoveryMgr if we are actually in recovery mode.
recoveryMgr := &RecoveryManager{}
if isRecovery {
Info(
"RECOVERY MODE ENABLED -- rescanning for used addresses with recovery_window =",
w.recoveryWindow,
)
// Initialize the recovery manager with a default batch size of 2000.
recoveryMgr = NewRecoveryManager(
w.recoveryWindow, recoveryBatchSize,
w.chainParams,
)
// In the event that this recovery is being resumed, we will need to repopulate all found addresses from the
// database. For basic recovery, we will only do so for the default scopes.
scopedMgrs, err := w.defaultScopeManagers()
if err != nil {
Error(err)
return err
}
txmgrNs := tx.ReadBucket(wtxmgrNamespaceKey)
credits, err := w.TxStore.UnspentOutputs(txmgrNs)
if err != nil {
Error(err)
return err
}
err = recoveryMgr.Resurrect(ns, scopedMgrs, credits)
if err != nil {
Error(err)
return err
}
}
for height := startHeight; height <= bestHeight; height++ {
hash, err := chainClient.GetBlockHash(int64(height))
if err != nil {
Error(err)
e := tx.Rollback()
if e != nil {
Error(err)
}
return err
}
// If we're using the Neutrino backend, we can check if it's current or not. For other backends we'll assume
// it is current if the best height has reached the last checkpoint.
isCurrent := func(bestHeight int32) bool {
switch c := chainClient.(type) {
case *chain.NeutrinoClient:
return c.CS.IsCurrent()
}
return bestHeight >= checkHeight
}
// If we've found the best height the backend knows about, and the backend is still synchronizing, we'll
// wait. We can give it a little bit of time to synchronize further before updating the best height based on
// the backend. Once we see that the backend has advanced, we can catch up to it.
for height == bestHeight && !isCurrent(bestHeight) {
time.Sleep(100 * time.Millisecond)
_, bestHeight, err = chainClient.GetBestBlock()
if err != nil {
Error(err)
e := tx.Rollback()
if e != nil {
Error(err)
}
return err
}
}
header, err := chainClient.GetBlockHeader(hash)
if err != nil {
Error(err)
return err
}
// Check to see if this header's timestamp has surpassed our birthday or if we've surpassed one previously.
timestamp := header.Timestamp
if timestamp.After(birthday) || birthdayStamp != nil {
// If this is the first block past our birthday, record the block stamp so that we can use this as the
// starting point for the rescan. This will ensure we don't miss transactions that are sent to the
// wallet during an initial sync.
//
// NOTE: The birthday persisted by the wallet is two days before the actual wallet birthday, to deal
// with potentially inaccurate header timestamps.
if birthdayStamp == nil {
birthdayStamp = &waddrmgr.BlockStamp{
Height: height,
Hash: *hash,
Timestamp: timestamp,
}
}
// If we are in recovery mode and the check passes, we will add this block to our list of blocks to scan
// for recovered addresses.
if isRecovery {
recoveryMgr.AddToBlockBatch(
hash, height, timestamp,
)
}
}
err = w.Manager.SetSyncedTo(
ns, &waddrmgr.BlockStamp{
Hash: *hash,
Height: height,
Timestamp: timestamp,
},
)
if err != nil {
Error(err)
e := tx.Rollback()
if e != nil {
Error(err)
}
return err
}
// If we are in recovery mode, attempt a recovery on blocks that have been added to the recovery manager's
// block batch thus far. If block batch is empty, this will be a NOP.
if isRecovery && height%recoveryBatchSize == 0 {
err := w.recoverDefaultScopes(
chainClient, tx, ns,
recoveryMgr.BlockBatch(),
recoveryMgr.State(),
)
if err != nil {
Error(err)
e := tx.Rollback()
if e != nil {
Error(err)
}
return err
}
// Clear the batch of all processed blocks.
recoveryMgr.ResetBlockBatch()
}
// Every 10K blocks, commit and start a new database TX.
if height%10000 == 0 {
err = tx.Commit()
if err != nil {
Error(err)
e := tx.Rollback()
if e != nil {
Error(err)
}
return err
}
Info(
"caught up to height", height,
)
tx, err = w.db.BeginReadWriteTx()
if err != nil {
Error(err)
return err
}
ns = tx.ReadWriteBucket(waddrmgrNamespaceKey)
}
}
// Perform one last recovery attempt for all blocks that were not batched at the default granularity of 2000
// blocks.
if isRecovery {
err := w.recoverDefaultScopes(
chainClient, tx, ns, recoveryMgr.BlockBatch(),
recoveryMgr.State(),
)
if err != nil {
Error(err)
e := tx.Rollback()
if e != nil {
Error(err)
}
return err
}
}
// Commit (or roll back) the final database transaction.
err = tx.Commit()
if err != nil {
Error(err)
e := tx.Rollback()
if e != nil {
Error(err)
}
return err
}
Info("done catching up block hashes")
// Since we've spent some time catching up block hashes, we might have new addresses waiting for us that were
// requested during initial sync. Make sure we have those before we request a rescan later on.
err = walletdb.View(
w.db, func(dbtx walletdb.ReadTx) error {
var err error
addrs, unspent, err = w.activeData(dbtx)
return err
},
)
if err != nil {
Error(err)
return err
}
}
// Compare previously-seen blocks against the chain server. If any of these blocks no longer exist, rollback all of
// the missing blocks before catching up with the rescan.
rollback := false
rollbackStamp := w.Manager.SyncedTo()
err = walletdb.Update(
w.db, func(tx walletdb.ReadWriteTx) error {
addrmgrNs := tx.ReadWriteBucket(waddrmgrNamespaceKey)
txmgrNs := tx.ReadWriteBucket(wtxmgrNamespaceKey)
for height := rollbackStamp.Height; true; height-- {
hash, err := w.Manager.BlockHash(addrmgrNs, height)
if err != nil {
Error(err)
return err
}
chainHash, err := chainClient.GetBlockHash(int64(height))
if err != nil {
Error(err)
return err
}
header, err := chainClient.GetBlockHeader(chainHash)
if err != nil {
Error(err)
return err
}
rollbackStamp.Hash = *chainHash
rollbackStamp.Height = height
rollbackStamp.Timestamp = header.Timestamp
if bytes.Equal(hash[:], chainHash[:]) {
break
}
rollback = true
}
if rollback {
err := w.Manager.SetSyncedTo(addrmgrNs, &rollbackStamp)
if err != nil {
Error(err)
return err
}
// Rollback unconfirms transactions at and beyond the passed height, so add one to the new synced-to height
// to prevent unconfirming txs from the synced-to block.
err = w.TxStore.Rollback(txmgrNs, rollbackStamp.Height+1)
if err != nil {
Error(err)
return err
}
}
return nil
},
)
if err != nil {
Error(err)
return err
}
// If a birthday stamp was found during the initial sync and the rollback causes us to revert it, update the
// birthday stamp so that it points at the new tip.
if birthdayStamp != nil && rollbackStamp.Height <= birthdayStamp.Height {
birthdayStamp = &rollbackStamp
}
// Request notifications for connected and disconnected blocks.
//
	// TODO(jrick): Either request this notification only once, or, when rpcclient is modified to allow some
	// notification requests to not be automatically resent on reconnect, include the notifyblocks request as well. I
	// am leaning towards allowing turning off all rpcclient notification re-registrations, in which case the code here
	// should be left as is.
err = chainClient.NotifyBlocks()
if err != nil {
Error(err)
return err
}
return w.rescanWithTarget(addrs, unspent, birthdayStamp)
}
// defaultScopeManagers fetches the ScopedKeyManagers from the wallet using the default set of key scopes.
func (w *Wallet) defaultScopeManagers() (
map[waddrmgr.KeyScope]*waddrmgr.ScopedKeyManager, error,
) {
scopedMgrs := make(map[waddrmgr.KeyScope]*waddrmgr.ScopedKeyManager)
for _, scope := range waddrmgr.DefaultKeyScopes {
scopedMgr, err := w.Manager.FetchScopedKeyManager(scope)
if err != nil {
Error(err)
return nil, err
}
scopedMgrs[scope] = scopedMgr
}
return scopedMgrs, nil
}
// recoverDefaultScopes attempts to recover any addresses belonging to any active scoped key managers known to the
// wallet. Recovery of each scope's default account will be done iteratively against the same batch of blocks.
//
// TODO(conner): parallelize/pipeline/cache intermediate network requests
func (w *Wallet) recoverDefaultScopes(
chainClient chain.Interface,
tx walletdb.ReadWriteTx,
ns walletdb.ReadWriteBucket,
batch []wtxmgr.BlockMeta,
recoveryState *RecoveryState,
) error {
scopedMgrs, err := w.defaultScopeManagers()
if err != nil {
Error(err)
return err
}
return w.recoverScopedAddresses(
chainClient, tx, ns, batch, recoveryState, scopedMgrs,
)
}
// recoverScopedAddresses scans a range of blocks in an attempt to recover any previously used addresses for a particular
// account derivation path. At a high level, the algorithm works as follows:
//
// 1) Ensure internal and external branch horizons are fully expanded.
//
// 2) Filter the entire range of blocks, stopping if a non-zero number of addresses are contained in a particular block.
//
// 3) Record all internal and external addresses found in the block.
//
// 4) Record any outpoints found in the block that should be watched for spends
//
// 5) Trim the range of blocks up to and including the one reporting the addrs.
//
// 6) Repeat from (1) if there are still more blocks in the range.
func (w *Wallet) recoverScopedAddresses(
chainClient chain.Interface,
tx walletdb.ReadWriteTx,
ns walletdb.ReadWriteBucket,
batch []wtxmgr.BlockMeta,
recoveryState *RecoveryState,
scopedMgrs map[waddrmgr.KeyScope]*waddrmgr.ScopedKeyManager,
) error {
// If there are no blocks in the batch, we are done.
if len(batch) == 0 {
return nil
}
Infof(
"scanning %d blocks for recoverable addresses",
len(batch),
)
expandHorizons:
for scope, scopedMgr := range scopedMgrs {
scopeState := recoveryState.StateForScope(scope)
err := expandScopeHorizons(ns, scopedMgr, scopeState)
if err != nil {
Error(err)
return err
}
}
// With the internal and external horizons properly expanded, we now construct the filter blocks request. The
// request includes the range of blocks we intend to scan, in addition to the scope-index -> addr map for all
// internal and external branches.
filterReq := newFilterBlocksRequest(batch, scopedMgrs, recoveryState)
// Initiate the filter blocks request using our chain backend. If an error occurs, we are unable to proceed with the
// recovery.
filterResp, err := chainClient.FilterBlocks(filterReq)
if err != nil {
Error(err)
return err
}
// If the filter response is empty, this signals that the rest of the batch was completed, and no other addresses
// were discovered. As a result, no further modifications to our recovery state are required and we can proceed to
// the next batch.
if filterResp == nil {
return nil
}
// Otherwise, retrieve the block info for the block that detected a non-zero number of address matches.
block := batch[filterResp.BatchIndex]
// Log any non-trivial findings of addresses or outpoints.
logFilterBlocksResp(block, filterResp)
// Report any external or internal addresses found as a result of the appropriate branch recovery state. Adding
// indexes above the last-found index of either will result in the horizons being expanded upon the next iteration.
// Any found addresses are also marked used using the scoped key manager.
err = extendFoundAddresses(ns, filterResp, scopedMgrs, recoveryState)
if err != nil {
Error(err)
return err
}
// Update the global set of watched outpoints with any that were found in the block.
for outPoint, addr := range filterResp.FoundOutPoints {
recoveryState.AddWatchedOutPoint(&outPoint, addr)
}
// Finally, record all of the relevant transactions that were returned in the filter blocks response. This ensures
// that these transactions and their outputs are tracked when the final rescan is performed.
for _, txn := range filterResp.RelevantTxns {
txRecord, err := wtxmgr.NewTxRecordFromMsgTx(
txn, filterResp.BlockMeta.Time,
)
if err != nil {
Error(err)
return err
}
err = w.addRelevantTx(tx, txRecord, &filterResp.BlockMeta)
if err != nil {
Error(err)
return err
}
}
	// Update the batch to indicate that we've processed all blocks through the one that returned found addresses.
batch = batch[filterResp.BatchIndex+1:]
// If this was not the last block in the batch, we will repeat the filtering process again after expanding our
// horizons.
if len(batch) > 0 {
goto expandHorizons
}
return nil
}
// expandScopeHorizons ensures that the ScopeRecoveryState has an adequately sized look ahead for both its internal and
// external branches. The keys derived here are added to the scope's recovery state, but do not affect the persistent
// state of the wallet. If any invalid child keys are detected, the horizon will be properly extended such that our
// lookahead always includes the proper number of valid child keys.
func expandScopeHorizons(
ns walletdb.ReadWriteBucket,
scopedMgr *waddrmgr.ScopedKeyManager,
scopeState *ScopeRecoveryState,
) error {
// Compute the current external horizon and the number of addresses we must derive to ensure we maintain a
// sufficient recovery window for the external branch.
exHorizon, exWindow := scopeState.ExternalBranch.ExtendHorizon()
count, childIndex := uint32(0), exHorizon
for count < exWindow {
keyPath := externalKeyPath(childIndex)
addr, err := scopedMgr.DeriveFromKeyPath(ns, keyPath)
switch {
case err == hdkeychain.ErrInvalidChild:
// Record the existence of an invalid child with the external branch's recovery state. This also increments
// the branch's horizon so that it accounts for this skipped child index.
scopeState.ExternalBranch.MarkInvalidChild(childIndex)
childIndex++
continue
case err != nil:
return err
}
// Register the newly generated external address and child index with the external branch recovery state.
scopeState.ExternalBranch.AddAddr(childIndex, addr.Address())
childIndex++
count++
}
// Compute the current internal horizon and the number of addresses we must derive to ensure we maintain a
// sufficient recovery window for the internal branch.
inHorizon, inWindow := scopeState.InternalBranch.ExtendHorizon()
count, childIndex = 0, inHorizon
for count < inWindow {
keyPath := internalKeyPath(childIndex)
addr, err := scopedMgr.DeriveFromKeyPath(ns, keyPath)
switch {
case err == hdkeychain.ErrInvalidChild:
// Record the existence of an invalid child with the internal branch's recovery state. This also increments
// the branch's horizon so that it accounts for this skipped child index.
scopeState.InternalBranch.MarkInvalidChild(childIndex)
childIndex++
continue
case err != nil:
return err
}
// Register the newly generated internal address and child index with the internal branch recovery state.
scopeState.InternalBranch.AddAddr(childIndex, addr.Address())
childIndex++
count++
}
return nil
}
// externalKeyPath returns the relative external derivation path /0/0/index.
func externalKeyPath(index uint32) waddrmgr.DerivationPath {
return waddrmgr.DerivationPath{
Account: waddrmgr.DefaultAccountNum,
Branch: waddrmgr.ExternalBranch,
Index: index,
}
}
// internalKeyPath returns the relative internal derivation path /0/1/index.
func internalKeyPath(index uint32) waddrmgr.DerivationPath {
return waddrmgr.DerivationPath{
Account: waddrmgr.DefaultAccountNum,
Branch: waddrmgr.InternalBranch,
Index: index,
}
}
// newFilterBlocksRequest constructs FilterBlocksRequests using our current block range, scoped managers, and recovery
// state.
func newFilterBlocksRequest(
batch []wtxmgr.BlockMeta,
scopedMgrs map[waddrmgr.KeyScope]*waddrmgr.ScopedKeyManager,
recoveryState *RecoveryState,
) *chain.FilterBlocksRequest {
filterReq := &chain.FilterBlocksRequest{
Blocks: batch,
ExternalAddrs: make(map[waddrmgr.ScopedIndex]util.Address),
InternalAddrs: make(map[waddrmgr.ScopedIndex]util.Address),
WatchedOutPoints: recoveryState.WatchedOutPoints(),
}
	// Populate the external and internal addresses by merging the address sets belonging to all currently tracked
// scopes.
for scope := range scopedMgrs {
scopeState := recoveryState.StateForScope(scope)
for index, addr := range scopeState.ExternalBranch.Addrs() {
scopedIndex := waddrmgr.ScopedIndex{
Scope: scope,
Index: index,
}
filterReq.ExternalAddrs[scopedIndex] = addr
}
for index, addr := range scopeState.InternalBranch.Addrs() {
scopedIndex := waddrmgr.ScopedIndex{
Scope: scope,
Index: index,
}
filterReq.InternalAddrs[scopedIndex] = addr
}
}
return filterReq
}
// extendFoundAddresses accepts a filter blocks response that contains addresses found on chain, and advances the state
// of all relevant derivation paths to match the highest found child index for each branch.
func extendFoundAddresses(
ns walletdb.ReadWriteBucket,
filterResp *chain.FilterBlocksResponse,
scopedMgrs map[waddrmgr.KeyScope]*waddrmgr.ScopedKeyManager,
recoveryState *RecoveryState,
) error {
// Mark all recovered external addresses as used. This will be done only for scopes that reported a non-zero number
// of external addresses in this block.
for scope, indexes := range filterResp.FoundExternalAddrs {
// First, report all external child indexes found for this scope. This ensures that the external last-found
// index will be updated to include the maximum child index seen thus far.
scopeState := recoveryState.StateForScope(scope)
for index := range indexes {
scopeState.ExternalBranch.ReportFound(index)
}
scopedMgr := scopedMgrs[scope]
// Now, with all found addresses reported, derive and extend all external addresses up to and including the
// current last found index for this scope.
exNextUnfound := scopeState.ExternalBranch.NextUnfound()
exLastFound := exNextUnfound
if exLastFound > 0 {
exLastFound--
}
err := scopedMgr.ExtendExternalAddresses(
ns, waddrmgr.DefaultAccountNum, exLastFound,
)
if err != nil {
Error(err)
return err
}
// Finally, with the scope's addresses extended, we mark used the external addresses that were found in the
// block and belong to this scope.
for index := range indexes {
addr := scopeState.ExternalBranch.GetAddr(index)
err := scopedMgr.MarkUsed(ns, addr)
if err != nil {
Error(err)
return err
}
}
}
// Mark all recovered internal addresses as used. This will be done only for scopes that reported a non-zero number
// of internal addresses in this block.
for scope, indexes := range filterResp.FoundInternalAddrs {
// First, report all internal child indexes found for this scope. This ensures that the internal last-found
// index will be updated to include the maximum child index seen thus far.
scopeState := recoveryState.StateForScope(scope)
for index := range indexes {
scopeState.InternalBranch.ReportFound(index)
}
scopedMgr := scopedMgrs[scope]
// Now, with all found addresses reported, derive and extend all internal addresses up to and including the
// current last found index for this scope.
inNextUnfound := scopeState.InternalBranch.NextUnfound()
inLastFound := inNextUnfound
if inLastFound > 0 {
inLastFound--
}
err := scopedMgr.ExtendInternalAddresses(
ns, waddrmgr.DefaultAccountNum, inLastFound,
)
if err != nil {
Error(err)
return err
}
// Finally, with the scope's addresses extended, we mark used the internal addresses that were found in the
// block and belong to this scope.
for index := range indexes {
addr := scopeState.InternalBranch.GetAddr(index)
err := scopedMgr.MarkUsed(ns, addr)
if err != nil {
Error(err)
return err
}
}
}
return nil
}
// logFilterBlocksResp provides useful logging information when filtering succeeded in finding relevant transactions.
func logFilterBlocksResp(
block wtxmgr.BlockMeta,
resp *chain.FilterBlocksResponse,
) {
// Log the number of external addresses found in this block.
var nFoundExternal int
for _, indexes := range resp.FoundExternalAddrs {
nFoundExternal += len(indexes)
}
if nFoundExternal > 0 {
Tracef(
"recovered %d external addrs at height=%d hash=%v",
nFoundExternal, block.Height, block.Hash,
)
}
// Log the number of internal addresses found in this block.
var nFoundInternal int
for _, indexes := range resp.FoundInternalAddrs {
nFoundInternal += len(indexes)
}
if nFoundInternal > 0 {
Tracef(
"recovered %d internal addrs at height=%d hash=%v",
nFoundInternal, block.Height, block.Hash,
)
}
// Log the number of outpoints found in this block.
nFoundOutPoints := len(resp.FoundOutPoints)
if nFoundOutPoints > 0 {
Tracef(
"found %d spends from watched outpoints at height=%d hash=%v",
nFoundOutPoints, block.Height, block.Hash,
)
}
}
type (
createTxRequest struct {
account uint32
outputs []*wire.TxOut
minconf int32
feeSatPerKB util.Amount
resp chan createTxResponse
}
createTxResponse struct {
tx *txauthor.AuthoredTx
err error
}
)
// txCreator is responsible for the input selection and creation of transactions. These functions are the responsibility
// of this method (designed to be run as its own goroutine) since input selection must be serialized, or else it is
// possible to create double spends by choosing the same inputs for multiple transactions. Along with input selection,
// this method is also responsible for the signing of transactions, since we don't want to end up in a situation where
// we run out of inputs as multiple transactions are being created. In this situation, it would then be possible for
// both requests, rather than just one, to fail due to not enough available inputs.
func (w *Wallet) txCreator() {
quit := w.quitChan()
out:
for {
select {
case txr := <-w.createTxRequests:
heldUnlock, err := w.holdUnlock()
if err != nil {
Error(err)
txr.resp <- createTxResponse{nil, err}
continue
}
tx, err := w.txToOutputs(
txr.outputs, txr.account,
txr.minconf, txr.feeSatPerKB,
)
heldUnlock.release()
txr.resp <- createTxResponse{tx, err}
case <-quit:
break out
}
}
w.wg.Done()
}
// CreateSimpleTx creates a new signed transaction spending unspent P2PKH outputs with at least minconf confirmations
// spending to any number of address/amount pairs. Change and an appropriate transaction fee are automatically included,
// if necessary. All transaction creation through this function is serialized to prevent the creation of many
// transactions which spend the same outputs.
func (w *Wallet) CreateSimpleTx(
account uint32, outputs []*wire.TxOut,
minconf int32, satPerKb util.Amount,
) (*txauthor.AuthoredTx, error) {
req := createTxRequest{
account: account,
outputs: outputs,
minconf: minconf,
feeSatPerKB: satPerKb,
resp: make(chan createTxResponse),
}
w.createTxRequests <- req
resp := <-req.resp
return resp.tx, resp.err
}
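// exampleCreateSimpleTx is an illustrative sketch only (it is not called anywhere in this package) showing how a
// caller might hand prebuilt outputs to CreateSimpleTx; the request is funnelled through the txCreator goroutine so
// that input selection stays serialized. The one-confirmation minimum and the 1000 satoshi-per-kilobyte fee rate are
// placeholder values, not recommendations.
func exampleCreateSimpleTx(w *Wallet, outputs []*wire.TxOut) (*txauthor.AuthoredTx, error) {
// Inputs must have at least one confirmation; the fee rate is a placeholder.
return w.CreateSimpleTx(waddrmgr.DefaultAccountNum, outputs, 1, util.Amount(1000))
}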
type (
unlockRequest struct {
passphrase []byte
lockAfter <-chan time.Time // nil prevents the timeout.
err chan error
}
changePassphraseRequest struct {
old, new []byte
private bool
err chan error
}
changePassphrasesRequest struct {
publicOld, publicNew []byte
privateOld, privateNew []byte
err chan error
}
// heldUnlock is a tool to prevent the wallet from automatically locking after some timeout before an operation
// which needed the unlocked wallet has finished. Any acquired heldUnlock *must* be released (preferably with a
// defer) or the wallet will forever remain unlocked.
heldUnlock qu.C
)
// walletLocker manages the locked/unlocked state of a wallet.
func (w *Wallet) walletLocker() {
var timeout <-chan time.Time
holdChan := make(heldUnlock)
quit := w.quitChan()
// this flips to false once the first unlock has been done, for runasservice option which shuts down on lock
// first := true
out:
for {
select {
case req := <-w.unlockRequests:
err := walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
addrmgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
return w.Manager.Unlock(addrmgrNs, req.passphrase)
},
)
if err != nil {
Error(err)
req.err <- err
continue
}
timeout = req.lockAfter
if timeout == nil {
Info("the wallet has been unlocked without a time limit")
} else {
Info("the wallet has been temporarily unlocked")
}
req.err <- nil
continue
case req := <-w.changePassphrase:
err := walletdb.Update(
w.db, func(tx walletdb.ReadWriteTx) error {
addrmgrNs := tx.ReadWriteBucket(waddrmgrNamespaceKey)
return w.Manager.ChangePassphrase(
addrmgrNs, req.old, req.new, req.private,
&waddrmgr.DefaultScryptOptions,
)
},
)
req.err <- err
continue
case req := <-w.changePassphrases:
err := walletdb.Update(
w.db, func(tx walletdb.ReadWriteTx) error {
addrmgrNs := tx.ReadWriteBucket(waddrmgrNamespaceKey)
err := w.Manager.ChangePassphrase(
addrmgrNs, req.publicOld, req.publicNew,
false, &waddrmgr.DefaultScryptOptions,
)
if err != nil {
Error(err)
return err
}
return w.Manager.ChangePassphrase(
addrmgrNs, req.privateOld, req.privateNew,
true, &waddrmgr.DefaultScryptOptions,
)
},
)
req.err <- err
continue
case req := <-w.holdUnlockRequests:
if w.Manager.IsLocked() {
close(req)
continue
}
req <- holdChan
<-holdChan // Block until the lock is released.
// If, after holding onto the unlocked wallet for some time, the timeout has expired, lock it now instead of
// hoping it gets unlocked next time the top level select runs.
select {
case <-timeout:
// Let the top level select fallthrough so the wallet is locked.
default:
continue
}
case w.lockState <- w.Manager.IsLocked():
continue
case <-quit:
break out
case <-w.lockRequests:
// first = false
case <-timeout:
// first = false
}
// Select statement fell through by an explicit lock or the timer expiring. Lock the manager here.
timeout = nil
err := w.Manager.Lock()
if err != nil && !waddrmgr.IsError(err, waddrmgr.ErrLocked) {
Error("could not lock wallet:", err)
} else {
Info("the wallet has been locked")
}
// if *w.PodConfig.RunAsService && !first {
// // if we are running as a service this means shut down on lock as unlocking happens only at startup
// break out
// }
}
w.wg.Done()
}
// Unlock unlocks the wallet's address manager and relocks it after timeout has expired. If the wallet is already
// unlocked and the new passphrase is correct, the current timeout is replaced with the new one. The wallet will be
// locked if the passphrase is incorrect or any other error occurs during the unlock.
func (w *Wallet) Unlock(passphrase []byte, lock <-chan time.Time) error {
err := make(chan error, 1)
w.unlockRequests <- unlockRequest{
passphrase: passphrase,
lockAfter: lock,
err: err,
}
return <-err
}
// Lock locks the wallet's address manager.
func (w *Wallet) Lock() {
w.lockRequests <- struct{}{}
}
// Locked returns whether the account manager for a wallet is locked.
func (w *Wallet) Locked() bool {
return <-w.lockState
}
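// exampleTimedUnlock is an illustrative sketch only (it is not called anywhere in this package) of the Unlock/Lock
// API handled by walletLocker above: the wallet is unlocked with the private passphrase and relocked automatically
// when the timeout channel fires. Passing a nil channel instead would keep the wallet unlocked until Lock is called.
func exampleTimedUnlock(w *Wallet, passphrase []byte) error {
// Relock automatically five minutes after a successful unlock.
if err := w.Unlock(passphrase, time.After(5*time.Minute)); err != nil {
return err
}
// ... perform operations that require the unlocked address manager ...
// The wallet can also be locked explicitly before the timeout expires.
w.Lock()
return nil
}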
// holdUnlock prevents the wallet from being locked. The heldUnlock object *must* be released, or the wallet will
// forever remain unlocked.
//
// TODO: To prevent the above scenario, perhaps closures should be passed to the walletLocker goroutine and disallow
// callers from explicitly handling the locking mechanism.
func (w *Wallet) holdUnlock() (heldUnlock, error) {
req := make(chan heldUnlock)
w.holdUnlockRequests <- req
hl, ok := <-req
if !ok {
// TODO(davec): This should be defined and exported from waddrmgr.
return nil, waddrmgr.ManagerError{
ErrorCode: waddrmgr.ErrLocked,
Description: "address manager is locked",
}
}
return hl, nil
}
// release releases the hold on the unlocked-state of the wallet and allows the wallet to be locked again. If a lock
// timeout has already expired, the wallet is locked again as soon as release is called.
func (c heldUnlock) release() {
c <- struct{}{}
}
// ChangePrivatePassphrase attempts to change the passphrase for a wallet from old to new. Changing the passphrase is
// synchronized with all other address manager locking and unlocking. The lock state will be the same as it was before
// the password change.
func (w *Wallet) ChangePrivatePassphrase(old, new []byte) error {
err := make(chan error, 1)
w.changePassphrase <- changePassphraseRequest{
old: old,
new: new,
private: true,
err: err,
}
return <-err
}
// ChangePublicPassphrase modifies the public passphrase of the wallet.
func (w *Wallet) ChangePublicPassphrase(old, new []byte) error {
err := make(chan error, 1)
w.changePassphrase <- changePassphraseRequest{
old: old,
new: new,
private: false,
err: err,
}
return <-err
}
// ChangePassphrases modifies the public and private passphrase of the wallet atomically.
func (w *Wallet) ChangePassphrases(
publicOld, publicNew, privateOld,
privateNew []byte,
) error {
err := make(chan error, 1)
w.changePassphrases <- changePassphrasesRequest{
publicOld: publicOld,
publicNew: publicNew,
privateOld: privateOld,
privateNew: privateNew,
err: err,
}
return <-err
}
// // accountUsed returns whether there are any recorded transactions spending to
// // a given account. It returns true if at least one address in the account was
// // used and false if no address in the account was used.
// func (w *Wallet) accountUsed(addrmgrNs walletdb.ReadWriteBucket, account uint32) (bool, error) {
// var used bool
// err := w.Manager.ForEachAccountAddress(addrmgrNs, account,
// func(maddr waddrmgr.ManagedAddress) error {
// used = maddr.Used(addrmgrNs)
// if used {
// return waddrmgr.Break
// }
// return nil
// })
// if err == waddrmgr.Break {
// err = nil
// }
// return used, err
// }
// AccountAddresses returns the addresses for every created address for an
// account.
func (w *Wallet) AccountAddresses(account uint32) (addrs []util.Address, err error) {
err = walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
addrmgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
return w.Manager.ForEachAccountAddress(
addrmgrNs, account, func(maddr waddrmgr.ManagedAddress) error {
addrs = append(addrs, maddr.Address())
return nil
},
)
},
)
return
}
// CalculateBalance sums the amounts of all unspent transaction outputs to addresses of a wallet and returns the
// balance.
//
// If confirmations is 0, all UTXOs, even those not present in a block (height -1), will be used to get the balance.
// Otherwise, a UTXO must be in a block. If confirmations is 1 or greater, the balance will be calculated based on how
// many blocks include a UTXO.
func (w *Wallet) CalculateBalance(confirms int32) (util.Amount, error) {
var balance util.Amount
err := walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
txmgrNs := tx.ReadBucket(wtxmgrNamespaceKey)
var err error
blk := w.Manager.SyncedTo()
balance, err = w.TxStore.Balance(txmgrNs, confirms, blk.Height)
return err
},
)
return balance, err
}
// Balances records total, spendable (by policy), and immature coinbase reward balance amounts.
type Balances struct {
Total util.Amount
Spendable util.Amount
ImmatureReward util.Amount
}
// CalculateAccountBalances sums the amounts of all unspent transaction outputs to the given account of a wallet and
// returns the balance.
//
// This function is much slower than it needs to be since transaction outputs are not indexed by the accounts they
// credit to, and all unspent transaction outputs must be iterated.
func (w *Wallet) CalculateAccountBalances(account uint32, confirms int32) (Balances, error) {
var bals Balances
err := walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
addrmgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
txmgrNs := tx.ReadBucket(wtxmgrNamespaceKey)
// Get current block. The block height is used for calculating the number of tx confirmations.
syncBlock := w.Manager.SyncedTo()
unspent, err := w.TxStore.UnspentOutputs(txmgrNs)
if err != nil {
Error(err)
return err
}
for i := range unspent {
output := &unspent[i]
var outputAcct uint32
_, addrs, _, err := txscript.ExtractPkScriptAddrs(
output.PkScript, w.chainParams,
)
if err == nil && len(addrs) > 0 {
_, outputAcct, err = w.Manager.AddrAccount(addrmgrNs, addrs[0])
}
if err != nil || outputAcct != account {
continue
}
bals.Total += output.Amount
if output.FromCoinBase && !confirmed(
int32(w.chainParams.CoinbaseMaturity),
output.Height, syncBlock.Height,
) {
bals.ImmatureReward += output.Amount
} else if confirmed(confirms, output.Height, syncBlock.Height) {
bals.Spendable += output.Amount
}
}
return nil
},
)
return bals, err
}
// CurrentAddress gets the most recently requested Bitcoin payment address from a wallet for a particular key-chain
// scope. If the address has already been used (there is at least one transaction spending to it in the blockchain or
// pod mempool), the next chained address is returned.
func (w *Wallet) CurrentAddress(account uint32, scope waddrmgr.KeyScope) (util.Address, error) {
chainClient, err := w.requireChainClient()
if err != nil {
Error(err)
return nil, err
}
manager, err := w.Manager.FetchScopedKeyManager(scope)
if err != nil {
Error(err)
return nil, err
}
var (
addr util.Address
props *waddrmgr.AccountProperties
)
err = walletdb.Update(
w.db, func(tx walletdb.ReadWriteTx) error {
addrmgrNs := tx.ReadWriteBucket(waddrmgrNamespaceKey)
maddr, err := manager.LastExternalAddress(addrmgrNs, account)
if err != nil {
Error(err)
// If no address exists yet, create the first external address.
if waddrmgr.IsError(err, waddrmgr.ErrAddressNotFound) {
addr, props, err = w.newAddress(
addrmgrNs, account, scope,
)
}
return err
}
// Get next chained address if the last one has already been used.
if maddr.Used(addrmgrNs) {
addr, props, err = w.newAddress(
addrmgrNs, account, scope,
)
return err
}
addr = maddr.Address()
return nil
},
)
if err != nil {
Error(err)
return nil, err
}
// If the props have been initialized, then we had to create a new address to satisfy the query. Notify the rpc server
// about the new address.
if props != nil {
err = chainClient.NotifyReceived([]util.Address{addr})
if err != nil {
Error(err)
return nil, err
}
w.NtfnServer.notifyAccountProperties(props)
}
return addr, nil
}
// PubKeyForAddress looks up the associated public key for a P2PKH address.
func (w *Wallet) PubKeyForAddress(a util.Address) (*ec.PublicKey, error) {
var pubKey *ec.PublicKey
err := walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
addrmgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
managedAddr, err := w.Manager.Address(addrmgrNs, a)
if err != nil {
Error(err)
return err
}
managedPubKeyAddr, ok := managedAddr.(waddrmgr.ManagedPubKeyAddress)
if !ok {
return errors.New("address does not have an associated public key")
}
pubKey = managedPubKeyAddr.PubKey()
return nil
},
)
return pubKey, err
}
// PrivKeyForAddress looks up the associated private key for a P2PKH or P2PK address.
func (w *Wallet) PrivKeyForAddress(a util.Address) (*ec.PrivateKey, error) {
var privKey *ec.PrivateKey
err := walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
addrmgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
managedAddr, err := w.Manager.Address(addrmgrNs, a)
if err != nil {
Error(err)
return err
}
managedPubKeyAddr, ok := managedAddr.(waddrmgr.ManagedPubKeyAddress)
if !ok {
return errors.New("address does not have an associated private key")
}
privKey, err = managedPubKeyAddr.PrivKey()
return err
},
)
return privKey, err
}
// HaveAddress returns whether the wallet is the owner of the address a.
func (w *Wallet) HaveAddress(a util.Address) (bool, error) {
err := walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
addrmgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
_, err := w.Manager.Address(addrmgrNs, a)
return err
},
)
if err == nil {
return true, nil
}
if waddrmgr.IsError(err, waddrmgr.ErrAddressNotFound) {
return false, nil
}
return false, err
}
// AccountOfAddress finds the account that an address is associated with.
func (w *Wallet) AccountOfAddress(a util.Address) (uint32, error) {
var account uint32
err := walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
addrmgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
var err error
_, account, err = w.Manager.AddrAccount(addrmgrNs, a)
return err
},
)
return account, err
}
// AddressInfo returns detailed information regarding a wallet address.
func (w *Wallet) AddressInfo(a util.Address) (waddrmgr.ManagedAddress, error) {
var managedAddress waddrmgr.ManagedAddress
err := walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
addrmgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
var err error
managedAddress, err = w.Manager.Address(addrmgrNs, a)
return err
},
)
return managedAddress, err
}
// AccountNumber returns the account number for an account name under a particular key scope.
func (w *Wallet) AccountNumber(scope waddrmgr.KeyScope, accountName string) (uint32, error) {
manager, err := w.Manager.FetchScopedKeyManager(scope)
if err != nil {
Error(err)
return 0, err
}
var account uint32
err = walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
addrmgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
var err error
account, err = manager.LookupAccount(addrmgrNs, accountName)
return err
},
)
return account, err
}
// AccountName returns the name of an account.
func (w *Wallet) AccountName(scope waddrmgr.KeyScope, accountNumber uint32) (string, error) {
manager, err := w.Manager.FetchScopedKeyManager(scope)
if err != nil {
Error(err)
return "", err
}
var accountName string
err = walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
addrmgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
var err error
accountName, err = manager.AccountName(addrmgrNs, accountNumber)
return err
},
)
return accountName, err
}
// AccountProperties returns the properties of an account, including address indexes and name. It first fetches the
// desynced information from the address manager, then updates the indexes based on the address pools.
func (w *Wallet) AccountProperties(scope waddrmgr.KeyScope, acct uint32) (*waddrmgr.AccountProperties, error) {
manager, err := w.Manager.FetchScopedKeyManager(scope)
if err != nil {
Error(err)
return nil, err
}
var props *waddrmgr.AccountProperties
err = walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
waddrmgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
var err error
props, err = manager.AccountProperties(waddrmgrNs, acct)
return err
},
)
return props, err
}
// RenameAccount sets the name for an account number to newName.
func (w *Wallet) RenameAccount(scope waddrmgr.KeyScope, account uint32, newName string) error {
manager, err := w.Manager.FetchScopedKeyManager(scope)
if err != nil {
Error(err)
return err
}
var props *waddrmgr.AccountProperties
err = walletdb.Update(
w.db, func(tx walletdb.ReadWriteTx) error {
addrmgrNs := tx.ReadWriteBucket(waddrmgrNamespaceKey)
err := manager.RenameAccount(addrmgrNs, account, newName)
if err != nil {
Error(err)
return err
}
props, err = manager.AccountProperties(addrmgrNs, account)
return err
},
)
if err == nil {
w.NtfnServer.notifyAccountProperties(props)
}
return err
}
// const maxEmptyAccounts = 100
// NextAccount creates the next account and returns its account number. The name must be unique to the account. In order
// to support automatic seed restoring, new accounts may not be created when all of the previous 100 accounts have no
// transaction history (this is a deviation from the BIP0044 spec, which allows no unused account gaps).
func (w *Wallet) NextAccount(scope waddrmgr.KeyScope, name string) (uint32, error) {
manager, err := w.Manager.FetchScopedKeyManager(scope)
if err != nil {
Error(err)
return 0, err
}
var (
account uint32
props *waddrmgr.AccountProperties
)
err = walletdb.Update(
w.db, func(tx walletdb.ReadWriteTx) error {
addrmgrNs := tx.ReadWriteBucket(waddrmgrNamespaceKey)
var err error
account, err = manager.NewAccount(addrmgrNs, name)
if err != nil {
Error(err)
return err
}
props, err = manager.AccountProperties(addrmgrNs, account)
return err
},
)
if err != nil {
Error(err)
Error(
"cannot fetch new account properties for notification after"+
" account creation:", err,
)
}
w.NtfnServer.notifyAccountProperties(props)
return account, err
}
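// exampleCreateNamedAccount is an illustrative sketch only (it is not called anywhere in this package) of the account
// helpers above: a new account named "savings" is created under the BIP0044 scope and its name is read back by
// number. waddrmgr.KeyScopeBIP0044 is assumed to be exported by the waddrmgr package as in upstream btcwallet; the
// account name is a placeholder.
func exampleCreateNamedAccount(w *Wallet) (uint32, string, error) {
account, err := w.NextAccount(waddrmgr.KeyScopeBIP0044, "savings")
if err != nil {
return 0, "", err
}
// Round-trip the account number back to its name.
name, err := w.AccountName(waddrmgr.KeyScopeBIP0044, account)
return account, name, err
}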
// CreditCategory describes the type of wallet transaction output. The category of "sent transactions" (debits) is
// always "send", and is not expressed by this type.
//
// TODO: This is a requirement of the RPC server and should be moved.
type CreditCategory byte
// These constants define the possible credit categories.
const (
CreditReceive CreditCategory = iota
CreditGenerate
CreditImmature
)
// String returns the category as a string. This string may be used as the JSON string for categories as part of
// listtransactions and gettransaction RPC responses.
func (c CreditCategory) String() string {
switch c {
case CreditReceive:
return "receive"
case CreditGenerate:
return "generate"
case CreditImmature:
return "immature"
default:
return "unknown"
}
}
// RecvCategory returns the category of received credit outputs from a transaction record. The passed block chain height
// is used to distinguish immature from mature coinbase outputs.
//
// TODO: This is intended for use by the RPC server and should be moved out of this package at a later time.
func RecvCategory(details *wtxmgr.TxDetails, syncHeight int32, net *netparams.Params) CreditCategory {
if blockchain.IsCoinBaseTx(&details.MsgTx) {
if confirmed(
int32(net.CoinbaseMaturity), details.Block.Height,
syncHeight,
) {
return CreditGenerate
}
return CreditImmature
}
return CreditReceive
}
// listTransactions creates an object that may be marshalled to a response result for a listtransactions RPC.
//
// TODO: This should be moved to the legacyrpc package.
func listTransactions(
tx walletdb.ReadTx, details *wtxmgr.TxDetails, addrMgr *waddrmgr.Manager,
syncHeight int32, net *netparams.Params,
) []btcjson.ListTransactionsResult {
addrmgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
var (
blockHashStr string
blockTime int64
confirmations int64
)
if details.Block.Height != -1 {
blockHashStr = details.Block.Hash.String()
blockTime = details.Block.Time.Unix()
confirmations = int64(confirms(details.Block.Height, syncHeight))
}
results := []btcjson.ListTransactionsResult{}
txHashStr := details.Hash.String()
received := details.Received.Unix()
generated := blockchain.IsCoinBaseTx(&details.MsgTx)
recvCat := RecvCategory(details, syncHeight, net).String()
send := len(details.Debits) != 0
// Fee can only be determined if every input is a debit.
var feeF64 float64
if len(details.Debits) == len(details.MsgTx.TxIn) {
var debitTotal util.Amount
for _, deb := range details.Debits {
debitTotal += deb.Amount
}
var outputTotal util.Amount
for _, output := range details.MsgTx.TxOut {
outputTotal += util.Amount(output.Value)
}
// Note: The actual fee is debitTotal - outputTotal. However, this RPC reports negative numbers for fees, so the
// inverse is calculated.
feeF64 = (outputTotal - debitTotal).ToDUO()
}
outputs:
for i, output := range details.MsgTx.TxOut {
// Determine if this output is a credit, and if so, determine its spentness.
var isCredit bool
var spentCredit bool
for _, cred := range details.Credits {
if cred.Index == uint32(i) {
// Change outputs are ignored.
if cred.Change {
continue outputs
}
isCredit = true
spentCredit = cred.Spent
break
}
}
var address string
var accountName string
_, addrs, _, _ := txscript.ExtractPkScriptAddrs(output.PkScript, net)
if len(addrs) == 1 {
addr := addrs[0]
address = addr.EncodeAddress()
mgr, account, err := addrMgr.AddrAccount(addrmgrNs, addrs[0])
if err == nil {
accountName, err = mgr.AccountName(addrmgrNs, account)
if err != nil {
Error(err)
accountName = ""
}
}
}
amountF64 := util.Amount(output.Value).ToDUO()
blockIndex := int64(details.Block.Height)
result := btcjson.ListTransactionsResult{
// Fields left zeroed:
// InvolvesWatchOnly
//
// Fields set below:
// Account (only for non-"send" categories)
// Category
// Amount
// Fee
Address: address,
Vout: uint32(i),
Confirmations: confirmations,
Generated: generated,
BlockHash: blockHashStr,
BlockIndex: blockIndex,
BlockTime: blockTime,
TxID: txHashStr,
WalletConflicts: []string{},
Time: received,
TimeReceived: received,
}
// Add a received/generated/immature result if this is a credit. If the output was spent, create a second result
// under the send category with the inverse of the output amount. It is therefore possible that a single output
// may be included in the results set zero, one, or two times.
//
// Since credits are not saved for outputs that are not controlled by this wallet, all non-credits from
// transactions with debits are grouped under the send category.
if send || spentCredit {
result.Category = "send"
result.Amount = -amountF64
result.Fee = feeF64
results = append(results, result)
}
if isCredit {
result.Account = accountName
result.Category = recvCat
result.Amount = amountF64
result.Fee = 0
results = append(results, result)
}
}
return results
}
// ListSinceBlock returns a slice of objects with details about transactions since the given block. If the block is -1
// then all transactions are included. This is intended to be used for listsinceblock RPC replies.
func (w *Wallet) ListSinceBlock(start, end, syncHeight int32) (txList []btcjson.ListTransactionsResult, err error) {
txList = []btcjson.ListTransactionsResult{}
err = walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
txmgrNs := tx.ReadBucket(wtxmgrNamespaceKey)
rangeFn := func(details []wtxmgr.TxDetails) (bool, error) {
for _, detail := range details {
jsonResults := listTransactions(
tx, &detail,
w.Manager, syncHeight, w.chainParams,
)
txList = append(txList, jsonResults...)
}
return false, nil
}
return w.TxStore.RangeTransactions(txmgrNs, start, end, rangeFn)
},
)
return
}
// ListTransactions returns a slice of objects with details about a recorded transaction. This is intended to be used
// for listtransactions RPC replies.
func (w *Wallet) ListTransactions(from, count int) (txList []btcjson.ListTransactionsResult, err error) {
// txList := []btcjson.ListTransactionsResult{}
// Debug("ListTransactions", from, count)
if err = walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
txmgrNs := tx.ReadBucket(wtxmgrNamespaceKey)
// Get current block. The block height is used for calculating the number of tx confirmations.
syncBlock := w.Manager.SyncedTo()
// Debug("synced to", syncBlock)
// Need to skip the first from transactions, and after those, only include the next count transactions.
skipped := 0
n := 0
rangeFn := func(details []wtxmgr.TxDetails) (bool, error) {
// Iterate over transactions at this height in reverse order. This does nothing for unmined transactions,
// which are unsorted, but it will process mined transactions in the reverse order they were marked mined.
for i := len(details) - 1; i >= 0; i-- {
if from > skipped {
skipped++
continue
}
n++
if n > count {
return true, nil
}
jsonResults := listTransactions(
tx, &details[i],
w.Manager, syncBlock.Height, w.chainParams,
)
txList = append(txList, jsonResults...)
if len(jsonResults) > 0 {
n++
}
}
return false, nil
}
// Return newer results first by starting at mempool height and working down to the genesis block.
return w.TxStore.RangeTransactions(txmgrNs, -1, 0, rangeFn)
},
); Check(err) {
}
return
}
// ListAddressTransactions returns a slice of objects with details about recorded transactions to or from any address
// belonging to a set. This is intended to be used for listaddresstransactions RPC replies.
func (w *Wallet) ListAddressTransactions(pkHashes map[string]struct{}) ([]btcjson.ListTransactionsResult, error) {
txList := []btcjson.ListTransactionsResult{}
err := walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
txmgrNs := tx.ReadBucket(wtxmgrNamespaceKey)
// Get current block. The block height is used for calculating the number of tx confirmations.
syncBlock := w.Manager.SyncedTo()
rangeFn := func(details []wtxmgr.TxDetails) (bool, error) {
loopDetails:
for i := range details {
detail := &details[i]
for _, cred := range detail.Credits {
pkScript := detail.MsgTx.TxOut[cred.Index].PkScript
_, addrs, _, err := txscript.ExtractPkScriptAddrs(
pkScript, w.chainParams,
)
if err != nil || len(addrs) != 1 {
continue
}
apkh, ok := addrs[0].(*util.AddressPubKeyHash)
if !ok {
continue
}
_, ok = pkHashes[string(apkh.ScriptAddress())]
if !ok {
continue
}
jsonResults := listTransactions(
tx, detail,
w.Manager, syncBlock.Height, w.chainParams,
)
// if err != nil {
// return false, err
// }
txList = append(txList, jsonResults...)
continue loopDetails
}
}
return false, nil
}
return w.TxStore.RangeTransactions(txmgrNs, 0, -1, rangeFn)
},
)
return txList, err
}
// ListAllTransactions returns a slice of objects with details about a recorded transaction. This is intended to be used
// for listalltransactions RPC replies.
func (w *Wallet) ListAllTransactions() ([]btcjson.ListTransactionsResult, error) {
txList := []btcjson.ListTransactionsResult{}
err := walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
txmgrNs := tx.ReadBucket(wtxmgrNamespaceKey)
// Get current block. The block height is used for calculating the number of tx confirmations.
syncBlock := w.Manager.SyncedTo()
rangeFn := func(details []wtxmgr.TxDetails) (bool, error) {
// Iterate over transactions at this height in reverse order. This does nothing for unmined transactions,
// which are unsorted, but it will process mined transactions in the reverse order they were marked mined.
for i := len(details) - 1; i >= 0; i-- {
jsonResults := listTransactions(
tx, &details[i], w.Manager,
syncBlock.Height, w.chainParams,
)
txList = append(txList, jsonResults...)
}
return false, nil
}
// Return newer results first by starting at mempool height and working down to the genesis block.
return w.TxStore.RangeTransactions(txmgrNs, -1, 0, rangeFn)
},
)
return txList, err
}
// BlockIdentifier identifies a block by either a height or a hash.
type BlockIdentifier struct {
height int32
hash *chainhash.Hash
}
// NewBlockIdentifierFromHeight constructs a BlockIdentifier for a block height.
func NewBlockIdentifierFromHeight(height int32) *BlockIdentifier {
return &BlockIdentifier{height: height}
}
// NewBlockIdentifierFromHash constructs a BlockIdentifier for a block hash.
func NewBlockIdentifierFromHash(hash *chainhash.Hash) *BlockIdentifier {
return &BlockIdentifier{hash: hash}
}
// GetTransactionsResult is the result of the wallet's GetTransactions method. See GetTransactions for more details.
type GetTransactionsResult struct {
MinedTransactions []Block
UnminedTransactions []TransactionSummary
}
// GetTransactions returns transaction results between a starting and ending block. Blocks in the block range may be
// specified by either a height or a hash.
//
// Because this is a possibly lengthy operation, a cancel channel is provided to cancel the task. If this channel
// unblocks, the results created thus far will be returned.
//
// Transaction results are organized by blocks in ascending order and unmined transactions in an unspecified order.
// Mined transactions are saved in a Block structure which records properties about the block.
func (w *Wallet) GetTransactions(startBlock, endBlock *BlockIdentifier, cancel qu.C) (
*GetTransactionsResult,
error,
) {
var start, end int32 = 0, -1
w.chainClientLock.Lock()
chainClient := w.chainClient
w.chainClientLock.Unlock()
// TODO: Fetching block heights by their hashes is inherently racy because not all block headers are saved but when
// they are for SPV the db can be queried directly without this.
var startResp, endResp rpcclient.FutureGetBlockVerboseResult
if startBlock != nil {
if startBlock.hash == nil {
start = startBlock.height
} else {
if chainClient == nil {
return nil, errors.New("no chain server client")
}
switch client := chainClient.(type) {
case *chain.RPCClient:
startResp = client.GetBlockVerboseTxAsync(startBlock.hash)
case *chain.BitcoindClient:
var err error
start, err = client.GetBlockHeight(startBlock.hash)
if err != nil {
Error(err)
return nil, err
}
case *chain.NeutrinoClient:
var err error
start, err = client.GetBlockHeight(startBlock.hash)
if err != nil {
Error(err)
return nil, err
}
}
}
}
if endBlock != nil {
if endBlock.hash == nil {
end = endBlock.height
} else {
if chainClient == nil {
return nil, errors.New("no chain server client")
}
switch client := chainClient.(type) {
case *chain.RPCClient:
endResp = client.GetBlockVerboseTxAsync(endBlock.hash)
case *chain.NeutrinoClient:
var err error
end, err = client.GetBlockHeight(endBlock.hash)
if err != nil {
Error(err)
return nil, err
}
}
}
}
if startResp != nil {
resp, err := startResp.Receive()
if err != nil {
Error(err)
return nil, err
}
start = int32(resp.Height)
}
if endResp != nil {
resp, err := endResp.Receive()
if err != nil {
Error(err)
return nil, err
}
end = int32(resp.Height)
}
var res GetTransactionsResult
err := walletdb.View(
w.db, func(dbtx walletdb.ReadTx) error {
txmgrNs := dbtx.ReadBucket(wtxmgrNamespaceKey)
rangeFn := func(details []wtxmgr.TxDetails) (bool, error) {
// TODO: probably should make RangeTransactions not reuse the
// details backing array memory.
dets := make([]wtxmgr.TxDetails, len(details))
copy(dets, details)
details = dets
txs := make([]TransactionSummary, 0, len(details))
for i := range details {
txs = append(txs, makeTxSummary(dbtx, w, &details[i]))
}
if details[0].Block.Height != -1 {
blockHash := details[0].Block.Hash
res.MinedTransactions = append(
res.MinedTransactions, Block{
Hash: &blockHash,
Height: details[0].Block.Height,
Timestamp: details[0].Block.Time.Unix(),
Transactions: txs,
},
)
} else {
res.UnminedTransactions = txs
}
select {
case <-cancel:
return true, nil
default:
return false, nil
}
}
return w.TxStore.RangeTransactions(txmgrNs, start, end, rangeFn)
},
)
return &res, err
}
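// exampleGetAllTransactions is an illustrative sketch only (it is not called anywhere in this package) showing how
// GetTransactions is driven with BlockIdentifier values: height 0 through -1 covers the genesis block up to unmined
// transactions. A nil cancel channel disables cancellation, since a receive from a nil channel never proceeds and the
// select above falls through to its default case.
func exampleGetAllTransactions(w *Wallet) (*GetTransactionsResult, error) {
start := NewBlockIdentifierFromHeight(0)
end := NewBlockIdentifierFromHeight(-1)
return w.GetTransactions(start, end, nil)
}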
// AccountResult is a single account result for the AccountsResult type.
type AccountResult struct {
waddrmgr.AccountProperties
TotalBalance util.Amount
}
// AccountsResult is the result of the wallet's Accounts method. See that method for more details.
type AccountsResult struct {
Accounts []AccountResult
CurrentBlockHash *chainhash.Hash
CurrentBlockHeight int32
}
// Accounts returns the current names, numbers, and total balances of all accounts in the wallet restricted to a
// particular key scope. The current chain tip is included in the result for atomicity reasons.
//
// TODO(jrick): Is the chain tip really needed, since only the total balances are included?
func (w *Wallet) Accounts(scope waddrmgr.KeyScope) (*AccountsResult, error) {
manager, err := w.Manager.FetchScopedKeyManager(scope)
if err != nil {
Error(err)
return nil, err
}
var (
accounts []AccountResult
syncBlockHash *chainhash.Hash
syncBlockHeight int32
)
err = walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
addrmgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
txmgrNs := tx.ReadBucket(wtxmgrNamespaceKey)
syncBlock := w.Manager.SyncedTo()
syncBlockHash = &syncBlock.Hash
syncBlockHeight = syncBlock.Height
unspent, err := w.TxStore.UnspentOutputs(txmgrNs)
if err != nil {
Error(err)
return err
}
err = manager.ForEachAccount(
addrmgrNs, func(acct uint32) error {
props, err := manager.AccountProperties(addrmgrNs, acct)
if err != nil {
Error(err)
return err
}
accounts = append(
accounts, AccountResult{
AccountProperties: *props,
// TotalBalance set below
},
)
return nil
},
)
if err != nil {
Error(err)
return err
}
m := make(map[uint32]*util.Amount)
for i := range accounts {
a := &accounts[i]
m[a.AccountNumber] = &a.TotalBalance
}
for i := range unspent {
output := unspent[i]
var outputAcct uint32
_, addrs, _, err := txscript.ExtractPkScriptAddrs(output.PkScript, w.chainParams)
if err == nil && len(addrs) > 0 {
_, outputAcct, err = w.Manager.AddrAccount(addrmgrNs, addrs[0])
}
if err == nil {
amt, ok := m[outputAcct]
if ok {
*amt += output.Amount
}
}
}
return nil
},
)
return &AccountsResult{
Accounts: accounts,
CurrentBlockHash: syncBlockHash,
CurrentBlockHeight: syncBlockHeight,
},
err
}
// AccountBalanceResult is a single result for the Wallet.AccountBalances method.
type AccountBalanceResult struct {
AccountNumber uint32
AccountName string
AccountBalance util.Amount
}
// AccountBalances returns all accounts in the wallet and their balances. Balances are determined by excluding
// transactions that have not met requiredConfs confirmations.
func (w *Wallet) AccountBalances(
scope waddrmgr.KeyScope,
requiredConfs int32,
) ([]AccountBalanceResult, error) {
manager, err := w.Manager.FetchScopedKeyManager(scope)
if err != nil {
Error(err)
return nil, err
}
var results []AccountBalanceResult
err = walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
addrmgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
txmgrNs := tx.ReadBucket(wtxmgrNamespaceKey)
syncBlock := w.Manager.SyncedTo()
// Fill out all account info except for the balances.
lastAcct, err := manager.LastAccount(addrmgrNs)
if err != nil {
Error(err)
return err
}
results = make([]AccountBalanceResult, lastAcct+2)
for i := range results[:len(results)-1] {
accountName, err := manager.AccountName(addrmgrNs, uint32(i))
if err != nil {
Error(err)
return err
}
results[i].AccountNumber = uint32(i)
results[i].AccountName = accountName
}
results[len(results)-1].AccountNumber = waddrmgr.ImportedAddrAccount
results[len(results)-1].AccountName = waddrmgr.ImportedAddrAccountName
// Fetch all unspent outputs, and iterate over them tallying each account's balance where the output script pays
// to an account address and the required number of confirmations is met.
unspentOutputs, err := w.TxStore.UnspentOutputs(txmgrNs)
if err != nil {
Error(err)
return err
}
for i := range unspentOutputs {
output := &unspentOutputs[i]
if !confirmed(requiredConfs, output.Height, syncBlock.Height) {
continue
}
if output.FromCoinBase && !confirmed(
int32(w.ChainParams().CoinbaseMaturity),
output.Height, syncBlock.Height,
) {
continue
}
_, addrs, _, err := txscript.ExtractPkScriptAddrs(output.PkScript, w.chainParams)
if err != nil || len(addrs) == 0 {
continue
}
outputAcct, err := manager.AddrAccount(addrmgrNs, addrs[0])
if err != nil {
Error(err)
continue
}
switch {
case outputAcct == waddrmgr.ImportedAddrAccount:
results[len(results)-1].AccountBalance += output.Amount
case outputAcct > lastAcct:
return errors.New(
"waddrmgr.Manager.AddrAccount returned account " +
"beyond recorded last account",
)
default:
results[outputAcct].AccountBalance += output.Amount
}
}
return nil
},
)
return results, err
}
// creditSlice satisfies the sort.Interface interface to provide sorting transaction credits from oldest to newest.
// Credits with the same receive time and mined in the same block are not guaranteed to be sorted by the order they
// appear in the block. Credits from the same transaction are sorted by output index.
type creditSlice []wtxmgr.Credit
func (s creditSlice) Len() int {
return len(s)
}
func (s creditSlice) Less(i, j int) bool {
switch {
// If both credits are from the same tx, sort by output index.
case s[i].OutPoint.Hash == s[j].OutPoint.Hash:
return s[i].OutPoint.Index < s[j].OutPoint.Index
// If both transactions are unmined, sort by their received date.
case s[i].Height == -1 && s[j].Height == -1:
return s[i].Received.Before(s[j].Received)
// Unmined (newer) txs always come last.
case s[i].Height == -1:
return false
case s[j].Height == -1:
return true
// If both txs are mined in different blocks, sort by block height.
default:
return s[i].Height < s[j].Height
}
}
func (s creditSlice) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
// ListUnspent returns a slice of objects representing the unspent wallet transactions fitting the given criteria. The
// confirmations will be at least minconf and at most maxconf, and if addresses is populated only the addresses contained
// within it will be considered. If we know nothing about a transaction an empty array will be returned.
func (w *Wallet) ListUnspent(
minconf, maxconf int32,
addresses map[string]struct{},
) ([]*btcjson.ListUnspentResult, error) {
var results []*btcjson.ListUnspentResult
err := walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
addrmgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
txmgrNs := tx.ReadBucket(wtxmgrNamespaceKey)
syncBlock := w.Manager.SyncedTo()
filter := len(addresses) != 0
unspent, err := w.TxStore.UnspentOutputs(txmgrNs)
if err != nil {
Error(err)
return err
}
sort.Sort(sort.Reverse(creditSlice(unspent)))
defaultAccountName := "default"
results = make([]*btcjson.ListUnspentResult, 0, len(unspent))
for i := range unspent {
output := unspent[i]
// Outputs with fewer confirmations than the minimum or more confs than the maximum are excluded.
confs := confirms(output.Height, syncBlock.Height)
if confs < minconf || confs > maxconf {
continue
}
// Only mature coinbase outputs are included.
if output.FromCoinBase {
target := int32(w.ChainParams().CoinbaseMaturity)
if !confirmed(target, output.Height, syncBlock.Height) {
continue
}
}
// Exclude locked outputs from the result set.
if w.LockedOutpoint(output.OutPoint) {
continue
}
// Lookup the associated account for the output. Use the default account name in case there is no associated
// account for some reason, although this should never happen.
//
// This will be unnecessary once transactions and outputs are grouped under the associated account in the
// db.
acctName := defaultAccountName
sc, addrs, _, err := txscript.ExtractPkScriptAddrs(
output.PkScript, w.chainParams,
)
if err != nil {
Error(err)
continue
}
if len(addrs) > 0 {
smgr, acct, err := w.Manager.AddrAccount(addrmgrNs, addrs[0])
if err == nil {
s, err := smgr.AccountName(addrmgrNs, acct)
if err == nil {
acctName = s
}
}
}
if filter {
for _, addr := range addrs {
_, ok := addresses[addr.EncodeAddress()]
if ok {
goto include
}
}
continue
}
include:
// At the moment watch-only addresses are not supported, so all recorded outputs that are not multisig are
// "spendable". Multisig outputs are only "spendable" if all keys are controlled by this wallet.
//
// TODO: Each case will need updates when watch-only addrs is added. For P2PK, P2PKH, and P2SH, the address
// must be looked up and not be watching-only. For multisig, all pubkeys must belong to the manager with the
// associated private key (currently it only checks whether the pubkey exists, since the private key is
// required at the moment).
var spendable bool
scSwitch:
switch sc {
case txscript.PubKeyHashTy:
spendable = true
case txscript.PubKeyTy:
spendable = true
case txscript.WitnessV0ScriptHashTy:
spendable = true
case txscript.WitnessV0PubKeyHashTy:
spendable = true
case txscript.MultiSigTy:
for _, a := range addrs {
_, err := w.Manager.Address(addrmgrNs, a)
if err == nil {
continue
}
if waddrmgr.IsError(err, waddrmgr.ErrAddressNotFound) {
break scSwitch
}
return err
}
spendable = true
}
result := &btcjson.ListUnspentResult{
TxID: output.OutPoint.Hash.String(),
Vout: output.OutPoint.Index,
Account: acctName,
ScriptPubKey: hex.EncodeToString(output.PkScript),
Amount: output.Amount.ToDUO(),
Confirmations: int64(confs),
Spendable: spendable,
}
// BUG: this should be a JSON array so that all addresses can be included, or removed (and the caller
// extracts addresses from the pkScript).
if len(addrs) > 0 {
result.Address = addrs[0].EncodeAddress()
}
results = append(results, result)
}
return nil
},
)
return results, err
}
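// exampleListUnspentForAddress is an illustrative sketch only (it is not called anywhere in this package) of
// ListUnspent: UTXOs with between 1 and 9999999 confirmations are returned, restricted to a single address. Passing a
// nil or empty map instead disables the address filter; the confirmation bounds are placeholders.
func exampleListUnspentForAddress(w *Wallet, addr util.Address) ([]*btcjson.ListUnspentResult, error) {
filter := map[string]struct{}{addr.EncodeAddress(): {}}
return w.ListUnspent(1, 9999999, filter)
}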
// DumpPrivKeys returns the WIF-encoded private keys for all addresses with private keys in a wallet.
func (w *Wallet) DumpPrivKeys() ([]string, error) {
var privkeys []string
err := walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
addrmgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
// Iterate over each active address, appending the private key to privkeys.
return w.Manager.ForEachActiveAddress(
addrmgrNs, func(addr util.Address) error {
ma, err := w.Manager.Address(addrmgrNs, addr)
if err != nil {
Error(err)
return err
}
// Only those addresses with keys needed.
pka, ok := ma.(waddrmgr.ManagedPubKeyAddress)
if !ok {
return nil
}
wif, err := pka.ExportPrivKey()
if err != nil {
Error(err)
// It would be nice to zero out the array here. However, since strings in Go are immutable, and we have
// no control over the caller, I don't think we can. :(
return err
}
privkeys = append(privkeys, wif.String())
return nil
},
)
},
)
return privkeys, err
}
// DumpWIFPrivateKey returns the WIF encoded private key for a single wallet address.
func (w *Wallet) DumpWIFPrivateKey(addr util.Address) (string, error) {
var maddr waddrmgr.ManagedAddress
err := walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
waddrmgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
// Get private key from wallet if it exists.
var err error
maddr, err = w.Manager.Address(waddrmgrNs, addr)
return err
},
)
if err != nil {
Error(err)
return "", err
}
pka, ok := maddr.(waddrmgr.ManagedPubKeyAddress)
if !ok {
return "", fmt.Errorf("address %s is not a key type", addr)
}
wif, err := pka.ExportPrivKey()
if err != nil {
Error(err)
return "", err
}
return wif.String(), nil
}
// ImportPrivateKey imports a private key to the wallet and writes the new wallet to disk.
func (w *Wallet) ImportPrivateKey(
scope waddrmgr.KeyScope, wif *util.WIF,
bs *waddrmgr.BlockStamp, rescan bool,
) (string, error) {
manager, err := w.Manager.FetchScopedKeyManager(scope)
if err != nil {
Error(err)
return "", err
}
// The starting block for the key is the genesis block unless otherwise specified.
var newBirthday time.Time
if bs == nil {
bs = &waddrmgr.BlockStamp{
Hash: *w.chainParams.GenesisHash,
Height: 0,
}
} else {
// Only update the new birthday time from default value if we actually have timestamp info in the header.
header, err := w.chainClient.GetBlockHeader(&bs.Hash)
if err == nil {
newBirthday = header.Timestamp
}
}
// Attempt to import private key into wallet.
var addr util.Address
var props *waddrmgr.AccountProperties
err = walletdb.Update(
w.db, func(tx walletdb.ReadWriteTx) error {
addrmgrNs := tx.ReadWriteBucket(waddrmgrNamespaceKey)
maddr, err := manager.ImportPrivateKey(addrmgrNs, wif, bs)
if err != nil {
Error(err)
return err
}
addr = maddr.Address()
props, err = manager.AccountProperties(
addrmgrNs, waddrmgr.ImportedAddrAccount,
)
if err != nil {
Error(err)
return err
}
return w.Manager.SetBirthday(addrmgrNs, newBirthday)
},
)
if err != nil {
Error(err)
return "", err
}
// Rescan blockchain for transactions with txout scripts paying to the imported address.
if rescan {
job := &RescanJob{
Addrs: []util.Address{addr},
OutPoints: nil,
BlockStamp: *bs,
}
// Submit rescan job and log when the import has completed. Do not block on finishing the rescan. The rescan
// success or failure is logged elsewhere, and the channel is not required to be read, so discard the return
// value.
_ = w.SubmitRescan(job)
} else {
err := w.chainClient.NotifyReceived([]util.Address{addr})
if err != nil {
Error(err)
return "", fmt.Errorf(
"failed to subscribe for address ntfns for "+
"address %s: %s", addr.EncodeAddress(), err,
)
}
}
addrStr := addr.EncodeAddress()
Info("imported payment address", addrStr)
w.NtfnServer.notifyAccountProperties(props)
// Return the payment address string of the imported private key.
return addrStr, nil
}
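// exampleImportWIF is an illustrative sketch only (it is not called anywhere in this package) of ImportPrivateKey: a
// WIF-encoded key is imported under the BIP0044 scope and a rescan from the genesis block is requested so historical
// outputs paying to the key are discovered. waddrmgr.KeyScopeBIP0044 is assumed to be exported by the waddrmgr
// package as in upstream btcwallet.
func exampleImportWIF(w *Wallet, wif *util.WIF) (string, error) {
// A nil BlockStamp makes ImportPrivateKey default to the genesis block as the key's birthday.
return w.ImportPrivateKey(waddrmgr.KeyScopeBIP0044, wif, nil, true)
}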
// LockedOutpoint returns whether an outpoint has been marked as locked and should not be used as an input for created
// transactions.
func (w *Wallet) LockedOutpoint(op wire.OutPoint) bool {
_, locked := w.lockedOutpoints[op]
return locked
}
// LockOutpoint marks an outpoint as locked, that is, it should not be used as an input for newly created transactions.
func (w *Wallet) LockOutpoint(op wire.OutPoint) {
w.lockedOutpoints[op] = struct{}{}
}
// UnlockOutpoint marks an outpoint as unlocked, that is, it may be used as an input for newly created transactions.
func (w *Wallet) UnlockOutpoint(op wire.OutPoint) {
delete(w.lockedOutpoints, op)
}
// ResetLockedOutpoints resets the set of locked outpoints so all may be used as inputs for new transactions.
func (w *Wallet) ResetLockedOutpoints() {
w.lockedOutpoints = map[wire.OutPoint]struct{}{}
}
// LockedOutpoints returns a slice of currently locked outpoints. This is intended to be used by marshaling the result
// as a JSON array for listlockunspent RPC results.
func (w *Wallet) LockedOutpoints() []btcjson.TransactionInput {
locked := make([]btcjson.TransactionInput, len(w.lockedOutpoints))
i := 0
for op := range w.lockedOutpoints {
locked[i] = btcjson.TransactionInput{
Txid: op.Hash.String(),
Vout: op.Index,
}
i++
}
return locked
}
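// exampleReserveOutpoint is an illustrative sketch only (it is not called anywhere in this package) of the outpoint
// locking helpers above: an outpoint is marked locked so ListUnspent and coin selection skip it, and unlocked again
// when the caller is finished with it.
func exampleReserveOutpoint(w *Wallet, op wire.OutPoint) []btcjson.TransactionInput {
w.LockOutpoint(op)
defer w.UnlockOutpoint(op)
// While locked, the outpoint is reported by LockedOutpoints and excluded from spendable results.
return w.LockedOutpoints()
}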
// resendUnminedTxs iterates through all transactions that spend from wallet credits that are not known to have been
// mined into a block, and attempts to send each to the chain server for relay.
func (w *Wallet) resendUnminedTxs() {
chainClient, err := w.requireChainClient()
if err != nil {
Error(err)
Error("no chain server available to resend unmined transactions", err)
return
}
var txs []*wire.MsgTx
err = walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
txmgrNs := tx.ReadBucket(wtxmgrNamespaceKey)
var err error
txs, err = w.TxStore.UnminedTxs(txmgrNs)
return err
},
)
if err != nil {
Error(err)
Error("cannot load unmined transactions for resending:", err)
return
}
for _, tx := range txs {
resp, err := chainClient.SendRawTransaction(tx, false)
if err != nil {
Error(err)
Debugf(
"could not resend transaction %v: %v",
tx.TxHash(), err,
)
// We'll only stop broadcasting transactions if we detect that the output has already been fully spent, is
// an orphan, or is conflicting with another transaction.
//
// TODO(roasbeef): SendRawTransaction needs to return concrete error types, no need for string matching
switch {
// The following are errors returned from pod's mempool.
case strings.Contains(err.Error(), "spent"):
case strings.Contains(err.Error(), "orphan"):
case strings.Contains(err.Error(), "conflict"):
case strings.Contains(err.Error(), "already exists"):
case strings.Contains(err.Error(), "negative"):
// The following errors are returned from bitcoind's
// mempool.
case strings.Contains(err.Error(), "Missing inputs"):
case strings.Contains(err.Error(), "already in block chain"):
case strings.Contains(err.Error(), "fee not met"):
default:
continue
}
// As the transaction was rejected, we'll attempt to remove the unmined transaction altogether. Otherwise,
// we'll keep attempting to rebroadcast this, and we may be computing our balance incorrectly if this tx
// credits or debits to us.
tt := tx
err := walletdb.Update(
w.db, func(dbTx walletdb.ReadWriteTx) error {
txmgrNs := dbTx.ReadWriteBucket(wtxmgrNamespaceKey)
txRec, err := wtxmgr.NewTxRecordFromMsgTx(
tt, time.Now(),
)
if err != nil {
Error(err)
return err
}
return w.TxStore.RemoveUnminedTx(txmgrNs, txRec)
},
)
if err != nil {
Error(err)
Warnf(
"unable to remove conflicting tx %v: %v", tt.TxHash(),
err,
)
continue
}
Infoc(
func() string {
return "removed conflicting tx:" + spew.Sdump(tt) + " "
},
)
continue
}
Debug("resent unmined transaction", resp)
}
}
// SortedActivePaymentAddresses returns a slice of all active payment addresses in a wallet.
func (w *Wallet) SortedActivePaymentAddresses() ([]string, error) {
var addrStrs []string
err := walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
addrmgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
return w.Manager.ForEachActiveAddress(
addrmgrNs, func(addr util.Address) error {
addrStrs = append(addrStrs, addr.EncodeAddress())
return nil
},
)
},
)
if err != nil {
Error(err)
return nil, err
}
sort.Strings(addrStrs)
return addrStrs, nil
}
// NewAddress returns the next external chained address for a wallet.
func (w *Wallet) NewAddress(
account uint32, scope waddrmgr.KeyScope,
nochain bool,
) (util.Address, error) {
var (
chainClient chain.Interface
err error
addr util.Address
props *waddrmgr.AccountProperties
)
if !nochain {
chainClient, err = w.requireChainClient()
if err != nil {
Error(err)
return nil, err
}
}
err = walletdb.Update(
w.db, func(tx walletdb.ReadWriteTx) error {
addrmgrNs := tx.ReadWriteBucket(waddrmgrNamespaceKey)
var err error
addr, props, err = w.newAddress(addrmgrNs, account, scope)
return err
},
)
if err != nil {
Error(err)
return nil, err
}
if !nochain {
// Notify the rpc server about the newly created address.
err = chainClient.NotifyReceived([]util.Address{addr})
if err != nil {
Error(err)
return nil, err
}
w.NtfnServer.notifyAccountProperties(props)
}
return addr, nil
}
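// exampleNewReceiveAddress is an illustrative sketch only (it is not called anywhere in this package) of NewAddress:
// a fresh external address is derived for the default account under the BIP0044 scope, with nochain set to false so
// the chain client is told to watch the new address. waddrmgr.KeyScopeBIP0044 is assumed to be exported by the
// waddrmgr package as in upstream btcwallet.
func exampleNewReceiveAddress(w *Wallet) (util.Address, error) {
return w.NewAddress(waddrmgr.DefaultAccountNum, waddrmgr.KeyScopeBIP0044, false)
}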
func (w *Wallet) newAddress(
addrmgrNs walletdb.ReadWriteBucket, account uint32,
scope waddrmgr.KeyScope,
) (util.Address, *waddrmgr.AccountProperties, error) {
manager, err := w.Manager.FetchScopedKeyManager(scope)
if err != nil {
Error(err)
return nil, nil, err
}
// Get next address from wallet.
addrs, err := manager.NextExternalAddresses(addrmgrNs, account, 1)
if err != nil {
Error(err)
return nil, nil, err
}
props, err := manager.AccountProperties(addrmgrNs, account)
if err != nil {
Error(err)
Error(
"cannot fetch account properties for notification after deriving next external address:",
err,
)
return nil, nil, err
}
return addrs[0].Address(), props, nil
}
// NewChangeAddress returns a new change address for a wallet.
func (w *Wallet) NewChangeAddress(
account uint32,
scope waddrmgr.KeyScope,
) (util.Address, error) {
chainClient, err := w.requireChainClient()
if err != nil {
Error(err)
return nil, err
}
var addr util.Address
err = walletdb.Update(
w.db, func(tx walletdb.ReadWriteTx) error {
addrmgrNs := tx.ReadWriteBucket(waddrmgrNamespaceKey)
var err error
addr, err = w.newChangeAddress(addrmgrNs, account)
return err
},
)
if err != nil {
Error(err)
return nil, err
}
// Notify the rpc server about the newly created address.
err = chainClient.NotifyReceived([]util.Address{addr})
if err != nil {
Error(err)
return nil, err
}
return addr, nil
}
func (w *Wallet) newChangeAddress(
addrmgrNs walletdb.ReadWriteBucket,
account uint32,
) (util.Address, error) {
// As we're making a change address, we'll fetch the type of manager that is able to produce p2wkh outputs, as they're
// the most efficient.
scopes := w.Manager.ScopesForExternalAddrType(
waddrmgr.WitnessPubKey,
)
manager, err := w.Manager.FetchScopedKeyManager(scopes[0])
if err != nil {
Error(err)
return nil, err
}
// Get next chained change address from wallet for account.
addrs, err := manager.NextInternalAddresses(addrmgrNs, account, 1)
if err != nil {
Error(err)
return nil, err
}
return addrs[0].Address(), nil
}
// confirmed checks whether a transaction at height txHeight has met minconf confirmations for a blockchain at height
// curHeight.
func confirmed(minconf, txHeight, curHeight int32) bool {
return confirms(txHeight, curHeight) >= minconf
}
// confirms returns the number of confirmations for a transaction in a block at height txHeight (or -1 for an
// unconfirmed tx) given the chain height curHeight.
func confirms(txHeight, curHeight int32) int32 {
switch {
case txHeight == -1, txHeight > curHeight:
return 0
default:
return curHeight - txHeight + 1
}
}
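// As a concrete illustration of the two helpers above: a transaction mined at height 100 with the chain tip at height
// 102 has confirms(100, 102) == 3, so confirmed(3, 100, 102) is true while confirmed(4, 100, 102) is false; an
// unmined transaction (txHeight == -1) always reports zero confirmations.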
// AccountTotalReceivedResult is a single result for the Wallet.TotalReceivedForAccounts method.
type AccountTotalReceivedResult struct {
AccountNumber uint32
AccountName string
TotalReceived util.Amount
LastConfirmation int32
}
// TotalReceivedForAccounts iterates through a wallet's transaction history, returning the total amount of Bitcoin
// received for all accounts.
func (w *Wallet) TotalReceivedForAccounts(
scope waddrmgr.KeyScope,
minConf int32,
) ([]AccountTotalReceivedResult, error) {
manager, err := w.Manager.FetchScopedKeyManager(scope)
if err != nil {
Error(err)
return nil, err
}
var results []AccountTotalReceivedResult
err = walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
addrmgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
txmgrNs := tx.ReadBucket(wtxmgrNamespaceKey)
syncBlock := w.Manager.SyncedTo()
err := manager.ForEachAccount(
addrmgrNs, func(account uint32) error {
accountName, err := manager.AccountName(addrmgrNs, account)
if err != nil {
Error(err)
return err
}
results = append(
results, AccountTotalReceivedResult{
AccountNumber: account,
AccountName: accountName,
},
)
return nil
},
)
if err != nil {
Error(err)
return err
}
var stopHeight int32
if minConf > 0 {
stopHeight = syncBlock.Height - minConf + 1
} else {
stopHeight = -1
}
rangeFn := func(details []wtxmgr.TxDetails) (bool, error) {
for i := range details {
detail := &details[i]
for _, cred := range detail.Credits {
pkScript := detail.MsgTx.TxOut[cred.Index].PkScript
var outputAcct uint32
_, addrs, _, err := txscript.ExtractPkScriptAddrs(pkScript, w.chainParams)
if err == nil && len(addrs) > 0 {
_, outputAcct, err = w.Manager.AddrAccount(addrmgrNs, addrs[0])
}
if err == nil {
acctIndex := int(outputAcct)
if outputAcct == waddrmgr.ImportedAddrAccount {
acctIndex = len(results) - 1
}
res := &results[acctIndex]
res.TotalReceived += cred.Amount
res.LastConfirmation = confirms(
detail.Block.Height, syncBlock.Height,
)
}
}
}
return false, nil
}
return w.TxStore.RangeTransactions(txmgrNs, 0, stopHeight, rangeFn)
},
)
return results, err
}
// TotalReceivedForAddr iterates through a wallet's transaction history, returning the total amount of bitcoins received
// for a single wallet address.
func (w *Wallet) TotalReceivedForAddr(addr util.Address, minConf int32) (util.Amount, error) {
var amount util.Amount
err := walletdb.View(
w.db, func(tx walletdb.ReadTx) error {
txmgrNs := tx.ReadBucket(wtxmgrNamespaceKey)
syncBlock := w.Manager.SyncedTo()
var (
addrStr = addr.EncodeAddress()
stopHeight int32
)
if minConf > 0 {
stopHeight = syncBlock.Height - minConf + 1
} else {
stopHeight = -1
}
rangeFn := func(details []wtxmgr.TxDetails) (bool, error) {
for i := range details {
detail := &details[i]
for _, cred := range detail.Credits {
pkScript := detail.MsgTx.TxOut[cred.Index].PkScript
_, addrs, _, err := txscript.ExtractPkScriptAddrs(
pkScript,
w.chainParams,
)
// An error creating addresses from the output script only indicates a non-standard script, so
// ignore this credit.
if err != nil {
Error(err)
continue
}
for _, a := range addrs {
if addrStr == a.EncodeAddress() {
amount += cred.Amount
break
}
}
}
}
return false, nil
}
return w.TxStore.RangeTransactions(txmgrNs, 0, stopHeight, rangeFn)
},
)
return amount, err
}
// SendOutputs creates and sends payment transactions. It returns the transaction hash upon success.
func (w *Wallet) SendOutputs(
outputs []*wire.TxOut, account uint32,
minconf int32, satPerKb util.Amount,
) (*chainhash.Hash, error) {
// Ensure the outputs to be created adhere to the network's consensus rules.
for _, output := range outputs {
if err := txrules.CheckOutput(output, satPerKb); err != nil {
return nil, err
}
}
// Create the transaction and broadcast it to the network. The transaction will be added to the database in order to
// ensure that we continue to re-broadcast the transaction upon restarts until it has been confirmed.
createdTx, err := w.CreateSimpleTx(account, outputs, minconf, satPerKb)
if err != nil {
Error(err)
return nil, err
}
return w.publishTransaction(createdTx.Tx)
}
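// sendOutputsExample is an illustrative sketch (not part of the original file) of paying a fixed amount to
// an already-encoded output script via SendOutputs. The pkScript, account number, minconf and fee rate
// values below are placeholder assumptions.
func sendOutputsExample(w *Wallet, pkScript []byte) (*chainhash.Hash, error) {
	// One output of 100,000 satoshis; spend from the default account (0), require 1 confirmation on the
	// inputs, and use a 1,000 sat/kB fee rate.
	outputs := []*wire.TxOut{wire.NewTxOut(100000, pkScript)}
	return w.SendOutputs(outputs, 0, 1, util.Amount(1000))
}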
// SignatureError records the underlying error when validating a transaction input signature.
type SignatureError struct {
InputIndex uint32
Error error
}
// SignTransaction uses secrets of the wallet, as well as additional secrets passed in by the caller, to create and add
// input signatures to a transaction.
//
// Transaction input script validation is used to confirm that all signatures are valid. For any invalid input, a
// SignatureError is added to the returns. The final error return is reserved for unexpected or fatal errors, such as
// being unable to determine a previous output script to redeem.
//
// The transaction pointed to by tx is modified by this function.
func (w *Wallet) SignTransaction(
tx *wire.MsgTx, hashType txscript.SigHashType,
additionalPrevScripts map[wire.OutPoint][]byte,
additionalKeysByAddress map[string]*util.WIF,
p2shRedeemScriptsByAddress map[string][]byte,
) ([]SignatureError, error) {
var signErrors []SignatureError
err := walletdb.View(
w.db, func(dbtx walletdb.ReadTx) error {
addrmgrNs := dbtx.ReadBucket(waddrmgrNamespaceKey)
txmgrNs := dbtx.ReadBucket(wtxmgrNamespaceKey)
for i, txIn := range tx.TxIn {
prevOutScript, ok := additionalPrevScripts[txIn.PreviousOutPoint]
if !ok {
prevHash := &txIn.PreviousOutPoint.Hash
prevIndex := txIn.PreviousOutPoint.Index
txDetails, err := w.TxStore.TxDetails(txmgrNs, prevHash)
if err != nil {
Error(err)
return fmt.Errorf(
"cannot query previous transaction "+
"details for %v: %v", txIn.PreviousOutPoint, err,
)
}
if txDetails == nil {
return fmt.Errorf(
"%v not found",
txIn.PreviousOutPoint,
)
}
prevOutScript = txDetails.MsgTx.TxOut[prevIndex].PkScript
}
// Set up our callbacks that we pass to txscript so it can look up the appropriate keys and scripts by
// address.
getKey := txscript.KeyClosure(
func(addr util.Address) (*ec.PrivateKey, bool, error) {
if len(additionalKeysByAddress) != 0 {
addrStr := addr.EncodeAddress()
wif, ok := additionalKeysByAddress[addrStr]
if !ok {
return nil, false,
errors.New("no key for address")
}
return wif.PrivKey, wif.CompressPubKey, nil
}
address, err := w.Manager.Address(addrmgrNs, addr)
if err != nil {
Error(err)
return nil, false, err
}
pka, ok := address.(waddrmgr.ManagedPubKeyAddress)
if !ok {
return nil, false, fmt.Errorf(
"address %v is not "+
"a pubkey address", address.Address().EncodeAddress(),
)
}
key, err := pka.PrivKey()
if err != nil {
Error(err)
return nil, false, err
}
return key, pka.Compressed(), nil
},
)
getScript := txscript.ScriptClosure(
func(addr util.Address) ([]byte, error) {
// If keys were provided then we can only use the redeem scripts provided with our inputs, too.
if len(additionalKeysByAddress) != 0 {
addrStr := addr.EncodeAddress()
script, ok := p2shRedeemScriptsByAddress[addrStr]
if !ok {
return nil, errors.New("no script for address")
}
return script, nil
}
address, err := w.Manager.Address(addrmgrNs, addr)
if err != nil {
Error(err)
return nil, err
}
sa, ok := address.(waddrmgr.ManagedScriptAddress)
if !ok {
return nil, errors.New(
"address is not a script" +
" address",
)
}
return sa.Script()
},
)
// SigHashSingle inputs can only be signed if there's a corresponding output. However, this could already
// be signed, so we always verify the output.
if (hashType&txscript.SigHashSingle) !=
txscript.SigHashSingle || i < len(tx.TxOut) {
script, err := txscript.SignTxOutput(
w.ChainParams(),
tx, i, prevOutScript, hashType, getKey,
getScript, txIn.SignatureScript,
)
// Failure to sign isn't an error; it just means that the tx isn't complete.
if err != nil {
Error(err)
signErrors = append(
signErrors, SignatureError{
InputIndex: uint32(i),
Error: err,
},
)
continue
}
txIn.SignatureScript = script
}
// Either it was already signed or we just signed it. Find out if it is completely satisfied or still needs
// more.
vm, err := txscript.NewEngine(
prevOutScript, tx, i,
txscript.StandardVerifyFlags, nil, nil, 0,
)
if err == nil {
err = vm.Execute()
}
if err != nil {
Error(err)
signErrors = append(
signErrors, SignatureError{
InputIndex: uint32(i),
Error: err,
},
)
}
}
return nil
},
)
return signErrors, err
}
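// signTransactionExample is an illustrative sketch (not part of the original wallet API) showing how a
// caller might sign a fully constructed, unsigned transaction with only the wallet's own keys and report
// the first per-input failure.
func signTransactionExample(w *Wallet, tx *wire.MsgTx) error {
	sigErrs, err := w.SignTransaction(tx, txscript.SigHashAll, nil, nil, nil)
	if err != nil {
		// Fatal errors, such as a previous output script that could not be found.
		return err
	}
	if len(sigErrs) > 0 {
		return fmt.Errorf("input %d could not be signed: %v", sigErrs[0].InputIndex, sigErrs[0].Error)
	}
	return nil
}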
// PublishTransaction sends the transaction to the consensus RPC server so it can be propagated to other nodes and
// eventually mined.
//
// This function is unstable and will be removed once syncing code is moved out of the wallet.
func (w *Wallet) PublishTransaction(tx *wire.MsgTx) error {
_, err := w.publishTransaction(tx)
return err
}
// publishTransaction is the private version of PublishTransaction which contains the primary logic required for
// publishing a transaction, updating the relevant database state, and finally possibly removing the transaction
// from the database (along with cleaning up all inputs used and outputs created) if the transaction is rejected
// by the back end.
func (w *Wallet) publishTransaction(tx *wire.MsgTx) (*chainhash.Hash, error) {
server, err := w.requireChainClient()
if err != nil {
Error(err)
return nil, err
}
// As we aim for this to be a general, reliable transaction broadcast API, we'll write this tx to disk as an
// unconfirmed transaction. This way, upon restarts, we'll always rebroadcast it, and also add it to our set of
// records.
txRec, err := wtxmgr.NewTxRecordFromMsgTx(tx, time.Now())
if err != nil {
Error(err)
return nil, err
}
err = walletdb.Update(
w.db, func(dbTx walletdb.ReadWriteTx) error {
return w.addRelevantTx(dbTx, txRec, nil)
},
)
if err != nil {
Error(err)
return nil, err
}
txid, err := server.SendRawTransaction(tx, false)
switch {
case err == nil:
return txid, nil
// The following are errors returned from pod's mempool.
case strings.Contains(err.Error(), "spent"):
fallthrough
case strings.Contains(err.Error(), "orphan"):
fallthrough
case strings.Contains(err.Error(), "conflict"):
fallthrough
// The following errors are returned from bitcoind's mempool.
case strings.Contains(err.Error(), "fee not met"):
fallthrough
case strings.Contains(err.Error(), "Missing inputs"):
fallthrough
case strings.Contains(err.Error(), "already in block chain"):
// If the transaction was rejected, then we'll remove it from the txstore, as otherwise, we'll attempt to
// continually re-broadcast it, and the utxo state of the wallet won't be accurate.
dbErr := walletdb.Update(
w.db, func(dbTx walletdb.ReadWriteTx) error {
txmgrNs := dbTx.ReadWriteBucket(wtxmgrNamespaceKey)
return w.TxStore.RemoveUnminedTx(txmgrNs, txRec)
},
)
if dbErr != nil {
return nil, fmt.Errorf(
"unable to broadcast tx: %v, "+
"unable to remove invalid tx: %v", err, dbErr,
)
}
return nil, err
default:
return nil, err
}
}
// ChainParams returns the network parameters for the blockchain the wallet belongs to.
func (w *Wallet) ChainParams() *netparams.Params {
return w.chainParams
}
// Database returns the underlying walletdb database. This method is provided in order to allow applications wrapping
// btcwallet to store app-specific data with the wallet's database.
func (w *Wallet) Database() walletdb.DB {
return w.db
}
// Create creates a new wallet, writing it to an empty database. If the passed seed is non-nil, it is used.
// Otherwise, a secure random seed of the recommended length is generated.
func Create(
db walletdb.DB, pubPass, privPass, seed []byte, params *netparams.Params,
birthday time.Time,
) error {
// If a seed was provided, ensure that it is of valid length. Otherwise, we generate a random seed for the wallet
// with the recommended seed length.
if seed == nil {
hdSeed, err := hdkeychain.GenerateSeed(
hdkeychain.RecommendedSeedLen,
)
if err != nil {
Error(err)
return err
}
seed = hdSeed
}
if len(seed) < hdkeychain.MinSeedBytes ||
len(seed) > hdkeychain.MaxSeedBytes {
return hdkeychain.ErrInvalidSeedLen
}
return walletdb.Update(
db, func(tx walletdb.ReadWriteTx) error {
addrmgrNs, err := tx.CreateTopLevelBucket(waddrmgrNamespaceKey)
if err != nil {
Error(err)
return err
}
txmgrNs, err := tx.CreateTopLevelBucket(wtxmgrNamespaceKey)
if err != nil {
Error(err)
return err
}
err = waddrmgr.Create(
addrmgrNs, seed, pubPass, privPass, params, nil,
birthday,
)
if err != nil {
Error(err)
return err
}
return wtxmgr.Create(txmgrNs)
},
)
}
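// createExample is an illustrative sketch (not part of the original file): creating a wallet with a freshly
// generated random seed. The passphrases and birthday below are placeholder assumptions.
func createExample(db walletdb.DB, params *netparams.Params) error {
	// Passing a nil seed makes Create generate a random seed of the recommended length.
	return Create(db, []byte("public"), []byte("private"), nil, params, time.Now())
}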
// Open loads an already-created wallet from the passed database and namespaces.
func Open(
db walletdb.DB,
pubPass []byte,
cbs *waddrmgr.OpenCallbacks,
params *netparams.Params,
recoveryWindow uint32,
podConfig *pod.Config,
quit qu.C,
) (*Wallet, error) {
// debug.PrintStack()
Warn("opening wallet", string(pubPass))
err := walletdb.View(
db, func(tx walletdb.ReadTx) error {
waddrmgrBucket := tx.ReadBucket(waddrmgrNamespaceKey)
if waddrmgrBucket == nil {
return errors.New("missing address manager namespace")
}
wtxmgrBucket := tx.ReadBucket(wtxmgrNamespaceKey)
if wtxmgrBucket == nil {
return errors.New("missing transaction manager namespace")
}
return nil
},
)
if err != nil {
Error(err)
return nil, err
}
// Perform upgrades as necessary. Each upgrade is done under its own transaction, which is managed by each package
// itself, so the entire DB is passed instead of passing an already opened write transaction.
//
// This will need to change later when upgrades in one package depend on data in another (such as removing chain
// synchronization from address manager).
err = waddrmgr.DoUpgrades(db, waddrmgrNamespaceKey, pubPass, params, cbs)
if err != nil {
Error(err)
return nil, err
}
err = wtxmgr.DoUpgrades(db, wtxmgrNamespaceKey)
if err != nil {
Error(err)
return nil, err
}
// Open database abstraction instances
var (
addrMgr *waddrmgr.Manager
txMgr *wtxmgr.Store
)
err = walletdb.View(
db, func(tx walletdb.ReadTx) error {
addrmgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
txmgrNs := tx.ReadBucket(wtxmgrNamespaceKey)
var err error
addrMgr, err = waddrmgr.Open(addrmgrNs, pubPass, params)
if err != nil {
Error(err, "'"+string(pubPass)+"'")
return err
}
txMgr, err = wtxmgr.Open(txmgrNs, params)
return err
},
)
if err != nil {
Error(err)
return nil, err
}
Trace("opened wallet") // TODO: log balance? last sync height?
w := &Wallet{
publicPassphrase: pubPass,
db: db,
Manager: addrMgr,
TxStore: txMgr,
lockedOutpoints: map[wire.OutPoint]struct{}{},
recoveryWindow: recoveryWindow,
rescanAddJob: make(chan *RescanJob),
rescanBatch: make(chan *rescanBatch),
rescanNotifications: make(chan interface{}),
rescanProgress: make(chan *RescanProgressMsg),
rescanFinished: make(chan *RescanFinishedMsg),
createTxRequests: make(chan createTxRequest),
unlockRequests: make(chan unlockRequest),
lockRequests: qu.T(),
holdUnlockRequests: make(chan chan heldUnlock),
lockState: make(chan bool),
changePassphrase: make(chan changePassphraseRequest),
changePassphrases: make(chan changePassphrasesRequest),
chainParams: params,
PodConfig: podConfig,
quit: quit,
}
w.NtfnServer = newNotificationServer(w)
w.TxStore.NotifyUnspent = func(hash *chainhash.Hash, index uint32) {
w.NtfnServer.notifyUnspentOutput(0, hash, index)
}
return w, nil
}
| {
continue
} |
get_group_progress.go | package groups
import (
"net/http"
"reflect"
"strconv"
"strings"
"github.com/France-ioi/mapstructure"
"github.com/go-chi/render"
"github.com/jinzhu/gorm"
"github.com/France-ioi/AlgoreaBackend/app/database"
"github.com/France-ioi/AlgoreaBackend/app/service"
)
// swagger:model groupGroupProgressResponseTableCell
type groupGroupProgressResponseTableCell struct {
// The child's `group_id`
// required:true
GroupID int64 `json:"group_id,string"`
// required:true
ItemID int64 `json:"item_id,string"`
// Average score of all "end-members".
// The score of an "end-member" is the max of their `results.score`, or 0 if there are no results.
// required:true
AverageScore float32 `json:"average_score"`
// % (float [0,1]) of "end-members" who have validated the task.
// An "end-member" has validated a task if one of their results has `results.validated` = 1.
// An "end-member" with no results is considered as not having validated the task.
// required:true
ValidationRate float32 `json:"validation_rate"`
// Average number of hints requested by each "end-member".
// The number of hints requested by an "end-member" is the `results.hints_cached`
// of the result with the best score
// (if several with the same score, we use the first result chronologically on `score_obtained_at`).
// required:true
AvgHintsRequested float32 `json:"avg_hints_requested"`
// Average number of submissions made by each "end-member".
// The number of submissions made by an "end-member" is the `results.submissions`
// of the result with the best score
// (if several with the same score, we use the first result chronologically on `score_obtained_at`).
// required:true
AvgSubmissions float32 `json:"avg_submissions"`
// Average time spent among all the "end-members" (in seconds). The time spent by an "end-member" is computed as:
//
// 1) if no results yet: 0
//
// 2) if at least one result is validated: min(`validated_at`) - min(`started_at`)
// (i.e., the time between the first time the member started any result
// and the first time they validated the task)
//
// 3) if no results validated: `now` - min(`started_at`)
// required:true
AvgTimeSpent float32 `json:"avg_time_spent"`
}
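// Illustrative example (not part of the original file): for a group with two "end-members" on one item, where
// member A's best result has score 100, is validated, was started at t=0s and validated at t=120s, and member B
// has no results (score 0, not validated, 0s spent), the returned cell would hold average_score=50,
// validation_rate=0.5 and avg_time_spent=60.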
// swagger:operation GET /groups/{group_id}/group-progress groups groupGroupProgress
// ---
// summary: Get group progress
// description: >
// Returns the current progress of a group on a subset of items.
//
//
// For each item from `{parent_item_id}` and its visible children, displays the result
// of each direct child of the given `group_id` whose type is not in (Team, User).
//
//
// Restrictions:
//
// * The current user should be a manager of the group (or of one of its ancestors)
// with `can_watch_members` set to true,
//
// * The current user should have `can_watch` >= 'result' on each of `{parent_item_ids}` items,
//
//
// otherwise the 'forbidden' error is returned.
// parameters:
// - name: group_id
// in: path
// type: integer
// required: true
// - name: parent_item_ids
// in: query
// type: array
// required: true
// items:
// type: integer
// - name: from.name
// description: Start the page from the group next to the group with `name` = `from.name` and `id` = `from.id`
// (`from.id` is required when `from.name` is present)
// in: query
// type: string
// - name: from.id
// description: Start the page from the group next to the group with `name`=`from.name` and `id`=`from.id`
// (`from.name` is required when `from.id` is present)
// in: query
// type: integer
// - name: limit
// description: Display results for the first N groups (sorted by `name`)
// in: query
// type: integer
// maximum: 20
// default: 10
// responses:
// "200":
// description: >
// OK. Success response with the groups' progress on items.
// For each item from `{parent_item_id}` and its visible children, displays the result for each direct child
// of the given group_id whose type is not in (Team, User). Values are averages over all the group's
// "end-members", where "end-members" are defined as descendants of the group which are either
// 1) teams or
// 2) users who descend from the input group not only through teams (one or more).
// schema:
// type: array
// items:
// "$ref": "#/definitions/groupGroupProgressResponseTableCell"
// "400":
// "$ref": "#/responses/badRequestResponse"
// "401":
// "$ref": "#/responses/unauthorizedResponse"
// "403":
// "$ref": "#/responses/forbiddenResponse"
// "500":
// "$ref": "#/responses/internalErrorResponse"
func (srv *Service) getGroupProgress(w http.ResponseWriter, r *http.Request) service.APIError {
user := srv.GetUser(r)
groupID, err := service.ResolveURLQueryPathInt64Field(r, "group_id")
if err != nil {
return service.ErrInvalidRequest(err)
}
if apiError := checkThatUserCanWatchGroupMembers(srv.Store, user, groupID); apiError != service.NoError {
return apiError
}
itemParentIDs, apiError := srv.resolveAndCheckParentIDs(r, user)
if apiError != service.NoError {
return apiError
}
if len(itemParentIDs) == 0 {
render.Respond(w, r, []map[string]interface{}{})
return service.NoError
}
// Preselect item IDs since we want to use them twice (for end members stats and for final stats)
// There should not be many of them
orderedItemIDListWithDuplicates, uniqueItemIDs, _, itemsSubQuery := srv.preselectIDsOfVisibleItems(itemParentIDs, user)
// Preselect IDs of groups for which we will calculate the final stats.
// All the "end members" are descendants of these groups.
// There should not be too many groups because we paginate on them.
var ancestorGroupIDs []interface{}
ancestorGroupIDQuery := srv.Store.ActiveGroupGroups().
Where("groups_groups_active.parent_group_id = ?", groupID).
Joins(`
JOIN ` + "`groups`" + ` AS group_child
ON group_child.id = groups_groups_active.child_group_id AND group_child.type NOT IN('Team', 'User')`)
ancestorGroupIDQuery, apiError = service.ApplySortingAndPaging(r, ancestorGroupIDQuery, map[string]*service.FieldSortingParams{
// Note that we require the 'from.name' request parameter although the service does not return group names
"name": {ColumnName: "group_child.name", FieldType: "string"},
"id": {ColumnName: "group_child.id", FieldType: "int64"},
}, "name,id", []string{"id"}, false)
if apiError != service.NoError {
return apiError
}
ancestorGroupIDQuery = service.NewQueryLimiter().
SetDefaultLimit(10).SetMaxAllowedLimit(20).
Apply(r, ancestorGroupIDQuery)
service.MustNotBeError(ancestorGroupIDQuery.
Pluck("group_child.id", &ancestorGroupIDs).Error())
if len(ancestorGroupIDs) == 0 {
render.Respond(w, r, []map[string]interface{}{})
return service.NoError
}
endMembers := srv.Store.Groups().
Select("groups.id").
Joins(`
JOIN groups_ancestors_active
ON groups_ancestors_active.ancestor_group_id IN (?) AND
groups_ancestors_active.child_group_id = groups.id`, ancestorGroupIDs).
Where("groups.type = 'Team' OR groups.type = 'User'").
Group("groups.id")
endMembersStats := srv.Store.Raw(`
SELECT
end_members.id,
items.id AS item_id,
IFNULL(result_with_best_score.score, 0) AS score,
IFNULL(result_with_best_score.validated, 0) AS validated,
IFNULL(result_with_best_score.hints_cached, 0) AS hints_cached,
IFNULL(result_with_best_score.submissions, 0) AS submissions,
IF(result_with_best_score.participant_id IS NULL,
0,
(
SELECT GREATEST(IF(result_with_best_score.validated,
TIMESTAMPDIFF(SECOND, MIN(started_at), MIN(validated_at)),
TIMESTAMPDIFF(SECOND, MIN(started_at), NOW())
), 0)
FROM results
WHERE participant_id = end_members.id AND item_id = items.id
)
) AS time_spent
FROM ? AS end_members`, endMembers.SubQuery()).
Joins("JOIN ? AS items", itemsSubQuery).
Joins(`
LEFT JOIN LATERAL (
SELECT score_computed AS score, validated, hints_cached, submissions, participant_id
FROM results
WHERE participant_id = end_members.id AND item_id = items.id
ORDER BY participant_id, item_id, score_computed DESC, score_obtained_at
LIMIT 1
) AS result_with_best_score ON 1`)
var result []*groupGroupProgressResponseTableCell
// It still takes more than 2 minutes to complete on large data sets
scanAndBuildProgressResults(
srv.Store.ActiveGroupAncestors().
Select(`
groups_ancestors_active.ancestor_group_id AS group_id,
member_stats.item_id,
AVG(member_stats.score) AS average_score,
AVG(member_stats.validated) AS validation_rate,
AVG(member_stats.hints_cached) AS avg_hints_requested,
AVG(member_stats.submissions) AS avg_submissions,
AVG(member_stats.time_spent) AS avg_time_spent`).
Joins("JOIN ? AS member_stats ON member_stats.id = groups_ancestors_active.child_group_id", endMembersStats.SubQuery()).
Where("groups_ancestors_active.ancestor_group_id IN (?)", ancestorGroupIDs).
Group("groups_ancestors_active.ancestor_group_id, member_stats.item_id").
Order(gorm.Expr(
"FIELD(groups_ancestors_active.ancestor_group_id"+strings.Repeat(", ?", len(ancestorGroupIDs))+")",
ancestorGroupIDs...)),
orderedItemIDListWithDuplicates, len(uniqueItemIDs), &result,
)
render.Respond(w, r, result)
return service.NoError
}
func (srv *Service) preselectIDsOfVisibleItems(itemParentIDs []int64, user *database.User) (
orderedItemIDListWithDuplicates []interface{}, uniqueItemIDs []string, itemOrder []int, itemsSubQuery interface{}) {
itemParentIDsAsIntSlice := make([]interface{}, len(itemParentIDs))
for i, parentID := range itemParentIDs {
itemParentIDsAsIntSlice[i] = parentID
}
var parentChildPairs []struct {
ParentItemID int64
ChildItemID int64
}
service.MustNotBeError(srv.Store.ItemItems().
Select("items_items.child_item_id AS id").
Where("parent_item_id IN (?)", itemParentIDs).
Joins("JOIN ? AS permissions ON permissions.item_id = items_items.child_item_id",
srv.Store.Permissions().MatchingUserAncestors(user).
Select("item_id").
WherePermissionIsAtLeast("view", "info").SubQuery()).
Order(gorm.Expr(
"FIELD(items_items.parent_item_id"+strings.Repeat(", ?", len(itemParentIDs))+"), items_items.child_order",
itemParentIDsAsIntSlice...)).
Group("items_items.parent_item_id, items_items.child_item_id").
Select("items_items.parent_item_id, items_items.child_item_id").
Scan(&parentChildPairs).Error())
// parent1_id, child1_1_id, ..., parent2_id, child2_1_id, ...
orderedItemIDListWithDuplicates = make([]interface{}, 0, len(itemParentIDs)+len(parentChildPairs))
itemOrder = make([]int, 0, len(itemParentIDs)+len(parentChildPairs))
currentParentIDIndex := 0
// child_id -> true, will be used to construct a list of unique item ids
childItemIDMap := make(map[int64]bool, len(parentChildPairs))
orderedItemIDListWithDuplicates = append(orderedItemIDListWithDuplicates, itemParentIDs[0])
itemOrder = append(itemOrder, 0)
currentChildNumber := 0
for i := range parentChildPairs {
for itemParentIDs[currentParentIDIndex] != parentChildPairs[i].ParentItemID {
currentParentIDIndex++
currentChildNumber = 0
orderedItemIDListWithDuplicates = append(orderedItemIDListWithDuplicates, itemParentIDs[currentParentIDIndex])
itemOrder = append(itemOrder, 0)
}
orderedItemIDListWithDuplicates = append(orderedItemIDListWithDuplicates, parentChildPairs[i].ChildItemID)
childItemIDMap[parentChildPairs[i].ChildItemID] = true
currentChildNumber++
itemOrder = append(itemOrder, currentChildNumber)
}
// Create an unordered list of all the unique item ids (parents and children).
// Note: itemParentIDs slice doesn't contain duplicates because resolveAndCheckParentIDs() guarantees that.
itemIDs := make([]string, len(itemParentIDs), len(childItemIDMap)+len(itemParentIDs))
for i, parentID := range itemParentIDs {
itemIDs[i] = strconv.FormatInt(parentID, 10)
}
for itemID := range childItemIDMap {
itemIDs = append(itemIDs, strconv.FormatInt(itemID, 10))
}
itemsSubQuery = gorm.Expr(`JSON_TABLE('[` + strings.Join(itemIDs, ", ") + `]', "$[*]" COLUMNS(id BIGINT PATH "$"))`)
return orderedItemIDListWithDuplicates, itemIDs, itemOrder, itemsSubQuery
}
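// Note (illustrative, not part of the original file): the gorm.Expr calls above and in getGroupProgress build a
// MySQL FIELD() ordering clause; e.g. for three parent item IDs the generated fragment is roughly
//   ORDER BY FIELD(items_items.parent_item_id, ?, ?, ?), items_items.child_order
// which keeps rows in the same order as the requested parent_item_ids.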
func appendTableRowToResult(orderedItemIDListWithDuplicates []interface{}, reflResultRowMap reflect.Value, resultPtr interface{}) {
// | esultPtr should be a pointer to a slice of pointers to table cells
func scanAndBuildProgressResults(
query *database.DB, orderedItemIDListWithDuplicates []interface{}, uniqueItemsCount int, resultPtr interface{}) {
// resultPtr is *[]*tableCellType
reflTableCellType := reflect.TypeOf(resultPtr).Elem().Elem().Elem()
reflDecodedTableCell := reflect.New(reflTableCellType).Elem()
decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
// will convert strings with time in DB format to database.Time
DecodeHook: mapstructure.ComposeDecodeHookFunc(
func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(database.Time{}) {
return data, nil
}
// Convert it by parsing
result := &database.Time{}
err := result.ScanString(data.(string))
return *result, err
},
),
Result: reflDecodedTableCell.Addr().Interface(),
TagName: "json",
ZeroFields: true, // this marks keys with null values as used
WeaklyTypedInput: true,
})
service.MustNotBeError(err)
// here we will store results for each item: map[int64]*tableCellType
reflResultRowMap := reflect.MakeMapWithSize(
reflect.MapOf(reflect.TypeOf(int64(0)), reflect.PtrTo(reflTableCellType)), uniqueItemsCount)
previousGroupID := int64(-1)
service.MustNotBeError(query.ScanAndHandleMaps(func(cell map[string]interface{}) error {
// convert map[string]interface{} into tableCellType and store the result in reflDecodedTableCell
service.MustNotBeError(decoder.Decode(cell))
groupID := reflDecodedTableCell.FieldByName("GroupID").Interface().(int64)
if groupID != previousGroupID {
if previousGroupID != -1 {
// Moving to the next row of the results table, so we should insert cells from the map into the results slice
appendTableRowToResult(orderedItemIDListWithDuplicates, reflResultRowMap, resultPtr)
// and initialize a new map for cells
reflResultRowMap = reflect.MakeMapWithSize(reflect.MapOf(reflect.TypeOf(int64(0)), reflect.PtrTo(reflTableCellType)), uniqueItemsCount)
}
previousGroupID = groupID
}
// as reflDecodedTableCell will be reused on the next iteration of the loop, we should create a copy
reflDecodedRowCopy := reflect.New(reflTableCellType).Elem()
reflDecodedRowCopy.Set(reflDecodedTableCell)
reflResultRowMap.SetMapIndex(reflDecodedTableCell.FieldByName("ItemID"), reflDecodedRowCopy.Addr())
return nil
}).Error())
// store the last row of the table
appendTableRowToResult(orderedItemIDListWithDuplicates, reflResultRowMap, resultPtr)
}
func (srv *Service) resolveAndCheckParentIDs(r *http.Request, user *database.User) ([]int64, service.APIError) {
itemParentIDs, err := service.ResolveURLQueryGetInt64SliceField(r, "parent_item_ids")
if err != nil {
return nil, service.ErrInvalidRequest(err)
}
itemParentIDs = uniqueIDs(itemParentIDs)
if len(itemParentIDs) > 0 {
var cnt int
service.MustNotBeError(srv.Store.Permissions().MatchingUserAncestors(user).
WherePermissionIsAtLeast("watch", "result").Where("item_id IN(?)", itemParentIDs).
PluckFirst("COUNT(DISTINCT item_id)", &cnt).Error())
if cnt != len(itemParentIDs) {
return nil, service.InsufficientAccessRightsError
}
}
return itemParentIDs, service.NoError
}
func uniqueIDs(ids []int64) []int64 {
idsMap := make(map[int64]bool, len(ids))
result := make([]int64, 0, len(ids))
for _, id := range ids {
if !idsMap[id] {
result = append(result, id)
idsMap[id] = true
}
}
return result
}
| resultPtr is *[]*tableCellType
reflTableCellType := reflect.TypeOf(resultPtr).Elem().Elem().Elem()
// []*tableCellType
reflTableRow := reflect.MakeSlice(
reflect.SliceOf(reflect.PtrTo(reflTableCellType)), len(orderedItemIDListWithDuplicates), len(orderedItemIDListWithDuplicates))
// Here we fill the table row with cells. As an item can be a child of multiple parents, the row may contain duplicates.
for index, itemID := range orderedItemIDListWithDuplicates {
reflTableRow.Index(index).Set(reflResultRowMap.MapIndex(reflect.ValueOf(itemID)))
}
reflResultPtr := reflect.ValueOf(resultPtr)
// this means: *resultPtr = append(*resultPtr, tableRow)
reflResultPtr.Elem().Set(reflect.AppendSlice(reflResultPtr.Elem(), reflTableRow))
}
// r |
shadow.go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/baetyl/baetyl-cloud/v2/plugin (interfaces: Shadow)
// Package plugin is a generated GoMock package.
package plugin
import (
models "github.com/baetyl/baetyl-cloud/v2/models"
gomock "github.com/golang/mock/gomock"
reflect "reflect"
)
// MockShadow is a mock of Shadow interface
type MockShadow struct {
ctrl *gomock.Controller
recorder *MockShadowMockRecorder
}
// MockShadowMockRecorder is the mock recorder for MockShadow
type MockShadowMockRecorder struct {
mock *MockShadow
}
// NewMockShadow creates a new mock instance
func NewMockShadow(ctrl *gomock.Controller) *MockShadow {
mock := &MockShadow{ctrl: ctrl}
mock.recorder = &MockShadowMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockShadow) EXPECT() *MockShadowMockRecorder {
return m.recorder
}
// Close mocks base method
func (m *MockShadow) Close() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Close")
ret0, _ := ret[0].(error)
return ret0
}
// Close indicates an expected call of Close
func (mr *MockShadowMockRecorder) Close() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockShadow)(nil).Close))
}
// Create mocks base method
func (m *MockShadow) Create(arg0 *models.Shadow) (*models.Shadow, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Create", arg0)
ret0, _ := ret[0].(*models.Shadow)
ret1, _ := ret[1].(error)
return ret0, ret1
}
| return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockShadow)(nil).Create), arg0)
}
// Delete mocks base method
func (m *MockShadow) Delete(arg0, arg1 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete
func (mr *MockShadowMockRecorder) Delete(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockShadow)(nil).Delete), arg0, arg1)
}
// Get mocks base method
func (m *MockShadow) Get(arg0, arg1 string) (*models.Shadow, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Get", arg0, arg1)
ret0, _ := ret[0].(*models.Shadow)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Get indicates an expected call of Get
func (mr *MockShadowMockRecorder) Get(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockShadow)(nil).Get), arg0, arg1)
}
// List mocks base method
func (m *MockShadow) List(arg0 string, arg1 *models.NodeList) (*models.ShadowList, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "List", arg0, arg1)
ret0, _ := ret[0].(*models.ShadowList)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// List indicates an expected call of List
func (mr *MockShadowMockRecorder) List(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockShadow)(nil).List), arg0, arg1)
}
// UpdateDesire mocks base method
func (m *MockShadow) UpdateDesire(arg0 *models.Shadow) (*models.Shadow, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpdateDesire", arg0)
ret0, _ := ret[0].(*models.Shadow)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpdateDesire indicates an expected call of UpdateDesire
func (mr *MockShadowMockRecorder) UpdateDesire(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateDesire", reflect.TypeOf((*MockShadow)(nil).UpdateDesire), arg0)
}
// UpdateReport mocks base method
func (m *MockShadow) UpdateReport(arg0 *models.Shadow) (*models.Shadow, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpdateReport", arg0)
ret0, _ := ret[0].(*models.Shadow)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpdateReport indicates an expected call of UpdateReport
func (mr *MockShadowMockRecorder) UpdateReport(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateReport", reflect.TypeOf((*MockShadow)(nil).UpdateReport), arg0)
} | // Create indicates an expected call of Create
func (mr *MockShadowMockRecorder) Create(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper() |
base.py | # coding: utf-8
# pylint: disable=invalid-name, no-member
"""ctypes library of mxnet and helper functions."""
from __future__ import absolute_import
import sys
import ctypes
import atexit
import warnings
import inspect
import numpy as np
from . import libinfo
warnings.filterwarnings('default', category=DeprecationWarning)
__all__ = ['MXNetError']
#----------------------------
# library loading
#----------------------------
if sys.version_info[0] == 3:
string_types = str,
numeric_types = (float, int, np.float32, np.int32)
integer_types = int
# this function is needed for python3
# to convert ctypes.char_p .value back to python str
py_str = lambda x: x.decode('utf-8')
else:
string_types = basestring,
numeric_types = (float, int, long, np.float32, np.int32)
integer_types = (int, long)
py_str = lambda x: x
class _NullType(object):
"""Placeholder for arguments"""
def __repr__(self):
return '_Null'
_Null = _NullType()
class MXNetError(Exception):
"""Error that will be throwed by all mxnet functions."""
pass
class NotImplementedForSymbol(MXNetError):
def __init__(self, function, alias, *args):
super(NotImplementedForSymbol, self).__init__()
self.function = function.__name__
self.alias = alias
self.args = [str(type(a)) for a in args]
def __str__(self):
msg = 'Function {}'.format(self.function)
if self.alias:
msg += ' (namely operator "{}")'.format(self.alias)
if self.args:
msg += ' with arguments ({})'.format(', '.join(self.args))
msg += ' is not implemented for Symbol and only available in NDArray.'
return msg
def _load_lib():
"""Load library by searching possible path."""
lib_path = libinfo.find_lib_path()
lib = ctypes.CDLL(lib_path[0], ctypes.RTLD_LOCAL)
# declare the return type of the error-message getter
lib.MXGetLastError.restype = ctypes.c_char_p
return lib
# version number
__version__ = libinfo.__version__
# library instance of mxnet
_LIB = _load_lib()
# type definitions
mx_uint = ctypes.c_uint
mx_float = ctypes.c_float
mx_float_p = ctypes.POINTER(mx_float)
mx_real_t = np.float32
NDArrayHandle = ctypes.c_void_p
FunctionHandle = ctypes.c_void_p
OpHandle = ctypes.c_void_p
CachedOpHandle = ctypes.c_void_p
SymbolHandle = ctypes.c_void_p
ExecutorHandle = ctypes.c_void_p
DataIterCreatorHandle = ctypes.c_void_p
DataIterHandle = ctypes.c_void_p
KVStoreHandle = ctypes.c_void_p
RecordIOHandle = ctypes.c_void_p
RtcHandle = ctypes.c_void_p
#----------------------------
# helper function definition
#----------------------------
def check_call(ret):
"""Check the return value of C API call.
This function will raise an exception when an error occurs.
Wrap every API call with this function.
Parameters
----------
ret : int
return value from API calls.
"""
if ret != 0:
raise MXNetError(py_str(_LIB.MXGetLastError()))
if sys.version_info[0] < 3:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = mx.base.c_str("Hello, World")
>>> print x.value
Hello, World
"""
return ctypes.c_char_p(string)
else:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = mx.base.c_str("Hello, World")
>>> print x.value
Hello, World
"""
return ctypes.c_char_p(string.encode('utf-8'))
def c_array(ctype, values):
"""Create ctypes array from a Python array.
Parameters
----------
ctype : ctypes data type
Data type of the array we want to convert to, such as mx_float.
values : tuple or list
Data content.
Returns
-------
out : ctypes array
Created ctypes array.
Examples
--------
>>> x = mx.base.c_array(mx.base.mx_float, [1, 2, 3])
>>> print len(x)
3
>>> x[1]
2.0
"""
return (ctype * len(values))(*values)
def ctypes2buffer(cptr, length):
"""Convert ctypes pointer to buffer type.
Parameters
----------
cptr : ctypes.POINTER(ctypes.c_char)
Pointer to the raw memory region.
length : int
The length of the buffer.
Returns
-------
buffer : bytearray
The raw byte memory buffer.
"""
if not isinstance(cptr, ctypes.POINTER(ctypes.c_char)):
raise TypeError('expected char pointer')
res = bytearray(length)
rptr = (ctypes.c_char * length).from_buffer(res)
if not ctypes.memmove(rptr, cptr, length):
raise RuntimeError('memmove failed')
return res
def ctypes2numpy_shared(cptr, shape):
"""Convert a ctypes pointer to a numpy array.
The resulting NumPy array shares the memory with the pointer.
Parameters
----------
cptr : ctypes.POINTER(mx_float)
pointer to the memory region
shape : tuple
Shape of target `NDArray`.
Returns
-------
out : numpy_array
A numpy array that shares memory with the given pointer.
"""
if not isinstance(cptr, ctypes.POINTER(mx_float)):
raise RuntimeError('expected float pointer')
size = 1
for s in shape:
size *= s
dbuffer = (mx_float * size).from_address(ctypes.addressof(cptr.contents))
return np.frombuffer(dbuffer, dtype=np.float32).reshape(shape)
def build_param_doc(arg_names, arg_types, arg_descs, remove_dup=True):
"""Build argument docs in python style.
arg_names : list of str
Argument names.
arg_types : list of str
Argument type information.
arg_descs : list of str
Argument description information.
remove_dup : boolean, optional
Whether remove duplication or not.
Returns
-------
docstr : str
Python docstring of parameter sections.
"""
param_keys = set()
param_str = []
for key, type_info, desc in zip(arg_names, arg_types, arg_descs):
if key in param_keys and remove_dup:
continue
if key == 'num_args':
continue
param_keys.add(key)
ret = '%s : %s' % (key, type_info)
if len(desc) != 0:
ret += '\n ' + desc
param_str.append(ret)
doc_str = ('Parameters\n' +
'----------\n' +
'%s\n')
doc_str = doc_str % ('\n'.join(param_str))
return doc_str
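# Illustrative example (not from the original source): calling
#   build_param_doc(['data', 'label'], ['NDArray', 'NDArray'], ['Input data.', 'Input label.'])
# returns roughly:
#   Parameters
#   ----------
#   data : NDArray
#       Input data.
#   label : NDArray
#       Input label.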
def _notify_shutdown():
"""Notify MXNet about a shutdown."""
check_call(_LIB.MXNotifyShutdown())
atexit.register(_notify_shutdown)
def add_fileline_to_docstring(module, incursive=True):
"""Append the definition position to each function contained in module.
Examples
--------
# Put the following codes at the end of a file
add_fileline_to_docstring(__name__)
"""
def _add_fileline(obj):
"""Add fileinto to a object.
"""
if obj.__doc__ is None or 'From:' in obj.__doc__:
return
fname = inspect.getsourcefile(obj)
if fname is None:
return
try:
line = inspect.getsourcelines(obj)[-1]
except IOError:
return
obj.__doc__ += '\n\nFrom:%s:%d' % (fname, line)
if isinstance(module, str):
module = sys.modules[module]
for _, obj in inspect.getmembers(module):
if inspect.isbuiltin(obj):
|
if inspect.isfunction(obj):
_add_fileline(obj)
if inspect.ismethod(obj):
_add_fileline(obj.__func__)
if inspect.isclass(obj) and incursive:
add_fileline_to_docstring(obj, False)
| continue |
test_ordering_service_on_view_change.py | import pytest
from plenum.common.messages.internal_messages import ViewChangeStarted, NewViewAccepted, NewViewCheckpointsApplied
from plenum.common.messages.node_messages import OldViewPrePrepareRequest, OldViewPrePrepareReply
from plenum.common.util import updateNamedTuple
from plenum.server.consensus.batch_id import BatchID
from plenum.server.consensus.ordering_service import OrderingService
from plenum.server.consensus.utils import preprepare_to_batch_id
from plenum.server.replica_helper import generateName
from plenum.test.consensus.helper import copy_shared_data, create_batches, \
check_service_changed_only_owned_fields_in_shared_data, create_new_view, \
create_pre_prepares, create_batches_from_preprepares
from plenum.test.consensus.order_service.helper import check_prepares_sent, check_request_old_view_preprepares_sent, \
check_reply_old_view_preprepares_sent
from plenum.test.helper import create_pre_prepare_no_bls, generate_state_root, create_prepare, create_commit_no_bls_sig
from plenum.test.consensus.order_service.conftest import orderer as _orderer
applied_pre_prepares = 0
@pytest.fixture(params=[True, False], ids=['Primary', 'Non-Primary'])
def is_primary(request):
return request.param == 'Primary'
@pytest.fixture()
def orderer(_orderer, is_primary, ):
_orderer.name = 'Alpha:0'
_orderer._data.primary_name = 'some_node:0' if not is_primary else _orderer.name
def _apply_and_validate_applied_pre_prepare_fake(pp, sender):
global applied_pre_prepares
applied_pre_prepares += 1
_orderer._can_process_pre_prepare = lambda pp, sender: None
_orderer._apply_and_validate_applied_pre_prepare = _apply_and_validate_applied_pre_prepare_fake
return _orderer
@pytest.fixture()
def initial_view_no():
return 3
@pytest.fixture()
def pre_prepares(initial_view_no):
|
@pytest.fixture(params=['all', 'first', 'last', 'no'])
def stored_old_view_pre_prepares(request, pre_prepares):
if request.param == 'all':
return pre_prepares
if request.param == 'first':
return [pre_prepares[0]]
if request.param == 'last':
return [pre_prepares[-1]]
return []
@pytest.fixture(params=['all', 'first', 'last', 'no'])
def requested_old_view_pre_prepares(request, pre_prepares):
if request.param == 'all':
return pre_prepares
if request.param == 'first':
return [pre_prepares[0]]
if request.param == 'last':
return [pre_prepares[-1]]
return []
def test_update_shared_data_on_view_change_started(internal_bus, orderer):
orderer._data.preprepared = create_batches(view_no=3)
orderer._data.prepared = create_batches(view_no=3)
old_data = copy_shared_data(orderer._data)
internal_bus.send(ViewChangeStarted(view_no=4))
new_data = copy_shared_data(orderer._data)
check_service_changed_only_owned_fields_in_shared_data(OrderingService, old_data, new_data)
assert orderer._data.preprepared == []
assert orderer._data.prepared == []
def test_clear_data_on_view_change_started(internal_bus, orderer):
pp = create_pre_prepare_no_bls(generate_state_root(),
view_no=0, pp_seq_no=10, inst_id=0,
audit_txn_root="HSai3sMHKeAva4gWMabDrm1yNhezvPHfXnGyHf2ex1L4")
prepare = create_prepare(req_key=(0, 10),
state_root=generate_state_root(), inst_id=0)
commit = create_commit_no_bls_sig(req_key=(0, 10), inst_id=0)
key = (pp.viewNo, pp.ppSeqNo)
orderer.prePrepares[key] = pp
orderer.prepares[key] = prepare
orderer.commits[key] = commit
orderer.pre_prepare_tss[key][pp.auditTxnRootHash, "Node1"] = 1234
orderer.prePreparesPendingFinReqs.append(pp)
orderer.prePreparesPendingPrevPP[key] = pp
orderer.sent_preprepares[key] = pp
orderer.batches[key] = [pp.ledgerId, pp.discarded,
pp.ppTime, generate_state_root(), len(pp.reqIdr)]
orderer.ordered.add(*key)
internal_bus.send(ViewChangeStarted(view_no=4))
assert not orderer.prePrepares
assert not orderer.prepares
assert not orderer.commits
assert not orderer.pre_prepare_tss
assert not orderer.prePreparesPendingFinReqs
assert not orderer.prePreparesPendingPrevPP
assert not orderer.sent_preprepares
assert not orderer.batches
assert not orderer.ordered
def test_stores_old_pre_prepares_on_view_change_started(internal_bus, orderer):
pp1 = create_pre_prepare_no_bls(generate_state_root(),
view_no=0, pp_seq_no=1, inst_id=0)
pp2 = create_pre_prepare_no_bls(generate_state_root(),
view_no=0, pp_seq_no=2, inst_id=0)
pp3 = create_pre_prepare_no_bls(generate_state_root(),
view_no=1, pp_seq_no=3, inst_id=0)
pp4 = create_pre_prepare_no_bls(generate_state_root(),
view_no=2, pp_seq_no=4, inst_id=0)
pp5 = create_pre_prepare_no_bls(generate_state_root(),
view_no=3, pp_seq_no=5, inst_id=0)
pp6 = create_pre_prepare_no_bls(generate_state_root(),
view_no=3, pp_seq_no=6, inst_id=0)
orderer.prePrepares[(pp1.viewNo, pp1.ppSeqNo)] = pp1
orderer.prePrepares[(pp3.viewNo, pp3.ppSeqNo)] = pp3
orderer.sent_preprepares[(pp2.viewNo, pp2.ppSeqNo)] = pp2
orderer.sent_preprepares[(pp4.viewNo, pp4.ppSeqNo)] = pp4
assert not orderer.old_view_preprepares
internal_bus.send(ViewChangeStarted(view_no=4))
assert orderer.old_view_preprepares[(pp1.viewNo, pp1.ppSeqNo, pp1.digest)] == pp1
assert orderer.old_view_preprepares[(pp2.viewNo, pp2.ppSeqNo, pp2.digest)] == pp2
assert orderer.old_view_preprepares[(pp3.viewNo, pp3.ppSeqNo, pp3.digest)] == pp3
assert orderer.old_view_preprepares[(pp4.viewNo, pp4.ppSeqNo, pp4.digest)] == pp4
# next calls append to existing data
orderer.prePrepares[(pp5.viewNo, pp5.ppSeqNo)] = pp5
orderer.sent_preprepares[(pp6.viewNo, pp6.ppSeqNo)] = pp6
internal_bus.send(ViewChangeStarted(view_no=4))
assert orderer.old_view_preprepares[(pp1.viewNo, pp1.ppSeqNo, pp1.digest)] == pp1
assert orderer.old_view_preprepares[(pp2.viewNo, pp2.ppSeqNo, pp2.digest)] == pp2
assert orderer.old_view_preprepares[(pp3.viewNo, pp3.ppSeqNo, pp3.digest)] == pp3
assert orderer.old_view_preprepares[(pp4.viewNo, pp4.ppSeqNo, pp4.digest)] == pp4
assert orderer.old_view_preprepares[(pp5.viewNo, pp5.ppSeqNo, pp5.digest)] == pp5
assert orderer.old_view_preprepares[(pp6.viewNo, pp6.ppSeqNo, pp6.digest)] == pp6
def test_do_nothing_on_new_view_accepted(internal_bus, orderer):
orderer._data.preprepared = create_batches(view_no=0)
orderer._data.prepared = create_batches(view_no=0)
old_data = copy_shared_data(orderer._data)
initial_view_no = 3
new_view = create_new_view(initial_view_no=initial_view_no, stable_cp=200)
internal_bus.send(NewViewAccepted(view_no=initial_view_no + 1,
view_changes=new_view.viewChanges,
checkpoint=new_view.checkpoint,
batches=new_view.batches))
new_data = copy_shared_data(orderer._data)
assert old_data == new_data
def test_update_shared_data_on_new_view_checkpoint_applied(internal_bus, orderer):
initial_view_no = 3
orderer._data.preprepared = []
orderer._data.prepared = []
orderer._data.view_no = initial_view_no + 1
old_data = copy_shared_data(orderer._data)
new_view = create_new_view(initial_view_no=initial_view_no, stable_cp=200)
internal_bus.send(NewViewCheckpointsApplied(view_no=initial_view_no + 1,
view_changes=new_view.viewChanges,
checkpoint=new_view.checkpoint,
batches=new_view.batches))
new_data = copy_shared_data(orderer._data)
check_service_changed_only_owned_fields_in_shared_data(OrderingService, old_data, new_data)
# Since we didn't order the PrePrepare from Batches, it should not be added into shared data
# (we will request the PrePrepares instead, see next tests)
assert orderer._data.preprepared == []
assert orderer._data.prepared == []
@pytest.mark.parametrize('all_ordered', [True, False], ids=['All-ordered', 'All-non-ordered'])
def test_process_preprepare_on_new_view_checkpoint_applied(internal_bus, external_bus,
orderer, is_primary,
all_ordered,
initial_view_no,
pre_prepares, stored_old_view_pre_prepares):
# !!!SETUP!!!
orderer._data.view_no = initial_view_no + 1
batches = create_batches_from_preprepares(pre_prepares)
orderer._data.prev_view_prepare_cert = batches[-1].pp_seq_no
new_view = create_new_view(initial_view_no=initial_view_no, stable_cp=200,
batches=batches)
# emulate that we received all PrePrepares before View Change
orderer._update_old_view_preprepares(stored_old_view_pre_prepares)
# emulate that we've already ordered the PrePrepares
if all_ordered and stored_old_view_pre_prepares:
orderer.last_ordered_3pc = (initial_view_no, stored_old_view_pre_prepares[-1].ppSeqNo)
# !!!EXECUTE!!!
# send NewViewCheckpointsApplied
internal_bus.send(NewViewCheckpointsApplied(view_no=initial_view_no + 1,
view_changes=new_view.viewChanges,
checkpoint=new_view.checkpoint,
batches=new_view.batches))
# !!!CHECK!!!
if not orderer.is_master:
# no re-ordering is expected on non-master
assert orderer._data.preprepared == []
assert orderer._data.prepared == []
return
# check that PPs were added
stored_batch_ids = [preprepare_to_batch_id(pp) for pp in stored_old_view_pre_prepares]
assert orderer._data.preprepared == [BatchID(view_no=initial_view_no + 1, pp_view_no=initial_view_no,
pp_seq_no=batch_id.pp_seq_no, pp_digest=batch_id.pp_digest)
for batch_id in new_view.batches if batch_id in stored_batch_ids]
# check that sentPrePrepares is updated in case of Primary and prePrepares in case of non-primary
updated_prepares_collection = orderer.prePrepares if not is_primary else orderer.sent_preprepares
non_updated_prepares_collection = orderer.sent_preprepares if not is_primary else orderer.prePrepares
for pp in stored_old_view_pre_prepares:
new_pp = updateNamedTuple(pp, viewNo=initial_view_no + 1, originalViewNo=pp.viewNo)
assert (initial_view_no + 1, new_pp.ppSeqNo) in updated_prepares_collection
assert updated_prepares_collection[(initial_view_no + 1, new_pp.ppSeqNo)] == new_pp
assert not non_updated_prepares_collection
# check that Prepare is sent in case of non primary
if not is_primary:
check_prepares_sent(external_bus, stored_old_view_pre_prepares, initial_view_no + 1)
else:
# only MessageReqs are sent
assert len(external_bus.sent_messages) == len(pre_prepares) - len(stored_old_view_pre_prepares)
# we don't have a quorum of Prepares yet
assert orderer._data.prepared == []
# check that missing PrePrepares have been requested
expected_requested_batches = [batch_id for batch_id in new_view.batches if batch_id not in stored_batch_ids]
check_request_old_view_preprepares_sent(external_bus, expected_requested_batches)
def test_send_reply_on_old_view_pre_prepares_request(external_bus, orderer,
initial_view_no,
stored_old_view_pre_prepares,
requested_old_view_pre_prepares):
# Setup
orderer._data.view_no = initial_view_no + 2
orderer._update_old_view_preprepares(stored_old_view_pre_prepares)
# Receive OldViewPrePrepareRequest req
batches = [preprepare_to_batch_id(pp) for pp in requested_old_view_pre_prepares]
req = OldViewPrePrepareRequest(0, batches)
frm = "node1"
orderer._network.process_incoming(req, generateName(frm, orderer._data.inst_id))
# Check that OldViewPrePrepareReply is sent for all requested PrePrepares
if not orderer.is_master:
assert len(external_bus.sent_messages) == 0
return
# equal to a set intersection (PrePrepares that are both stored and requested)
expected_pps = [i for i in stored_old_view_pre_prepares if i in requested_old_view_pre_prepares]
expected_pps = sorted(expected_pps, key=lambda pp: pp.ppSeqNo)
check_reply_old_view_preprepares_sent(external_bus, frm, expected_pps)
def test_process_preprepare_on_old_view_pre_prepares_reply(external_bus, internal_bus,
orderer, is_primary,
initial_view_no,
pre_prepares):
# !!!SETUP!!!
orderer._data.view_no = initial_view_no + 1
orderer._data.prev_view_prepare_cert = orderer.lastPrePrepareSeqNo + 1
new_view = create_new_view(initial_view_no=initial_view_no, stable_cp=200,
batches=create_batches_from_preprepares(pre_prepares))
orderer._data.new_view = new_view
# !!!EXECUTE!!!
rep = OldViewPrePrepareReply(0, [pp._asdict() for pp in pre_prepares])
orderer._network.process_incoming(rep, generateName("node1", orderer._data.inst_id))
# !!!CHECK!!!
if not orderer.is_master:
# no re-ordering is expected on non-master
assert orderer._data.preprepared == []
assert orderer._data.prepared == []
return
# check that PPs were added
assert orderer._data.preprepared == [BatchID(view_no=initial_view_no + 1, pp_view_no=pp.viewNo,
pp_seq_no=pp.ppSeqNo, pp_digest=pp.digest)
for pp in pre_prepares]
# check that sent_preprepares is updated in case of Primary and prePrepares in case of non-primary
updated_prepares_collection = orderer.prePrepares if not is_primary else orderer.sent_preprepares
non_updated_prepares_collection = orderer.sent_preprepares if not is_primary else orderer.prePrepares
for pp in pre_prepares:
new_pp = updateNamedTuple(pp, viewNo=initial_view_no + 1, originalViewNo=pp.viewNo)
assert (initial_view_no + 1, new_pp.ppSeqNo) in updated_prepares_collection
assert updated_prepares_collection[(initial_view_no + 1, new_pp.ppSeqNo)] == new_pp
assert not non_updated_prepares_collection
# check that Prepare is sent in case of non primary
if not is_primary:
check_prepares_sent(external_bus, pre_prepares, initial_view_no + 1)
else:
assert len(external_bus.sent_messages) == 0
# we don't have a quorum of Prepares yet
assert orderer._data.prepared == []
| return create_pre_prepares(view_no=initial_view_no) |
main.rs | fn | () {
// let connection = sqlite::open(":memory:").unwrap();
// let connection = sqlite3::sqlite::open(":memory:").unwrap();
// let connection = sqlite3::open(":memory:").unwrap();
let connection = sqlite3::open("/tmp/work/db.sqlite3").unwrap();
connection
.execute(
"
CREATE TABLE users (name TEXT, age INTEGER);
INSERT INTO users (name, age) VALUES ('Alice', 42);
INSERT INTO users (name, age) VALUES ('Bob', 69);
",
)
.unwrap();
connection
.iterate("SELECT * FROM users WHERE age > 50", |pairs| {
for &(column, value) in pairs.iter() {
println!("{} = {}", column, value.unwrap());
}
true
})
.unwrap();
}
| main |
__init__.py | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from __future__ import absolute_import
from .change_database_tools_connection_compartment_details import ChangeDatabaseToolsConnectionCompartmentDetails
from .change_database_tools_private_endpoint_compartment_details import ChangeDatabaseToolsPrivateEndpointCompartmentDetails
from .create_database_tools_connection_details import CreateDatabaseToolsConnectionDetails
from .create_database_tools_connection_oracle_database_details import CreateDatabaseToolsConnectionOracleDatabaseDetails
from .create_database_tools_private_endpoint_details import CreateDatabaseToolsPrivateEndpointDetails
from .create_database_tools_related_resource_details import CreateDatabaseToolsRelatedResourceDetails
from .database_tools_allowed_network_sources import DatabaseToolsAllowedNetworkSources
from .database_tools_connection import DatabaseToolsConnection
from .database_tools_connection_collection import DatabaseToolsConnectionCollection
from .database_tools_connection_oracle_database import DatabaseToolsConnectionOracleDatabase
from .database_tools_connection_oracle_database_summary import DatabaseToolsConnectionOracleDatabaseSummary
from .database_tools_connection_summary import DatabaseToolsConnectionSummary
from .database_tools_endpoint_service import DatabaseToolsEndpointService
from .database_tools_endpoint_service_collection import DatabaseToolsEndpointServiceCollection
from .database_tools_endpoint_service_summary import DatabaseToolsEndpointServiceSummary
from .database_tools_key_store import DatabaseToolsKeyStore
from .database_tools_key_store_content import DatabaseToolsKeyStoreContent
from .database_tools_key_store_content_details import DatabaseToolsKeyStoreContentDetails
from .database_tools_key_store_content_secret_id import DatabaseToolsKeyStoreContentSecretId
from .database_tools_key_store_content_secret_id_details import DatabaseToolsKeyStoreContentSecretIdDetails
from .database_tools_key_store_content_secret_id_summary import DatabaseToolsKeyStoreContentSecretIdSummary
from .database_tools_key_store_content_summary import DatabaseToolsKeyStoreContentSummary
from .database_tools_key_store_details import DatabaseToolsKeyStoreDetails
from .database_tools_key_store_password import DatabaseToolsKeyStorePassword
from .database_tools_key_store_password_details import DatabaseToolsKeyStorePasswordDetails
from .database_tools_key_store_password_secret_id import DatabaseToolsKeyStorePasswordSecretId
from .database_tools_key_store_password_secret_id_details import DatabaseToolsKeyStorePasswordSecretIdDetails
from .database_tools_key_store_password_secret_id_summary import DatabaseToolsKeyStorePasswordSecretIdSummary
from .database_tools_key_store_password_summary import DatabaseToolsKeyStorePasswordSummary
from .database_tools_key_store_summary import DatabaseToolsKeyStoreSummary
from .database_tools_private_endpoint import DatabaseToolsPrivateEndpoint
from .database_tools_private_endpoint_collection import DatabaseToolsPrivateEndpointCollection
from .database_tools_private_endpoint_reverse_connection_configuration import DatabaseToolsPrivateEndpointReverseConnectionConfiguration
from .database_tools_private_endpoint_reverse_connections_source_ip import DatabaseToolsPrivateEndpointReverseConnectionsSourceIp
from .database_tools_private_endpoint_summary import DatabaseToolsPrivateEndpointSummary
from .database_tools_related_resource import DatabaseToolsRelatedResource
from .database_tools_user_password import DatabaseToolsUserPassword
from .database_tools_user_password_details import DatabaseToolsUserPasswordDetails
from .database_tools_user_password_secret_id import DatabaseToolsUserPasswordSecretId
from .database_tools_user_password_secret_id_details import DatabaseToolsUserPasswordSecretIdDetails | from .update_database_tools_connection_details import UpdateDatabaseToolsConnectionDetails
from .update_database_tools_connection_oracle_database_details import UpdateDatabaseToolsConnectionOracleDatabaseDetails
from .update_database_tools_private_endpoint_details import UpdateDatabaseToolsPrivateEndpointDetails
from .update_database_tools_related_resource_details import UpdateDatabaseToolsRelatedResourceDetails
from .validate_database_tools_connection_details import ValidateDatabaseToolsConnectionDetails
from .validate_database_tools_connection_oracle_database_details import ValidateDatabaseToolsConnectionOracleDatabaseDetails
from .validate_database_tools_connection_oracle_database_result import ValidateDatabaseToolsConnectionOracleDatabaseResult
from .validate_database_tools_connection_result import ValidateDatabaseToolsConnectionResult
from .work_request import WorkRequest
from .work_request_collection import WorkRequestCollection
from .work_request_error import WorkRequestError
from .work_request_error_collection import WorkRequestErrorCollection
from .work_request_log_entry import WorkRequestLogEntry
from .work_request_log_entry_collection import WorkRequestLogEntryCollection
from .work_request_resource import WorkRequestResource
from .work_request_summary import WorkRequestSummary
# Maps type names to classes for database_tools services.
database_tools_type_mapping = {
"ChangeDatabaseToolsConnectionCompartmentDetails": ChangeDatabaseToolsConnectionCompartmentDetails,
"ChangeDatabaseToolsPrivateEndpointCompartmentDetails": ChangeDatabaseToolsPrivateEndpointCompartmentDetails,
"CreateDatabaseToolsConnectionDetails": CreateDatabaseToolsConnectionDetails,
"CreateDatabaseToolsConnectionOracleDatabaseDetails": CreateDatabaseToolsConnectionOracleDatabaseDetails,
"CreateDatabaseToolsPrivateEndpointDetails": CreateDatabaseToolsPrivateEndpointDetails,
"CreateDatabaseToolsRelatedResourceDetails": CreateDatabaseToolsRelatedResourceDetails,
"DatabaseToolsAllowedNetworkSources": DatabaseToolsAllowedNetworkSources,
"DatabaseToolsConnection": DatabaseToolsConnection,
"DatabaseToolsConnectionCollection": DatabaseToolsConnectionCollection,
"DatabaseToolsConnectionOracleDatabase": DatabaseToolsConnectionOracleDatabase,
"DatabaseToolsConnectionOracleDatabaseSummary": DatabaseToolsConnectionOracleDatabaseSummary,
"DatabaseToolsConnectionSummary": DatabaseToolsConnectionSummary,
"DatabaseToolsEndpointService": DatabaseToolsEndpointService,
"DatabaseToolsEndpointServiceCollection": DatabaseToolsEndpointServiceCollection,
"DatabaseToolsEndpointServiceSummary": DatabaseToolsEndpointServiceSummary,
"DatabaseToolsKeyStore": DatabaseToolsKeyStore,
"DatabaseToolsKeyStoreContent": DatabaseToolsKeyStoreContent,
"DatabaseToolsKeyStoreContentDetails": DatabaseToolsKeyStoreContentDetails,
"DatabaseToolsKeyStoreContentSecretId": DatabaseToolsKeyStoreContentSecretId,
"DatabaseToolsKeyStoreContentSecretIdDetails": DatabaseToolsKeyStoreContentSecretIdDetails,
"DatabaseToolsKeyStoreContentSecretIdSummary": DatabaseToolsKeyStoreContentSecretIdSummary,
"DatabaseToolsKeyStoreContentSummary": DatabaseToolsKeyStoreContentSummary,
"DatabaseToolsKeyStoreDetails": DatabaseToolsKeyStoreDetails,
"DatabaseToolsKeyStorePassword": DatabaseToolsKeyStorePassword,
"DatabaseToolsKeyStorePasswordDetails": DatabaseToolsKeyStorePasswordDetails,
"DatabaseToolsKeyStorePasswordSecretId": DatabaseToolsKeyStorePasswordSecretId,
"DatabaseToolsKeyStorePasswordSecretIdDetails": DatabaseToolsKeyStorePasswordSecretIdDetails,
"DatabaseToolsKeyStorePasswordSecretIdSummary": DatabaseToolsKeyStorePasswordSecretIdSummary,
"DatabaseToolsKeyStorePasswordSummary": DatabaseToolsKeyStorePasswordSummary,
"DatabaseToolsKeyStoreSummary": DatabaseToolsKeyStoreSummary,
"DatabaseToolsPrivateEndpoint": DatabaseToolsPrivateEndpoint,
"DatabaseToolsPrivateEndpointCollection": DatabaseToolsPrivateEndpointCollection,
"DatabaseToolsPrivateEndpointReverseConnectionConfiguration": DatabaseToolsPrivateEndpointReverseConnectionConfiguration,
"DatabaseToolsPrivateEndpointReverseConnectionsSourceIp": DatabaseToolsPrivateEndpointReverseConnectionsSourceIp,
"DatabaseToolsPrivateEndpointSummary": DatabaseToolsPrivateEndpointSummary,
"DatabaseToolsRelatedResource": DatabaseToolsRelatedResource,
"DatabaseToolsUserPassword": DatabaseToolsUserPassword,
"DatabaseToolsUserPasswordDetails": DatabaseToolsUserPasswordDetails,
"DatabaseToolsUserPasswordSecretId": DatabaseToolsUserPasswordSecretId,
"DatabaseToolsUserPasswordSecretIdDetails": DatabaseToolsUserPasswordSecretIdDetails,
"DatabaseToolsUserPasswordSecretIdSummary": DatabaseToolsUserPasswordSecretIdSummary,
"DatabaseToolsUserPasswordSummary": DatabaseToolsUserPasswordSummary,
"DatabaseToolsVirtualSource": DatabaseToolsVirtualSource,
"UpdateDatabaseToolsConnectionDetails": UpdateDatabaseToolsConnectionDetails,
"UpdateDatabaseToolsConnectionOracleDatabaseDetails": UpdateDatabaseToolsConnectionOracleDatabaseDetails,
"UpdateDatabaseToolsPrivateEndpointDetails": UpdateDatabaseToolsPrivateEndpointDetails,
"UpdateDatabaseToolsRelatedResourceDetails": UpdateDatabaseToolsRelatedResourceDetails,
"ValidateDatabaseToolsConnectionDetails": ValidateDatabaseToolsConnectionDetails,
"ValidateDatabaseToolsConnectionOracleDatabaseDetails": ValidateDatabaseToolsConnectionOracleDatabaseDetails,
"ValidateDatabaseToolsConnectionOracleDatabaseResult": ValidateDatabaseToolsConnectionOracleDatabaseResult,
"ValidateDatabaseToolsConnectionResult": ValidateDatabaseToolsConnectionResult,
"WorkRequest": WorkRequest,
"WorkRequestCollection": WorkRequestCollection,
"WorkRequestError": WorkRequestError,
"WorkRequestErrorCollection": WorkRequestErrorCollection,
"WorkRequestLogEntry": WorkRequestLogEntry,
"WorkRequestLogEntryCollection": WorkRequestLogEntryCollection,
"WorkRequestResource": WorkRequestResource,
"WorkRequestSummary": WorkRequestSummary
} | from .database_tools_user_password_secret_id_summary import DatabaseToolsUserPasswordSecretIdSummary
from .database_tools_user_password_summary import DatabaseToolsUserPasswordSummary
from .database_tools_virtual_source import DatabaseToolsVirtualSource |
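# Hypothetical usage sketch (not part of the generated SDK module): the mapping lets a
# deserializer resolve the concrete model class from the type name carried in a payload.
example_cls = database_tools_type_mapping["DatabaseToolsConnection"]
assert example_cls is DatabaseToolsConnection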
api_op_CreateNotificationSubscription.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package workdocs
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/workdocs/types"
smithy "github.com/awslabs/smithy-go"
"github.com/awslabs/smithy-go/middleware"
smithyhttp "github.com/awslabs/smithy-go/transport/http"
)
// Configure Amazon WorkDocs to use Amazon SNS notifications. The endpoint receives
// a confirmation message, and must confirm the subscription. For more information,
// see Subscribe to Notifications
// (https://docs.aws.amazon.com/workdocs/latest/developerguide/subscribe-notifications.html)
// in the Amazon WorkDocs Developer Guide.
func (c *Client) CreateNotificationSubscription(ctx context.Context, params *CreateNotificationSubscriptionInput, optFns ...func(*Options)) (*CreateNotificationSubscriptionOutput, error) {
stack := middleware.NewStack("CreateNotificationSubscription", smithyhttp.NewStackRequest)
options := c.options.Copy()
for _, fn := range optFns {
fn(&options)
}
addawsRestjson1_serdeOpCreateNotificationSubscriptionMiddlewares(stack)
awsmiddleware.AddRequestInvocationIDMiddleware(stack)
smithyhttp.AddContentLengthMiddleware(stack)
addResolveEndpointMiddleware(stack, options)
v4.AddComputePayloadSHA256Middleware(stack)
addRetryMiddlewares(stack, options)
addHTTPSignerV4Middleware(stack, options)
awsmiddleware.AddAttemptClockSkewMiddleware(stack)
addClientUserAgent(stack)
smithyhttp.AddErrorCloseResponseBodyMiddleware(stack)
smithyhttp.AddCloseResponseBodyMiddleware(stack)
addOpCreateNotificationSubscriptionValidationMiddleware(stack)
stack.Initialize.Add(newServiceMetadataMiddleware_opCreateNotificationSubscription(options.Region), middleware.Before)
addRequestIDRetrieverMiddleware(stack)
addResponseErrorMiddleware(stack)
for _, fn := range options.APIOptions {
if err := fn(stack); err != nil {
return nil, err
}
}
handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
result, metadata, err := handler.Handle(ctx, params)
if err != nil {
return nil, &smithy.OperationError{
ServiceID: ServiceID,
OperationName: "CreateNotificationSubscription",
Err: err,
}
}
out := result.(*CreateNotificationSubscriptionOutput)
out.ResultMetadata = metadata
return out, nil
}
type CreateNotificationSubscriptionInput struct {
// The endpoint to receive the notifications. If the protocol is HTTPS, the
// endpoint is a URL that begins with https.
//
// This member is required.
Endpoint *string
// The ID of the organization.
//
// This member is required.
OrganizationId *string
// The protocol to use. The supported value is https, which delivers JSON-encoded
// messages using HTTPS POST.
//
// This member is required.
Protocol types.SubscriptionProtocolType
// The notification type.
//
// This member is required.
SubscriptionType types.SubscriptionType
}
type CreateNotificationSubscriptionOutput struct {
// The subscription.
Subscription *types.Subscription
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
}
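// Hypothetical usage sketch (not part of the generated file): a caller with an existing
// *Client can subscribe an HTTPS endpoint for all activity. The enum string values
// "HTTPS" and "ALL" are assumptions based on the field documentation above, not
// verified constants from the types package.
func exampleSubscribeAllActivity(ctx context.Context, c *Client, endpoint, orgID string) (*CreateNotificationSubscriptionOutput, error) {
	return c.CreateNotificationSubscription(ctx, &CreateNotificationSubscriptionInput{
		Endpoint:         &endpoint,
		OrganizationId:   &orgID,
		Protocol:         types.SubscriptionProtocolType("HTTPS"),
		SubscriptionType: types.SubscriptionType("ALL"),
	})
}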
func | (stack *middleware.Stack) {
stack.Serialize.Add(&awsRestjson1_serializeOpCreateNotificationSubscription{}, middleware.After)
stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateNotificationSubscription{}, middleware.After)
}
func newServiceMetadataMiddleware_opCreateNotificationSubscription(region string) awsmiddleware.RegisterServiceMetadata {
return awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "workdocs",
OperationName: "CreateNotificationSubscription",
}
}
| addawsRestjson1_serdeOpCreateNotificationSubscriptionMiddlewares |
FortmaticConnector.ts | import { ChainId } from '@mitz/schems'
import { FortmaticConnector as BaseFortmaticConnector } from '@web3-react/fortmatic-connector'
import { getConfiguration } from '../configuration'
import { ProviderType } from '../types'
export class | extends BaseFortmaticConnector {
apiKeys: Record<number, string>
constructor(chainId: ChainId) {
const { apiKeys } = getConfiguration()[ProviderType.FORTMATIC]
super({ chainId, apiKey: apiKeys[chainId] })
this.apiKeys = apiKeys
}
public async getApiKey(): Promise<string> {
const chainId = await this.getChainId()
return this.apiKeys[chainId]
}
}
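// Hypothetical usage sketch (not part of the original module): the connector is keyed by
// chain id and resolves its Fortmatic API key from the configuration. The numeric chain
// id (1) and the cast are assumptions about the ChainId enum, used only for illustration.
export async function exampleGetFortmaticApiKey(): Promise<string> {
  const connector = new FortmaticConnector(1 as ChainId)
  return connector.getApiKey()
}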
| FortmaticConnector |
localization.rs | use crate::{
app::AppBuilder,
assets::{
asset::AssetId, database::AssetsDatabase, protocols::localization::LocalizationAsset,
},
ecs::{
pipeline::{PipelineBuilder, PipelineBuilderError},
Universe,
},
};
use pest::{iterators::Pair, Parser};
use std::collections::HashMap;
#[allow(clippy::upper_case_acronyms)]
mod parser {
#[derive(Parser)]
#[grammar = "localization.pest"]
pub(crate) struct SentenceParser;
}
#[derive(Default)]
pub struct Localization {
default_language: Option<String>,
current_language: Option<String>,
/// { text id: { language: text format } }
map: HashMap<String, HashMap<String, String>>,
}
impl Localization {
pub fn default_language(&self) -> Option<&str> {
self.default_language.as_deref()
}
pub fn set_default_language(&mut self, value: Option<String>) {
self.default_language = value;
}
pub fn current_language(&self) -> Option<&str> {
self.current_language.as_deref()
}
pub fn set_current_language(&mut self, value: Option<String>) {
self.current_language = value.clone();
if self.default_language.is_none() && value.is_some() {
self.default_language = value;
}
}
pub fn add_text(&mut self, id: &str, language: &str, text_format: &str) {
if let Some(map) = self.map.get_mut(id) {
map.insert(language.to_owned(), text_format.to_owned());
} else {
let mut map = HashMap::new();
map.insert(language.to_owned(), text_format.to_owned());
self.map.insert(id.to_owned(), map);
}
}
pub fn remove_text(&mut self, id: &str, language: &str) -> bool {
let (empty, removed) = if let Some(map) = self.map.get_mut(id) {
let removed = map.remove(language).is_some();
let empty = map.is_empty();
(empty, removed)
} else {
(false, false)
};
if empty {
self.map.remove(id);
}
removed
}
pub fn remove_text_all(&mut self, id: &str) -> bool {
self.map.remove(id).is_some()
}
pub fn remove_language(&mut self, lang: &str) {
for map in self.map.values_mut() {
map.remove(lang);
}
}
pub fn find_text_format(&self, id: &str) -> Option<&str> {
if let Some(current) = &self.current_language {
if let Some(default) = &self.default_language {
if let Some(map) = self.map.get(id) {
return map
.get(current)
.or_else(|| map.get(default))
.map(|v| v.as_str());
}
}
}
None
}
pub fn format_text(&self, id: &str, params: &[(&str, &str)]) -> Result<String, String> {
if let Some(text_format) = self.find_text_format(id) {
match parser::SentenceParser::parse(parser::Rule::sentence, text_format) {
Ok(mut ast) => {
let pair = ast.next().unwrap();
match pair.as_rule() {
parser::Rule::sentence => Ok(Self::parse_sentence_inner(pair, params)),
_ => unreachable!(), | }
Err(error) => Err(error.to_string()),
}
} else {
Err(format!("There is no text format for id: {}", id))
}
}
fn parse_sentence_inner(pair: Pair<parser::Rule>, params: &[(&str, &str)]) -> String {
let mut result = String::new();
for p in pair.into_inner() {
match p.as_rule() {
parser::Rule::text => result.push_str(&p.as_str().replace("\\|", "|")),
parser::Rule::identifier => {
let ident = p.as_str();
if let Some((_, v)) = params.iter().find(|(id, _)| id == &ident) {
result.push_str(v);
} else {
result.push_str(&format!("{{@{}}}", ident));
}
}
_ => {}
}
}
result
}
}
#[macro_export]
macro_rules! localization_format_text {
($res:expr, $text:expr, $( $id:ident => $value:expr ),*) => {
$crate::localization::Localization::format_text(
&$res,
$text,
&[ $( (stringify!($id), &$value.to_string()) ),* ]
)
}
}
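// Hypothetical usage sketch (not part of the original module); it assumes the pest grammar
// in `localization.pest` resolves `{@name}`-style placeholders, which is what
// `parse_sentence_inner` above suggests.
#[cfg(test)]
mod localization_format_example {
    use super::*;

    #[test]
    fn formats_text_with_a_named_parameter() {
        let mut localization = Localization::default();
        localization.set_current_language(Some("en".to_owned()));
        localization.add_text("greeting", "en", "Hello, {@name}!");
        let formatted = localization_format_text!(localization, "greeting", name => "World");
        // Under the assumed grammar this yields "Hello, World!".
        assert!(formatted.is_ok());
    }
}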
#[derive(Default)]
pub struct LocalizationSystemCache {
language_table: HashMap<AssetId, String>,
}
pub type LocalizationSystemResources<'a> = (
&'a AssetsDatabase,
&'a mut Localization,
&'a mut LocalizationSystemCache,
);
pub fn localization_system(universe: &mut Universe) {
let (assets, mut localization, mut cache) =
universe.query_resources::<LocalizationSystemResources>();
for id in assets.lately_loaded_protocol("locals") {
let id = *id;
let asset = assets
.asset_by_id(id)
.expect("trying to use a localization asset that has not been loaded");
let asset = asset
.get::<LocalizationAsset>()
.expect("trying to use non-localization asset");
for (k, v) in &asset.dictionary {
localization.add_text(k, &asset.language, v);
}
cache.language_table.insert(id, asset.language.clone());
}
for id in assets.lately_unloaded_protocol("locals") {
if let Some(name) = cache.language_table.remove(id) {
localization.remove_language(&name);
}
}
}
pub fn bundle_installer<PB, PMS>(
builder: &mut AppBuilder<PB>,
_: (),
) -> Result<(), PipelineBuilderError>
where
PB: PipelineBuilder,
{
builder.install_resource(Localization::default());
builder.install_resource(LocalizationSystemCache::default());
builder.install_system::<LocalizationSystemResources>(
"localization",
localization_system,
&[],
)?;
Ok(())
} | } |
chunkLigting.ts | import { getIndex } from '../../../../common/chunk';
import { SIGHT_TRANSPARENT, blocksFlags } from '../../../blocks/blockInfo';
import {
ROW,
ROW_NESTED_CHUNK,
COLUMN,
COLUMN_NESTED_CHUNK,
SLICE,
} from '../../../../common/constants/chunk';
import {
CHUNK_STATUS_NEED_LOAD_VBO,
CHUNK_STATUS_LOADED,
} from '../../../Terrain/Chunk/chunkConstants';
import type Chunk from './Chunk';
const calcRecursion = (
mask: number,
reversedMask: number,
dec: number,
mode: number, // TODO boolean?
) => {
const calcCurrent = (chunk: Chunk, x: number, y: number, z: number, index: number): number =>
(chunk.light[index] & reversedMask) |
(Math.max(
(mode ? chunk.light[index] & mask : 0) + dec,
(z < 15 ? chunk.light[index + ROW] : chunk.eastChunk.light[index - ROW_NESTED_CHUNK]) & mask,
(z > 0 ? chunk.light[index - ROW] : chunk.westChunk.light[index + ROW_NESTED_CHUNK]) & mask,
(x < 15 ? chunk.light[index + COLUMN] : chunk.southChunk.light[index - COLUMN_NESTED_CHUNK]) &
mask,
(x > 0 ? chunk.light[index - COLUMN] : chunk.northChunk.light[index + COLUMN_NESTED_CHUNK]) &
mask,
chunk.light[index + SLICE] & mask,
chunk.light[index - SLICE] & mask,
) -
dec);
| if (lightTmp > (chunk.light[index] & mask)) {
if (chunk.state === CHUNK_STATUS_LOADED) {
chunk.state = CHUNK_STATUS_NEED_LOAD_VBO;
}
chunk.light[index] = (chunk.light[index] & reversedMask) | lightTmp;
calcRecursionInternal(chunk, ...params);
}
};
const updateIfLightRemove = (index, lightTmp, chunk: Chunk, ...params) =>
lightTmp > (chunk.light[index] & mask) && calcRecursionRemoveInternal(chunk, ...params);
const updateIfLight = mode ? updateIfLightAdd : updateIfLightRemove;
const calcNear = (
chunk: Chunk,
x: number,
y: number,
z: number,
index: number,
lightTmp: number,
limit: number,
) => (
z < 15
? updateIfLight(index + ROW, lightTmp, chunk, x, y, z + 1, limit)
: updateIfLight(index - ROW_NESTED_CHUNK, lightTmp, chunk.eastChunk, x, y, 0, limit),
z > 0
? updateIfLight(index - ROW, lightTmp, chunk, x, y, z - 1, limit)
: updateIfLight(index + ROW_NESTED_CHUNK, lightTmp, chunk.westChunk, x, y, 15, limit),
x < 15
? updateIfLight(index + COLUMN, lightTmp, chunk, x + 1, y, z, limit)
: updateIfLight(index - COLUMN_NESTED_CHUNK, lightTmp, chunk.southChunk, 0, y, z, limit),
x > 0
? updateIfLight(index - COLUMN, lightTmp, chunk, x - 1, y, z, limit)
: updateIfLight(index + COLUMN_NESTED_CHUNK, lightTmp, chunk.northChunk, 15, y, z, limit),
y < 255 && updateIfLight(index + SLICE, lightTmp, chunk, x, y + 1, z, limit),
y > 0 && updateIfLight(index - SLICE, lightTmp, chunk, x, y - 1, z, limit)
);
const calcRecursionInternal = (chunk: Chunk, x: number, y: number, z: number, limit = 0) => {
if (!limit) {
return;
}
const index = getIndex(x, y, z);
if (chunk.blocks[index] && !blocksFlags[chunk.blocks[index]][SIGHT_TRANSPARENT]) {
chunk.light[index] &= reversedMask;
} else {
chunk.light[index] = calcCurrent(chunk, x, y, z, index);
calcNear(chunk, x, y, z, index, (chunk.light[index] & mask) - dec, limit - 1);
}
};
const calcRecursionRemoveInternal = (
chunk: Chunk,
x: number,
y: number,
z: number,
limit = 0,
) => {
if (!limit) {
return;
}
const index = getIndex(x, y, z);
const lightTmp = chunk.light[index];
if (chunk.blocks[index] && !blocksFlags[chunk.blocks[index]][SIGHT_TRANSPARENT]) {
chunk.light[index] &= reversedMask;
} else {
chunk.light[index] = calcCurrent(chunk, x, y, z, index);
if (chunk.state === CHUNK_STATUS_LOADED) {
chunk.state = CHUNK_STATUS_NEED_LOAD_VBO;
}
}
if (chunk.light[index] !== lightTmp) {
calcNear(chunk, x, y, z, index, chunk.light[index] & mask, limit - 1);
}
};
return mode ? calcRecursionInternal : calcRecursionRemoveInternal;
};
export const calcRecursionRedRemove = calcRecursion(0xf000, 0x0fff, 0x1000, 0);
export const calcRecursionGreenRemove = calcRecursion(0x0f00, 0xf0ff, 0x0100, 0);
export const calcRecursionBlueRemove = calcRecursion(0x00f0, 0xff0f, 0x0010, 0);
export const calcRecursionGlobalRemove = calcRecursion(0x000f, 0xfff0, 0x0001, 0);
export const calcRecursionRed = calcRecursion(0xf000, 0x0fff, 0x1000, 1);
export const calcRecursionGreen = calcRecursion(0x0f00, 0xf0ff, 0x0100, 1);
export const calcRecursionBlue = calcRecursion(0x00f0, 0xff0f, 0x0010, 1);
export const calcRecursionGlobal = calcRecursion(0x000f, 0xfff0, 0x0001, 1); | const updateIfLightAdd = (index: number, lightTmp, chunk: Chunk, ...params) => { |
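// Hypothetical usage sketch (not part of the original module): each 16-bit light value packs
// four 4-bit channels, red (0xf000), green (0x0f00), blue (0x00f0) and global/sky light
// (0x000f), so flooding a newly placed torch re-runs the add-mode recursion once per colour
// channel. The propagation limit of 16 is an assumption, not a value from the original code.
export const floodTorchLightExample = (chunk: Chunk, x: number, y: number, z: number): void => {
  calcRecursionRed(chunk, x, y, z, 16);
  calcRecursionGreen(chunk, x, y, z, 16);
  calcRecursionBlue(chunk, x, y, z, 16);
};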
gulpfile.js | //@ts-check
const gulp = require('gulp');
const ts = require('gulp-typescript');
const babel = require('gulp-babel');
const uglify = require('gulp-uglify');
const pump = require('pump');
const rename = require('gulp-rename');
const tsProject = ts.createProject('tsconfig.json');
const buildDir = './dist'
gulp.task('ts', function () {
return gulp.src('./src/**/*.ts')
.pipe(tsProject())
.pipe(babel({
presets : ['babel-preset-env']
}))
.pipe(gulp.dest(buildDir));
});
gulp.task('tsmin', (cb) => {
pump([
gulp.src('./src/**/*.ts'),
tsProject(),
babel({
presets : ['babel-preset-env']
}),
uglify(),
rename({
suffix: '-min' | ],
cb
)
// )
// .pipe()
// .pipe()
// .pipe(rename({
// suffix: "-min"
// }))
// .pipe(gulp.dest(buildDir));
}) | }),
gulp.dest(buildDir) |
list_test.go | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package quotas
import (
"bytes"
"errors"
"testing"
"github.com/golang/mock/gomock"
"github.com/google/kf/pkg/kf/commands/config"
"github.com/google/kf/pkg/kf/commands/utils"
"github.com/google/kf/pkg/kf/quotas/fake"
"github.com/google/kf/pkg/kf/testutil"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestListQuotasCommand(t *testing.T) {
t.Parallel()
for tn, tc := range map[string]struct { | namespace string
wantErr error
args []string
setup func(t *testing.T, fakeLister *fake.FakeClient)
assert func(t *testing.T, buffer *bytes.Buffer)
}{
"invalid number of args": {
args: []string{"invalid"},
wantErr: errors.New("accepts 0 arg(s), received 1"),
},
"configured namespace": {
namespace: "some-namespace",
setup: func(t *testing.T, fakeLister *fake.FakeClient) {
fakeLister.
EXPECT().
List("some-namespace")
},
},
"returns error without specify namespace": {
wantErr: errors.New(utils.EmptyNamespaceError),
setup: func(t *testing.T, fakeLister *fake.FakeClient) {
fakeLister.
EXPECT().
List("some-namespace")
},
},
"formats multiple quotas": {
namespace: "some-namespace",
setup: func(t *testing.T, fakeLister *fake.FakeClient) {
fakeLister.
EXPECT().
List(gomock.Any()).
Return([]v1.ResourceQuota{
{ObjectMeta: metav1.ObjectMeta{Name: "quota-a"}},
{ObjectMeta: metav1.ObjectMeta{Name: "quota-b"}},
}, nil)
},
assert: func(t *testing.T, buffer *bytes.Buffer) {
header1 := "Getting quotas in namespace: "
header2 := "Found 2 quotas in namespace "
testutil.AssertContainsAll(t, buffer.String(), []string{header1, header2, "quota-a", "quota-b"})
},
},
} {
t.Run(tn, func(t *testing.T) {
ctrl := gomock.NewController(t)
fakeLister := fake.NewFakeClient(ctrl)
if tc.setup != nil {
tc.setup(t, fakeLister)
}
buffer := &bytes.Buffer{}
c := NewListQuotasCommand(&config.KfParams{
Namespace: tc.namespace,
}, fakeLister)
c.SetOutput(buffer)
c.SetArgs(tc.args)
gotErr := c.Execute()
if tc.wantErr != nil {
testutil.AssertErrorsEqual(t, tc.wantErr, gotErr)
return
}
if tc.assert != nil {
tc.assert(t, buffer)
}
testutil.AssertNil(t, "Command err", gotErr)
ctrl.Finish()
})
}
} | |
powerup_system.rs | use crate::components::{MyWorld, PlayerBox, PowerUp, BULLETTYPE, POWERUPTYPE};
use bevy::prelude::*;
pub fn powerup_system(
mut commands: Commands,
asset_server: Res<AssetServer>,
my_world: Res<MyWorld>,
time: Res<Time>,
audio: Res<Audio>,
mut query_bullets: Query<(Entity, &mut PowerUp, &mut Transform)>,
mut query_player: Query<(Entity, &mut PlayerBox)>,
) {
let (_player_entity, mut player_box) = query_player.get_mut(my_world.player.unwrap()).unwrap();
for (powerup_entity, mut powerup, mut transform) in query_bullets.iter_mut() {
if powerup.col_shape.overlaps(player_box.col_shape) {
println!("powerup");
match powerup.powerup_type {
POWERUPTYPE::Health => {
player_box.system.add_healthpack(10.0);
} | }
POWERUPTYPE::WeaponMultiFire => {
player_box.weapons.change(BULLETTYPE::Spread);
}
}
let sfx: bevy::prelude::Handle<bevy::prelude::AudioSource> =
asset_server.load("sounds/sfx/sfx_sound_neutral7.ogg");
audio.play(sfx);
commands.despawn(powerup_entity);
}
use rand::{thread_rng, Rng};
let mut rng = thread_rng();
let modx = rng.gen_range(-1.0, 1.0);
let mody = rng.gen_range(-1.0, 1.0);
*transform.translation.x_mut() += ((powerup.angle as f64).cos() * 1.0) as f32 * 2.0 + modx;
*transform.translation.y_mut() += ((powerup.angle as f64).sin() * 1.0) as f32 * 2.0 + mody;
powerup.col_shape.update(
transform.translation.x().clone(),
transform.translation.y().clone(),
(transform.scale[0].clone()) * 1.0 as f32,
(transform.scale[1].clone()) * 1.0 as f32,
);
//powerup.angle
powerup.time_remaining -= time.delta_seconds;
if powerup.time_remaining <= 0.0 {
commands.despawn(powerup_entity);
}
}
} | POWERUPTYPE::WeaponIncreaseBullets => {
player_box.weapons.max_bullets += 10; |
models.py | from django.db import models
from django.utils import timezone
from django.contrib.auth import get_user_model
# Create your models here.
class Post(models.Model):
| author = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
created = models.DateTimeField('Created Date', default=timezone.now)
title = models.CharField('Title', max_length=200)
content = models.TextField('Content')
slug = models.SlugField('Slug')
view_count = models.IntegerField("View Count", default=0)
def __str__(self):
return '"%s" by %s' % (self.title, self.author) |
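# Hypothetical usage sketch (not part of the original module): creating a post from a view
# or a management command; the field values below are placeholders.
def create_example_post(user):
    return Post.objects.create(
        author=user,
        title="First post",
        content="Hello, world.",
        slug="first-post",
    )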
|
language.py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path as osp
import re
from eiseg import pjpath
from collections import defaultdict
import json
from urllib import parse
import requests
class TransUI(object):
def __init__(self, is_trans=False):
super().__init__()
self.trans_dict = defaultdict(dict)
with open(
osp.join(pjpath, "config/zh_CN.EN"), "r",
encoding="utf-8") as f:
texts = f.readlines()
for txt in texts:
strs = txt.split("@")
self.trans_dict[strs[0].strip()] = strs[1].strip()
self.is_trans = is_trans
self.youdao_url = "http://fanyi.youdao.com/translate?&doctype=json&type=AUTO&i="
def put(self, zh_CN):
if not self.is_trans:
return zh_CN
else:
# A defaultdict returns "{}" for unknown keys instead of raising, so check membership
# explicitly and fall back to the original Chinese string when no translation exists.
if zh_CN in self.trans_dict:
return str(self.trans_dict[zh_CN])
return zh_CN
# Online dynamic translation via the Youdao web API
def tr(self, zh_CN):
try:
| tr_url = self.youdao_url + parse.quote(zh_CN)
response = requests.get(tr_url)
js = json.loads(response.text)
result_EN = js["translateResult"][0][0]["tgt"]
return str(result_EN)
except Exception:
return zh_CN
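# Hypothetical usage sketch (not part of the original module): translation is opt-in, so the
# same call site serves both the Chinese and the English UI; the label below is a placeholder.
def example_button_label(is_english_ui):
    ui = TransUI(is_trans=is_english_ui)
    return ui.put("打开")  # returns the mapped entry from config/zh_CN.EN when one exists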
|
|
test_libvirt.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import errno
import eventlet
import fixtures
import json
import mox
import os
import re
import shutil
import tempfile
from lxml import etree
from oslo.config import cfg
from xml.dom import minidom
from nova.api.ec2 import cloud
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.openstack.common import fileutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import loopingcall
from nova.openstack.common import uuidutils
from nova import test
from nova.tests import fake_network
import nova.tests.image.fake
from nova.tests import matchers
from nova.tests.virt.libvirt import fake_libvirt_utils
from nova import utils
from nova import version
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import fake
from nova.virt import firewall as base_firewall
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import firewall
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt import netutils
try:
import libvirt
except ImportError:
import nova.tests.virt.libvirt.fakelibvirt as libvirt
libvirt_driver.libvirt = libvirt
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('base_dir_name', 'nova.virt.libvirt.imagecache')
_fake_network_info = fake_network.fake_get_instance_nw_info
_fake_stub_out_get_nw_info = fake_network.stub_out_nw_api_get_instance_nw_info
_ipv4_like = fake_network.ipv4_like
def _concurrency(signal, wait, done, target):
signal.send()
wait.wait()
done.send()
class FakeVirDomainSnapshot(object):
def __init__(self, dom=None):
self.dom = dom
def delete(self, flags):
pass
class FakeVirtDomain(object):
def __init__(self, fake_xml=None, uuidstr=None):
self.uuidstr = uuidstr
if fake_xml:
self._fake_dom_xml = fake_xml
else:
self._fake_dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
</devices>
</domain>
"""
def name(self):
return "fake-domain %s" % self
def info(self):
return [power_state.RUNNING, None, None, None, None]
def create(self):
pass
def managedSave(self, *args):
pass
def createWithFlags(self, launch_flags):
pass
def XMLDesc(self, *args):
return self._fake_dom_xml
def UUIDString(self):
return self.uuidstr
class CacheConcurrencyTestCase(test.TestCase):
def setUp(self):
super(CacheConcurrencyTestCase, self).setUp()
self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
# utils.synchronized() will create the lock_path for us if it
# doesn't already exist. It will also delete it when it's done,
# which can cause race conditions with the multiple threads we
# use for tests. So, create the path here so utils.synchronized()
# won't delete it out from under one of the threads.
self.lock_path = os.path.join(CONF.instances_path, 'locks')
fileutils.ensure_tree(self.lock_path)
def fake_exists(fname):
basedir = os.path.join(CONF.instances_path, CONF.base_dir_name)
if fname == basedir or fname == self.lock_path:
return True
return False
def fake_execute(*args, **kwargs):
pass
def fake_extend(image, size):
pass
self.stubs.Set(os.path, 'exists', fake_exists)
self.stubs.Set(utils, 'execute', fake_execute)
self.stubs.Set(imagebackend.disk, 'extend', fake_extend)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.imagebackend.libvirt_utils',
fake_libvirt_utils))
def test_same_fname_concurrency(self):
# Ensures that the same fname cache runs sequentially.
uuid = uuidutils.generate_uuid()
backend = imagebackend.Backend(False)
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
sig1 = eventlet.event.Event()
thr1 = eventlet.spawn(backend.image({'name': 'instance',
'uuid': uuid},
'name').cache,
_concurrency, 'fname', None,
signal=sig1, wait=wait1, done=done1)
eventlet.sleep(0)
# Thread 1 should run before thread 2.
sig1.wait()
wait2 = eventlet.event.Event()
done2 = eventlet.event.Event()
sig2 = eventlet.event.Event()
thr2 = eventlet.spawn(backend.image({'name': 'instance',
'uuid': uuid},
'name').cache,
_concurrency, 'fname', None,
signal=sig2, wait=wait2, done=done2)
wait2.send()
eventlet.sleep(0)
try:
self.assertFalse(done2.ready())
finally:
wait1.send()
done1.wait()
eventlet.sleep(0)
self.assertTrue(done2.ready())
# Wait on greenthreads to assert they didn't raise exceptions
# during execution
thr1.wait()
thr2.wait()
def test_different_fname_concurrency(self):
# Ensures that two different fname caches are concurrent.
uuid = uuidutils.generate_uuid()
backend = imagebackend.Backend(False)
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
sig1 = eventlet.event.Event()
thr1 = eventlet.spawn(backend.image({'name': 'instance',
'uuid': uuid},
'name').cache,
_concurrency, 'fname2', None,
signal=sig1, wait=wait1, done=done1)
eventlet.sleep(0)
# Thread 1 should run before thread 2.
sig1.wait()
wait2 = eventlet.event.Event()
done2 = eventlet.event.Event()
sig2 = eventlet.event.Event()
thr2 = eventlet.spawn(backend.image({'name': 'instance',
'uuid': uuid},
'name').cache,
_concurrency, 'fname1', None,
signal=sig2, wait=wait2, done=done2)
eventlet.sleep(0)
# Wait for thread 2 to start.
sig2.wait()
wait2.send()
eventlet.sleep(0)
try:
self.assertTrue(done2.ready())
finally:
wait1.send()
eventlet.sleep(0)
# Wait on greenthreads to assert they didn't raise exceptions
# during execution
thr1.wait()
thr2.wait()
class FakeVolumeDriver(object):
def __init__(self, *args, **kwargs):
pass
def attach_volume(self, *args):
pass
def detach_volume(self, *args):
pass
def get_xml(self, *args):
return ""
class FakeConfigGuestDisk(object):
def __init__(self, *args, **kwargs):
self.source_type = None
self.driver_cache = None
class FakeConfigGuest(object):
def __init__(self, *args, **kwargs):
self.driver_cache = None
class LibvirtConnTestCase(test.TestCase):
def setUp(self):
super(LibvirtConnTestCase, self).setUp()
self.flags(fake_call=True)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.get_admin_context()
self.flags(instances_path='')
self.flags(libvirt_snapshots_directory='')
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.libvirt_utils',
fake_libvirt_utils))
# Force libvirt to return a host UUID that matches the serial in
# nova.tests.fakelibvirt. This is necessary because the host UUID
# returned by libvirt becomes the serial whose value is checked for in
# test_xml_and_uri_* below.
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.LibvirtDriver.get_host_uuid',
lambda _: 'cef19ce0-0ca2-11df-855d-b19fbce37686'))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.imagebackend.libvirt_utils',
fake_libvirt_utils))
def fake_extend(image, size):
pass
self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend)
class FakeConn():
def getCapabilities(self):
return """<capabilities>
<host><cpu><arch>x86_64</arch></cpu></host>
</capabilities>"""
def getLibVersion(self):
return (0 * 1000 * 1000) + (9 * 1000) + 11
self.conn = FakeConn()
self.stubs.Set(libvirt_driver.LibvirtDriver, '_connect',
lambda *a, **k: self.conn)
instance_type = db.instance_type_get(self.context, 5)
sys_meta = flavors.save_instance_type_info({}, instance_type)
nova.tests.image.fake.stub_out_image_service(self.stubs)
self.test_instance = {
'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
'memory_kb': '1024000',
'basepath': '/some/path',
'bridge_name': 'br100',
'vcpus': 2,
'project_id': 'fake',
'bridge': 'br101',
'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'root_gb': 10,
'ephemeral_gb': 20,
'instance_type_id': '5', # m1.small
'extra_specs': {},
'system_metadata': sys_meta}
def tearDown(self):
nova.tests.image.fake.FakeImageService_reset()
super(LibvirtConnTestCase, self).tearDown()
def create_fake_libvirt_mock(self, **kwargs):
"""Defining mocks for LibvirtDriver(libvirt is not used)."""
# A fake libvirt.virConnect
class FakeLibvirtDriver(object):
def defineXML(self, xml):
return FakeVirtDomain()
# Creating mocks
volume_driver = ('iscsi=nova.tests.virt.libvirt.test_libvirt'
'.FakeVolumeDriver')
self.flags(libvirt_volume_drivers=[volume_driver])
fake = FakeLibvirtDriver()
# Customize the fake above if necessary
for key, val in kwargs.items():
fake.__setattr__(key, val)
self.flags(libvirt_vif_driver="nova.tests.fake_network.FakeVIFDriver")
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn = fake
def fake_lookup(self, instance_name):
return FakeVirtDomain()
def fake_execute(self, *args, **kwargs):
open(args[-1], "a").close()
def create_service(self, **kwargs):
service_ref = {'host': kwargs.get('host', 'dummy'),
'binary': 'nova-compute',
'topic': 'compute',
'report_count': 0}
return db.service_create(context.get_admin_context(), service_ref)
def test_get_connector(self):
initiator = 'fake.initiator.iqn'
ip = 'fakeip'
host = 'fakehost'
wwpns = ['100010604b019419']
wwnns = ['200010604b019419']
self.flags(my_ip=ip)
self.flags(host=host)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
expected = {
'ip': ip,
'initiator': initiator,
'host': host,
'wwpns': wwpns,
'wwnns': wwnns
}
volume = {
'id': 'fake'
}
result = conn.get_volume_connector(volume)
self.assertThat(expected, matchers.DictMatches(result))
def test_get_guest_config(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(cfg.acpi, True)
self.assertEquals(cfg.apic, True)
self.assertEquals(cfg.memory, 1024 * 1024 * 2)
self.assertEquals(cfg.vcpus, 1)
self.assertEquals(cfg.os_type, vm_mode.HVM)
self.assertEquals(cfg.os_boot_dev, "hd")
self.assertEquals(cfg.os_root, None)
self.assertEquals(len(cfg.devices), 7)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestInterface)
self.assertEquals(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestInput)
self.assertEquals(type(cfg.devices[6]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEquals(type(cfg.clock),
vconfig.LibvirtConfigGuestClock)
self.assertEquals(cfg.clock.offset, "utc")
self.assertEquals(len(cfg.clock.timers), 2)
self.assertEquals(type(cfg.clock.timers[0]),
vconfig.LibvirtConfigGuestTimer)
self.assertEquals(type(cfg.clock.timers[1]),
vconfig.LibvirtConfigGuestTimer)
self.assertEquals(cfg.clock.timers[0].name, "pit")
self.assertEquals(cfg.clock.timers[0].tickpolicy,
"delay")
self.assertEquals(cfg.clock.timers[1].name, "rtc")
self.assertEquals(cfg.clock.timers[1].tickpolicy,
"catchup")
def test_get_guest_config_with_two_nics(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 2),
None, disk_info)
self.assertEquals(cfg.acpi, True)
self.assertEquals(cfg.memory, 1024 * 1024 * 2)
self.assertEquals(cfg.vcpus, 1)
self.assertEquals(cfg.os_type, vm_mode.HVM)
self.assertEquals(cfg.os_boot_dev, "hd")
self.assertEquals(cfg.os_root, None)
self.assertEquals(len(cfg.devices), 8)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestInterface)
self.assertEquals(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestInterface)
self.assertEquals(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[6]),
vconfig.LibvirtConfigGuestInput)
self.assertEquals(type(cfg.devices[7]),
vconfig.LibvirtConfigGuestGraphics)
def test_get_guest_config_bug_1118829(self):
self.flags(libvirt_type='uml')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = {'disk_bus': 'virtio',
'cdrom_bus': 'ide',
'mapping': {u'vda': {'bus': 'virtio',
'type': 'disk',
'dev': u'vda'},
'root': {'bus': 'virtio',
'type': 'disk',
'dev': 'vda'}}}
# NOTE(jdg): For this specific test leave this blank
# This will exercise the failed code path still,
# and won't require fakes and stubs of the iscsi discovery
block_device_info = {}
cfg = conn.get_guest_config(instance_ref, [], None, disk_info,
None, block_device_info)
instance_ref = db.instance_get(self.context, instance_ref['id'])
self.assertEquals(instance_ref['root_device_name'], '/dev/vda')
def test_get_guest_config_with_root_device_name(self):
self.flags(libvirt_type='uml')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
block_device_info = {'root_device_name': '/dev/vdb'}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref,
block_device_info)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info,
None, block_device_info)
self.assertEquals(cfg.acpi, False)
self.assertEquals(cfg.memory, 1024 * 1024 * 2)
self.assertEquals(cfg.vcpus, 1)
self.assertEquals(cfg.os_type, "uml")
self.assertEquals(cfg.os_boot_dev, None)
self.assertEquals(cfg.os_root, '/dev/vdb')
self.assertEquals(len(cfg.devices), 3)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestConsole)
def test_get_guest_config_with_block_device(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
conn_info = {'driver_volume_type': 'fake'}
info = {'block_device_mapping': [
{'connection_info': conn_info, 'mount_device': '/dev/vdc'},
{'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref, info)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info,
None, info)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(cfg.devices[2].target_dev, 'vdc')
self.assertEquals(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(cfg.devices[3].target_dev, 'vdd')
def test_get_guest_config_with_configdrive(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
# make configdrive.enabled_for() return True
instance_ref['config_drive'] = 'ANY_ID'
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(cfg.devices[2].target_dev, 'vdz')
def test_get_guest_config_with_vnc(self):
self.flags(libvirt_type='kvm',
vnc_enabled=True,
use_usb_tablet=False)
self.flags(enabled=False, group='spice')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 5)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEquals(cfg.devices[4].type, "vnc")
def test_get_guest_config_with_vnc_and_tablet(self):
self.flags(libvirt_type='kvm',
vnc_enabled=True,
use_usb_tablet=True)
self.flags(enabled=False, group='spice')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 6)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestInput)
self.assertEquals(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEquals(cfg.devices[4].type, "tablet")
self.assertEquals(cfg.devices[5].type, "vnc")
def test_get_guest_config_with_spice_and_tablet(self):
self.flags(libvirt_type='kvm',
vnc_enabled=False,
use_usb_tablet=True)
self.flags(enabled=True,
agent_enabled=False,
group='spice')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 6)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestInput)
self.assertEquals(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEquals(cfg.devices[4].type, "tablet")
self.assertEquals(cfg.devices[5].type, "spice")
def test_get_guest_config_with_spice_and_agent(self):
self.flags(libvirt_type='kvm',
vnc_enabled=False,
use_usb_tablet=True)
self.flags(enabled=True,
agent_enabled=True,
group='spice')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 6)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestChannel)
self.assertEquals(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEquals(cfg.devices[4].target_name, "com.redhat.spice.0")
self.assertEquals(cfg.devices[5].type, "spice")
def test_get_guest_config_with_vnc_and_spice(self):
self.flags(libvirt_type='kvm',
vnc_enabled=True,
use_usb_tablet=True)
self.flags(enabled=True,
agent_enabled=True,
group='spice')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 8)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestInput)
self.assertEquals(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestChannel)
self.assertEquals(type(cfg.devices[6]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEquals(type(cfg.devices[7]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEquals(cfg.devices[4].type, "tablet")
self.assertEquals(cfg.devices[5].target_name, "com.redhat.spice.0")
self.assertEquals(cfg.devices[6].type, "vnc")
self.assertEquals(cfg.devices[7].type, "spice")
def test_get_guest_cpu_config_none(self):
self.flags(libvirt_cpu_mode="none")
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(conf.cpu, None)
def test_get_guest_cpu_config_default_kvm(self):
self.flags(libvirt_type="kvm",
libvirt_cpu_mode=None)
def get_lib_version_stub():
return (0 * 1000 * 1000) + (9 * 1000) + 11
self.stubs.Set(self.conn,
"getLibVersion",
get_lib_version_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "host-model")
self.assertEquals(conf.cpu.model, None)
def test_get_guest_cpu_config_default_uml(self):
self.flags(libvirt_type="uml",
libvirt_cpu_mode=None)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(conf.cpu, None)
def test_get_guest_cpu_config_default_lxc(self):
self.flags(libvirt_type="lxc",
libvirt_cpu_mode=None)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(conf.cpu, None)
def test_get_guest_cpu_config_host_passthrough_new(self):
def get_lib_version_stub():
return (0 * 1000 * 1000) + (9 * 1000) + 11
self.stubs.Set(self.conn,
"getLibVersion",
get_lib_version_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-passthrough")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "host-passthrough")
self.assertEquals(conf.cpu.model, None)
def test_get_guest_cpu_config_host_model_new(self):
def get_lib_version_stub():
return (0 * 1000 * 1000) + (9 * 1000) + 11
self.stubs.Set(self.conn,
"getLibVersion",
get_lib_version_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-model")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "host-model")
self.assertEquals(conf.cpu.model, None)
def test_get_guest_cpu_config_custom_new(self):
def get_lib_version_stub():
return (0 * 1000 * 1000) + (9 * 1000) + 11
self.stubs.Set(self.conn,
"getLibVersion",
get_lib_version_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="custom")
self.flags(libvirt_cpu_model="Penryn")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "custom")
self.assertEquals(conf.cpu.model, "Penryn")
def test_get_guest_cpu_config_host_passthrough_old(self):
def get_lib_version_stub():
return (0 * 1000 * 1000) + (9 * 1000) + 7
self.stubs.Set(self.conn,
"getLibVersion",
get_lib_version_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-passthrough")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
self.assertRaises(exception.NovaException,
conn.get_guest_config,
instance_ref,
_fake_network_info(self.stubs, 1),
None,
disk_info)
def test_get_guest_cpu_config_host_model_old(self):
def get_lib_version_stub():
return (0 * 1000 * 1000) + (9 * 1000) + 7
# Ensure we have a predictable host CPU
def get_host_capabilities_stub(self):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.model = "Opteron_G4"
cpu.vendor = "AMD"
cpu.features.append(vconfig.LibvirtConfigGuestCPUFeature("tm2"))
cpu.features.append(vconfig.LibvirtConfigGuestCPUFeature("ht"))
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
return caps
self.stubs.Set(self.conn,
"getLibVersion",
get_lib_version_stub)
self.stubs.Set(libvirt_driver.LibvirtDriver,
"get_host_capabilities",
get_host_capabilities_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-model")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, None)
self.assertEquals(conf.cpu.model, "Opteron_G4")
self.assertEquals(conf.cpu.vendor, "AMD")
self.assertEquals(len(conf.cpu.features), 2)
self.assertEquals(conf.cpu.features[0].name, "tm2")
self.assertEquals(conf.cpu.features[1].name, "ht")
def test_get_guest_cpu_config_custom_old(self):
def get_lib_version_stub():
return (0 * 1000 * 1000) + (9 * 1000) + 7
self.stubs.Set(self.conn,
"getLibVersion",
get_lib_version_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="custom")
self.flags(libvirt_cpu_model="Penryn")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, None)
self.assertEquals(conf.cpu.model, "Penryn")
def test_xml_and_uri_no_ramdisk_no_kernel(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data,
expect_kernel=False, expect_ramdisk=False)
def test_xml_and_uri_no_ramdisk_no_kernel_xen_hvm(self):
instance_data = dict(self.test_instance)
instance_data.update({'vm_mode': vm_mode.HVM})
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=False, expect_xen_hvm=True)
def test_xml_and_uri_no_ramdisk_no_kernel_xen_pv(self):
instance_data = dict(self.test_instance)
instance_data.update({'vm_mode': vm_mode.XEN})
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=False, expect_xen_hvm=False,
xen_only=True)
def test_xml_and_uri_no_ramdisk(self):
instance_data = dict(self.test_instance)
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=True, expect_ramdisk=False)
def test_xml_and_uri_no_kernel(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=False, expect_ramdisk=False)
def test_xml_and_uri(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=True, expect_ramdisk=True)
def test_xml_and_uri_rescue(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data, expect_kernel=True,
expect_ramdisk=True, rescue=instance_data)
def test_xml_and_uri_rescue_no_kernel_no_ramdisk(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=False, rescue=instance_data)
def test_xml_and_uri_rescue_no_kernel(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=True, rescue=instance_data)
def test_xml_and_uri_rescue_no_ramdisk(self):
instance_data = dict(self.test_instance)
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data, expect_kernel=True,
expect_ramdisk=False, rescue=instance_data)
def test_xml_uuid(self):
self._check_xml_and_uuid({"disk_format": "raw"})
def test_lxc_container_and_uri(self):
instance_data = dict(self.test_instance)
self._check_xml_and_container(instance_data)
def test_xml_disk_prefix(self):
instance_data = dict(self.test_instance)
self._check_xml_and_disk_prefix(instance_data)
def test_xml_user_specified_disk_prefix(self):
instance_data = dict(self.test_instance)
self._check_xml_and_disk_prefix(instance_data, 'sd')
def test_xml_disk_driver(self):
instance_data = dict(self.test_instance)
self._check_xml_and_disk_driver(instance_data)
def test_xml_disk_bus_virtio(self):
self._check_xml_and_disk_bus({"disk_format": "raw"},
None,
(("disk", "virtio", "vda"),))
def test_xml_disk_bus_ide(self):
self._check_xml_and_disk_bus({"disk_format": "iso"},
None,
(("cdrom", "ide", "hda"),))
def test_xml_disk_bus_ide_and_virtio(self):
swap = {'device_name': '/dev/vdc',
'swap_size': 1}
ephemerals = [{'num': 0,
'virtual_name': 'ephemeral0',
'device_name': '/dev/vdb',
'size': 1}]
block_device_info = {
'swap': swap,
'ephemerals': ephemerals}
self._check_xml_and_disk_bus({"disk_format": "iso"},
block_device_info,
(("cdrom", "ide", "hda"),
("disk", "virtio", "vdb"),
("disk", "virtio", "vdc")))
def test_list_instances(self):
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByID = self.fake_lookup
libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 2
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0, 1]
libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: []
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instances = conn.list_instances()
# Only one should be listed, since the domain with ID 0 must be skipped
self.assertEquals(len(instances), 1)
def test_list_defined_instances(self):
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByID = self.fake_lookup
libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 1
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0]
libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: [1]
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instances = conn.list_instances()
# Only one defined domain should be listed
self.assertEquals(len(instances), 1)
def test_list_instances_when_instance_deleted(self):
def fake_lookup(instance_name):
raise libvirt.libvirtError("we deleted an instance!")
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup
libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 1
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0, 1]
libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: []
self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_code")
libvirt.libvirtError.get_error_code().AndReturn(
libvirt.VIR_ERR_NO_DOMAIN)
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instances = conn.list_instances()
# None should be listed, since we fake deleted the last one
self.assertEquals(len(instances), 0)
def test_get_all_block_devices(self):
xml = [
# NOTE(vish): id 0 is skipped
None,
"""
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
</disk>
</devices>
</domain>
""",
"""
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
</devices>
</domain>
""",
"""
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/3'/>
</disk>
</devices>
</domain>
""",
]
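# Only the <disk type='block'> sources should be reported; file-backed
# disks and the skipped id-0 domain must not contribute entries.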
def fake_lookup(id):
return FakeVirtDomain(xml[id])
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 4
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: range(4)
libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
devices = conn.get_all_block_devices()
self.assertEqual(devices, ['/path/to/dev/1', '/path/to/dev/3'])
def test_get_disks(self):
xml = [
# NOTE(vish): id 0 is skipped
None,
"""
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
</devices>
</domain>
""",
"""
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
</devices>
</domain>
""",
"""
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/3'/>
<target dev='vdb' bus='virtio'/>
</disk>
</devices>
</domain>
""",
]
def fake_lookup(id):
return FakeVirtDomain(xml[id])
def fake_lookup_name(name):
return FakeVirtDomain(xml[1])
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 4
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: range(4)
libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: []
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
devices = conn.get_disks(conn.list_instances()[0])
self.assertEqual(devices, ['vda', 'vdb'])
def test_snapshot_in_ami_format(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assign different image_ref from nova/images/fakes for testing ami
test_instance = copy.deepcopy(self.test_instance)
test_instance["image_ref"] = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
# Create the new image up front; the snapshot method will update it,
# so the same image_service instance must be used throughout the test.
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
libvirt_driver.libvirt_utils.disk_type = "qcow2"
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['disk_format'], 'ami')
self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_in_ami_format(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./',
libvirt_type='lxc')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assign different image_ref from nova/images/fakes for testing ami
test_instance = copy.deepcopy(self.test_instance)
test_instance["image_ref"] = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
# Create the new image up front; the snapshot method will update it,
# so the same image_service instance must be used throughout the test.
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
libvirt_driver.libvirt_utils.disk_type = "qcow2"
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['disk_format'], 'ami')
self.assertEquals(snapshot['name'], snapshot_name)
def test_snapshot_in_raw_format(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, self.test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
# Create the new image up front; the snapshot method will update it,
# so the same image_service instance must be used throughout the test.
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
self.stubs.Set(libvirt_driver.libvirt_utils, 'disk_type', 'raw')
def convert_image(source, dest, out_format):
libvirt_driver.libvirt_utils.files[dest] = ''
self.stubs.Set(images, 'convert_image', convert_image)
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['disk_format'], 'raw')
self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_in_raw_format(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./',
libvirt_type='lxc')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, self.test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
# Create the new image up front; the snapshot method will update it,
# so the same image_service instance must be used throughout the test.
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
self.stubs.Set(libvirt_driver.libvirt_utils, 'disk_type', 'raw')
def convert_image(source, dest, out_format):
libvirt_driver.libvirt_utils.files[dest] = ''
self.stubs.Set(images, 'convert_image', convert_image)
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['disk_format'], 'raw')
self.assertEquals(snapshot['name'], snapshot_name)
def test_snapshot_in_qcow2_format(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(snapshot_image_format='qcow2',
libvirt_snapshots_directory='./')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, self.test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
# Create the new image up front; the snapshot method will update it,
# so the same image_service instance must be used throughout the test.
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
libvirt_driver.libvirt_utils.disk_type = "qcow2"
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['disk_format'], 'qcow2')
self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_in_qcow2_format(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(snapshot_image_format='qcow2',
libvirt_snapshots_directory='./',
libvirt_type='lxc')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, self.test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
# Create the new image up front; the snapshot method will update it,
# so the same image_service instance must be used throughout the test.
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
libvirt_driver.libvirt_utils.disk_type = "qcow2"
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['disk_format'], 'qcow2')
self.assertEquals(snapshot['name'], snapshot_name)
def test_snapshot_no_image_architecture(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assign different image_ref from nova/images/fakes for
# testing different base image
test_instance = copy.deepcopy(self.test_instance)
test_instance["image_ref"] = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
# Create the new image up front; the snapshot method will update it,
# so the same image_service instance must be used throughout the test.
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_no_image_architecture(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./',
libvirt_type='lxc')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assign different image_ref from nova/images/fakes for
# testing different base image
test_instance = copy.deepcopy(self.test_instance)
test_instance["image_ref"] = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
# Create new image. It will be updated in snapshot method
# To work with it from snapshot, the single image_service is needed
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['name'], snapshot_name)
def test_snapshot_no_original_image(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assign a non-existent image
test_instance = copy.deepcopy(self.test_instance)
test_instance["image_ref"] = '661122aa-1234-dede-fefe-babababababa'
instance_ref = db.instance_create(self.context, test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_no_original_image(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./',
libvirt_type='lxc')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assign a non-existent image
test_instance = copy.deepcopy(self.test_instance)
test_instance["image_ref"] = '661122aa-1234-dede-fefe-babababababa'
instance_ref = db.instance_create(self.context, test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['name'], snapshot_name)
def test_snapshot_metadata_image(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./')
image_service = nova.tests.image.fake.FakeImageService()
# Assign an image with an architecture defined (x86_64)
test_instance = copy.deepcopy(self.test_instance)
test_instance["image_ref"] = 'a440c04b-79fa-479c-bed1-0b816eaec379'
instance_ref = db.instance_create(self.context, test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id),
'architecture': 'fake_arch',
'key_a': 'value_a',
'key_b': 'value_b'}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['properties']['architecture'], 'fake_arch')
self.assertEquals(snapshot['properties']['key_a'], 'value_a')
self.assertEquals(snapshot['properties']['key_b'], 'value_b')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['name'], snapshot_name)
def test_attach_invalid_volume_type(self):
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.VolumeDriverNotFound,
conn.attach_volume,
{"driver_volume_type": "badtype"},
{"name": "fake-instance"},
"/dev/sda")
def test_multi_nic(self):
instance_data = dict(self.test_instance)
network_info = _fake_network_info(self.stubs, 2)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, instance_data)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
xml = conn.to_xml(instance_ref, network_info, disk_info)
tree = etree.fromstring(xml)
interfaces = tree.findall("./devices/interface")
self.assertEquals(len(interfaces), 2)
self.assertEquals(interfaces[0].get('type'), 'bridge')
def _check_xml_and_container(self, instance):
user_context = context.RequestContext(self.user_id,
self.project_id)
instance_ref = db.instance_create(user_context, instance)
self.flags(libvirt_type='lxc')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertEquals(conn.uri(), 'lxc:///')
network_info = _fake_network_info(self.stubs, 1)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
xml = conn.to_xml(instance_ref, network_info, disk_info)
tree = etree.fromstring(xml)
checks = [
(lambda t: t.find('.').get('type'), 'lxc'),
(lambda t: t.find('./os/type').text, 'exe'),
(lambda t: t.find('./devices/filesystem/target').get('dir'), '/')]
for i, (check, expected_result) in enumerate(checks):
self.assertEqual(check(tree),
expected_result,
'%s failed common check %d' % (xml, i))
target = tree.find('./devices/filesystem/source').get('dir')
self.assertTrue(len(target) > 0)
def _check_xml_and_disk_prefix(self, instance, prefix=None):
user_context = context.RequestContext(self.user_id,
self.project_id)
instance_ref = db.instance_create(user_context, instance)
def _get_prefix(p, default):
if p:
return p + 'a'
return default
type_disk_map = {
'qemu': [
(lambda t: t.find('.').get('type'), 'qemu'),
(lambda t: t.find('./devices/disk/target').get('dev'),
_get_prefix(prefix, 'vda'))],
'xen': [
(lambda t: t.find('.').get('type'), 'xen'),
(lambda t: t.find('./devices/disk/target').get('dev'),
_get_prefix(prefix, 'sda'))],
'kvm': [
(lambda t: t.find('.').get('type'), 'kvm'),
(lambda t: t.find('./devices/disk/target').get('dev'),
_get_prefix(prefix, 'vda'))],
'uml': [
(lambda t: t.find('.').get('type'), 'uml'),
(lambda t: t.find('./devices/disk/target').get('dev'),
_get_prefix(prefix, 'ubda'))]
}
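# Default first-disk device names per hypervisor: qemu/kvm use 'vda',
# xen 'sda' and uml 'ubda'; a user-specified prefix should override them.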
for (libvirt_type, checks) in type_disk_map.iteritems():
self.flags(libvirt_type=libvirt_type)
if prefix:
self.flags(libvirt_disk_prefix=prefix)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
network_info = _fake_network_info(self.stubs, 1)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
xml = conn.to_xml(instance_ref, network_info, disk_info)
tree = etree.fromstring(xml)
for i, (check, expected_result) in enumerate(checks):
self.assertEqual(check(tree),
expected_result,
'%s != %s failed check %d' %
(check(tree), expected_result, i))
def _check_xml_and_disk_driver(self, image_meta):
os_open = os.open
directio_supported = True
def os_open_stub(path, flags, *args, **kwargs):
if flags & os.O_DIRECT:
if not directio_supported:
raise OSError(errno.EINVAL,
'%s: %s' % (os.strerror(errno.EINVAL), path))
flags &= ~os.O_DIRECT
return os_open(path, flags, *args, **kwargs)
self.stubs.Set(os, 'open', os_open_stub)
def connection_supports_direct_io_stub(*args, **kwargs):
return directio_supported
self.stubs.Set(libvirt_driver.LibvirtDriver,
'_supports_direct_io', connection_supports_direct_io_stub)
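# The stubs above toggle O_DIRECT support: while direct I/O is available
# the driver should request cache='none'; the second pass below flips the
# flag and expects a fallback to cache='writethrough'.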
user_context = context.RequestContext(self.user_id, self.project_id)
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
xml = drv.to_xml(instance_ref, network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
disks = tree.findall('./devices/disk/driver')
for disk in disks:
self.assertEqual(disk.get("cache"), "none")
directio_supported = False
# The O_DIRECT availability is cached on first use in
# LibvirtDriver, hence we re-create it here
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
xml = drv.to_xml(instance_ref, network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
disks = tree.findall('./devices/disk/driver')
for disk in disks:
self.assertEqual(disk.get("cache"), "writethrough")
def _check_xml_and_disk_bus(self, image_meta,
block_device_info, wantConfig):
user_context = context.RequestContext(self.user_id, self.project_id)
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref,
block_device_info,
image_meta)
xml = drv.to_xml(instance_ref, network_info, disk_info, image_meta,
block_device_info=block_device_info)
tree = etree.fromstring(xml)
got_disks = tree.findall('./devices/disk')
got_disk_targets = tree.findall('./devices/disk/target')
for i in range(len(wantConfig)):
want_device_type = wantConfig[i][0]
want_device_bus = wantConfig[i][1]
want_device_dev = wantConfig[i][2]
got_device_type = got_disks[i].get('device')
got_device_bus = got_disk_targets[i].get('bus')
got_device_dev = got_disk_targets[i].get('dev')
self.assertEqual(got_device_type, want_device_type)
self.assertEqual(got_device_bus, want_device_bus)
self.assertEqual(got_device_dev, want_device_dev)
def _check_xml_and_uuid(self, image_meta):
user_context = context.RequestContext(self.user_id, self.project_id)
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
xml = drv.to_xml(instance_ref, network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
self.assertEqual(tree.find('./uuid').text,
instance_ref['uuid'])
def _check_xml_and_uri(self, instance, expect_ramdisk, expect_kernel,
rescue=None, expect_xen_hvm=False, xen_only=False):
user_context = context.RequestContext(self.user_id, self.project_id)
instance_ref = db.instance_create(user_context, instance)
network_ref = db.project_get_networks(context.get_admin_context(),
self.project_id)[0]
type_uri_map = {'qemu': ('qemu:///system',
[(lambda t: t.find('.').get('type'), 'qemu'),
(lambda t: t.find('./os/type').text,
vm_mode.HVM),
(lambda t: t.find('./devices/emulator'), None)]),
'kvm': ('qemu:///system',
[(lambda t: t.find('.').get('type'), 'kvm'),
(lambda t: t.find('./os/type').text,
vm_mode.HVM),
(lambda t: t.find('./devices/emulator'), None)]),
'uml': ('uml:///system',
[(lambda t: t.find('.').get('type'), 'uml'),
(lambda t: t.find('./os/type').text,
vm_mode.UML)]),
'xen': ('xen:///',
[(lambda t: t.find('.').get('type'), 'xen'),
(lambda t: t.find('./os/type').text,
vm_mode.XEN)])}
if expect_xen_hvm or xen_only:
hypervisors_to_check = ['xen']
else:
hypervisors_to_check = ['qemu', 'kvm', 'xen']
if expect_xen_hvm:
type_uri_map = {}
type_uri_map['xen'] = ('xen:///',
[(lambda t: t.find('.').get('type'),
'xen'),
(lambda t: t.find('./os/type').text,
vm_mode.HVM)])
for hypervisor_type in hypervisors_to_check:
check_list = type_uri_map[hypervisor_type][1]
if rescue:
suffix = '.rescue'
else:
suffix = ''
if expect_kernel:
check = (lambda t: t.find('./os/kernel').text.split(
'/')[1], 'kernel' + suffix)
else:
check = (lambda t: t.find('./os/kernel'), None)
check_list.append(check)
# Hypervisors that only support vm_mode.HVM (qemu/kvm) should not
# produce kernel command line elements (os/root, os/cmdline) in the
# configuration when no kernel is expected.
if not expect_kernel and hypervisor_type in ['qemu', 'kvm']:
check = (lambda t: t.find('./os/root'), None)
check_list.append(check)
check = (lambda t: t.find('./os/cmdline'), None)
check_list.append(check)
if expect_ramdisk:
check = (lambda t: t.find('./os/initrd').text.split(
'/')[1], 'ramdisk' + suffix)
else:
check = (lambda t: t.find('./os/initrd'), None)
check_list.append(check)
if hypervisor_type in ['qemu', 'kvm']:
xpath = "./sysinfo/system/entry"
check = (lambda t: t.findall(xpath)[0].get("name"),
"manufacturer")
check_list.append(check)
check = (lambda t: t.findall(xpath)[0].text,
version.vendor_string())
check_list.append(check)
check = (lambda t: t.findall(xpath)[1].get("name"),
"product")
check_list.append(check)
check = (lambda t: t.findall(xpath)[1].text,
version.product_string())
check_list.append(check)
check = (lambda t: t.findall(xpath)[2].get("name"),
"version")
check_list.append(check)
# NOTE(sirp): empty strings don't roundtrip in lxml (they are
# converted to None), so we need an `or ''` to correct for that
check = (lambda t: t.findall(xpath)[2].text or '',
version.version_string_with_package())
check_list.append(check)
check = (lambda t: t.findall(xpath)[3].get("name"),
"serial")
check_list.append(check)
check = (lambda t: t.findall(xpath)[3].text,
"cef19ce0-0ca2-11df-855d-b19fbce37686")
check_list.append(check)
check = (lambda t: t.findall(xpath)[4].get("name"),
"uuid")
check_list.append(check)
check = (lambda t: t.findall(xpath)[4].text,
instance['uuid'])
check_list.append(check)
if hypervisor_type in ['qemu', 'kvm']:
check = (lambda t: t.findall('./devices/serial')[0].get(
'type'), 'file')
check_list.append(check)
check = (lambda t: t.findall('./devices/serial')[1].get(
'type'), 'pty')
check_list.append(check)
check = (lambda t: t.findall('./devices/serial/source')[0].get(
'path').split('/')[1], 'console.log')
check_list.append(check)
else:
check = (lambda t: t.find('./devices/console').get(
'type'), 'pty')
check_list.append(check)
common_checks = [
(lambda t: t.find('.').tag, 'domain'),
(lambda t: t.find('./memory').text, '2097152')]
if rescue:
common_checks += [
(lambda t: t.findall('./devices/disk/source')[0].get(
'file').split('/')[1], 'disk.rescue'),
(lambda t: t.findall('./devices/disk/source')[1].get(
'file').split('/')[1], 'disk')]
else:
common_checks += [(lambda t: t.findall(
'./devices/disk/source')[0].get('file').split('/')[1],
'disk')]
common_checks += [(lambda t: t.findall(
'./devices/disk/source')[1].get('file').split('/')[1],
'disk.local')]
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
self.flags(libvirt_type=libvirt_type)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertEquals(conn.uri(), expected_uri)
network_info = _fake_network_info(self.stubs, 1)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref,
rescue=rescue)
xml = conn.to_xml(instance_ref, network_info, disk_info,
rescue=rescue)
tree = etree.fromstring(xml)
for i, (check, expected_result) in enumerate(checks):
self.assertEqual(check(tree),
expected_result,
'%s != %s failed check %d' %
(check(tree), expected_result, i))
for i, (check, expected_result) in enumerate(common_checks):
self.assertEqual(check(tree),
expected_result,
'%s != %s failed common check %d' %
(check(tree), expected_result, i))
filterref = './devices/interface/filterref'
(network, mapping) = network_info[0]
nic_id = mapping['mac'].replace(':', '')
fw = firewall.NWFilterFirewall(fake.FakeVirtAPI(), conn)
instance_filter_name = fw._instance_filter_name(instance_ref,
nic_id)
self.assertEqual(tree.find(filterref).get('filter'),
instance_filter_name)
# This test is supposed to make sure we don't
# override a specifically set uri
#
# Deliberately not just assigning this string to CONF.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the CONF.
testuri = 'something completely different'
self.flags(libvirt_uri=testuri)
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
self.flags(libvirt_type=libvirt_type)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertEquals(conn.uri(), testuri)
db.instance_destroy(user_context, instance_ref['uuid'])
def test_ensure_filtering_rules_for_instance_timeout(self):
# ensure_filtering_rules_for_instance() finishes with a timeout.
# Preparing mocks
def fake_none(self, *args):
return
def fake_raise(self):
raise libvirt.libvirtError('ERR')
class FakeTime(object):
def __init__(self):
self.counter = 0
def sleep(self, t):
self.counter += t
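# FakeTime only accumulates the requested sleep intervals, letting the
# test assert how long the driver waited before raising the timeout.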
fake_timer = FakeTime()
# _fake_network_info must be called before create_fake_libvirt_mock(),
# as _fake_network_info calls importutils.import_class() and
# create_fake_libvirt_mock() mocks importutils.import_class().
network_info = _fake_network_info(self.stubs, 1)
self.create_fake_libvirt_mock()
instance_ref = db.instance_create(self.context, self.test_instance)
# Start test
self.mox.ReplayAll()
try:
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn.firewall_driver,
'setup_basic_filtering',
fake_none)
self.stubs.Set(conn.firewall_driver,
'prepare_instance_filter',
fake_none)
self.stubs.Set(conn.firewall_driver,
'instance_filter_exists',
fake_none)
conn.ensure_filtering_rules_for_instance(instance_ref,
network_info,
time_module=fake_timer)
except exception.NovaException, e:
msg = ('The firewall filter for %s does not exist' %
instance_ref['name'])
c1 = (0 <= str(e).find(msg))
self.assertTrue(c1)
self.assertEqual(29, fake_timer.counter, "Didn't wait the expected "
"amount of time")
db.instance_destroy(self.context, instance_ref['uuid'])
def test_check_can_live_migrate_dest_all_pass_with_block_migration(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest = "fake_host_2"
src = instance_ref['host']
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'disk_available_least': 400,
'cpu_info': 'asdf',
}
filename = "file"
self.mox.StubOutWithMock(conn, '_create_shared_storage_test_file')
self.mox.StubOutWithMock(conn, '_compare_cpu')
# _check_cpu_match
conn._compare_cpu("asdf")
# mounted_on_same_shared_storage
conn._create_shared_storage_test_file().AndReturn(filename)
self.mox.ReplayAll()
return_value = conn.check_can_live_migrate_destination(self.context,
instance_ref, compute_info, compute_info, True)
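# disk_available_least is reported in GB while the driver returns
# disk_available_mb, hence 400 * 1024 = 409600 below.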
self.assertThat({"filename": "file",
'disk_available_mb': 409600,
"disk_over_commit": False,
"block_migration": True},
matchers.DictMatches(return_value))
def test_check_can_live_migrate_dest_all_pass_no_block_migration(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest = "fake_host_2"
src = instance_ref['host']
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'cpu_info': 'asdf'}
filename = "file"
self.mox.StubOutWithMock(conn, '_create_shared_storage_test_file')
self.mox.StubOutWithMock(conn, '_compare_cpu')
# _check_cpu_match
conn._compare_cpu("asdf")
# mounted_on_same_shared_storage
conn._create_shared_storage_test_file().AndReturn(filename)
self.mox.ReplayAll()
return_value = conn.check_can_live_migrate_destination(self.context,
instance_ref, compute_info, compute_info, False)
self.assertThat({"filename": "file",
"block_migration": False,
"disk_over_commit": False,
"disk_available_mb": None},
matchers.DictMatches(return_value))
def test_check_can_live_migrate_dest_incompatible_cpu_raises(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest = "fake_host_2"
src = instance_ref['host']
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'cpu_info': 'asdf'}
self.mox.StubOutWithMock(conn, '_compare_cpu')
conn._compare_cpu("asdf").AndRaise(exception.InvalidCPUInfo(
reason='foo')
)
self.mox.ReplayAll()
self.assertRaises(exception.InvalidCPUInfo,
conn.check_can_live_migrate_destination,
self.context, instance_ref,
compute_info, compute_info, False)
def test_check_can_live_migrate_dest_cleanup_works_correctly(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": True,
"disk_over_commit": False,
"disk_available_mb": 1024}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, '_cleanup_shared_storage_test_file')
conn._cleanup_shared_storage_test_file("file")
self.mox.ReplayAll()
conn.check_can_live_migrate_destination_cleanup(self.context,
dest_check_data)
def test_check_can_live_migrate_source_works_correctly(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": True,
"disk_over_commit": False,
"disk_available_mb": 1024}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
self.mox.StubOutWithMock(conn, "_assert_dest_node_has_enough_disk")
conn._assert_dest_node_has_enough_disk(self.context, instance_ref,
dest_check_data['disk_available_mb'],
False)
self.mox.ReplayAll()
conn.check_can_live_migrate_source(self.context, instance_ref,
dest_check_data)
def test_check_can_live_migrate_source_vol_backed_works_correctly(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": False,
"disk_over_commit": False,
"disk_available_mb": 1024,
"is_volume_backed": True}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
self.mox.ReplayAll()
ret = conn.check_can_live_migrate_source(self.context, instance_ref,
dest_check_data)
self.assertTrue(type(ret) == dict)
self.assertTrue('is_shared_storage' in ret)
def test_check_can_live_migrate_source_vol_backed_fails(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": False,
"disk_over_commit": False,
"disk_available_mb": 1024,
"is_volume_backed": False}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(exception.InvalidSharedStorage,
conn.check_can_live_migrate_source, self.context,
instance_ref, dest_check_data)
def test_check_can_live_migrate_dest_fail_shared_storage_with_blockm(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": True,
"disk_over_commit": False,
'disk_available_mb': 1024}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(True)
self.mox.ReplayAll()
self.assertRaises(exception.InvalidLocalStorage,
conn.check_can_live_migrate_source,
self.context, instance_ref, dest_check_data)
def test_check_can_live_migrate_no_shared_storage_no_blck_mig_raises(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": False,
"disk_over_commit": False,
'disk_available_mb': 1024}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(exception.InvalidSharedStorage,
conn.check_can_live_migrate_source,
self.context, instance_ref, dest_check_data)
def test_check_can_live_migrate_source_with_dest_not_enough_disk(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest = "fake_host_2"
src = instance_ref['host']
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
self.mox.StubOutWithMock(conn, "get_instance_disk_info")
conn.get_instance_disk_info(instance_ref["name"]).AndReturn(
'[{"virt_disk_size":2}]')
dest_check_data = {"filename": "file",
"disk_available_mb": 0,
"block_migration": True,
"disk_over_commit": False}
self.mox.ReplayAll()
self.assertRaises(exception.MigrationError,
conn.check_can_live_migrate_source,
self.context, instance_ref, dest_check_data)
def test_live_migration_raises_exception(self):
# Confirms recover method is called when exceptions are raised.
# Preparing data
self.compute = importutils.import_object(CONF.compute_manager)
instance_dict = {'host': 'fake',
'power_state': power_state.RUNNING,
'vm_state': vm_states.ACTIVE}
instance_ref = db.instance_create(self.context, self.test_instance)
instance_ref = db.instance_update(self.context, instance_ref['uuid'],
instance_dict)
# Preparing mocks
vdmock = self.mox.CreateMock(libvirt.virDomain)
self.mox.StubOutWithMock(vdmock, "migrateToURI")
_bandwidth = CONF.live_migration_bandwidth
vdmock.migrateToURI(CONF.live_migration_uri % 'dest',
mox.IgnoreArg(),
None,
_bandwidth).AndRaise(libvirt.libvirtError('ERR'))
def fake_lookup(instance_name):
if instance_name == instance_ref['name']:
return vdmock
self.create_fake_libvirt_mock(lookupByName=fake_lookup)
self.mox.StubOutWithMock(self.compute, "_rollback_live_migration")
self.compute._rollback_live_migration(self.context, instance_ref,
'dest', False)
# Start test
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(libvirt.libvirtError,
conn._live_migration,
self.context, instance_ref, 'dest', False,
self.compute._rollback_live_migration)
instance_ref = db.instance_get(self.context, instance_ref['id'])
self.assertTrue(instance_ref['vm_state'] == vm_states.ACTIVE)
self.assertTrue(instance_ref['power_state'] == power_state.RUNNING)
db.instance_destroy(self.context, instance_ref['uuid'])
def test_pre_live_migration_works_correctly_mocked(self):
# Creating testdata
vol = {'block_device_mapping': [
{'connection_info': 'dummy', 'mount_device': '/dev/sda'},
{'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
class FakeNetworkInfo():
def fixed_ips(self):
return ["test_ip_addr"]
inst_ref = {'id': 'foo'}
c = context.get_admin_context()
nw_info = FakeNetworkInfo()
# Creating mocks
self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
driver.block_device_info_get_mapping(vol
).AndReturn(vol['block_device_mapping'])
self.mox.StubOutWithMock(conn, "volume_driver_method")
for v in vol['block_device_mapping']:
disk_info = {
'bus': "scsi",
'dev': v['mount_device'].rpartition("/")[2],
'type': "disk"
}
conn.volume_driver_method('connect_volume',
v['connection_info'],
disk_info)
self.mox.StubOutWithMock(conn, 'plug_vifs')
conn.plug_vifs(mox.IsA(inst_ref), nw_info)
self.mox.ReplayAll()
result = conn.pre_live_migration(c, inst_ref, vol, nw_info)
self.assertEqual(result, None)
def test_pre_live_migration_vol_backed_works_correctly_mocked(self):
# Creating testdata, using temp dir.
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
vol = {'block_device_mapping': [
{'connection_info': 'dummy', 'mount_device': '/dev/sda'},
{'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
class FakeNetworkInfo():
def fixed_ips(self):
return ["test_ip_addr"]
inst_ref = db.instance_create(self.context, self.test_instance)
c = context.get_admin_context()
nw_info = FakeNetworkInfo()
# Creating mocks
self.mox.StubOutWithMock(conn, "volume_driver_method")
for v in vol['block_device_mapping']:
disk_info = {
'bus': "scsi",
'dev': v['mount_device'].rpartition("/")[2],
'type': "disk"
}
conn.volume_driver_method('connect_volume',
v['connection_info'],
disk_info)
self.mox.StubOutWithMock(conn, 'plug_vifs')
conn.plug_vifs(mox.IsA(inst_ref), nw_info)
self.mox.ReplayAll()
migrate_data = {'is_shared_storage': False,
'is_volume_backed': True,
'block_migration': False
}
ret = conn.pre_live_migration(c, inst_ref, vol, nw_info,
migrate_data)
self.assertEqual(ret, None)
self.assertTrue(os.path.exists('%s/%s/' % (tmpdir,
inst_ref['uuid'])))
db.instance_destroy(self.context, inst_ref['uuid'])
def test_pre_block_migration_works_correctly(self):
# Replace instances_path since this testcase creates tmpfile
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
# Test data
instance_ref = db.instance_create(self.context, self.test_instance)
dummy_info = [{'path': '%s/disk' % tmpdir,
'disk_size': 10737418240,
'type': 'raw',
'backing_file': ''},
{'backing_file': 'otherdisk_1234567',
'path': '%s/otherdisk' % tmpdir,
'virt_disk_size': 10737418240}]
dummyjson = json.dumps(dummy_info)
# qemu-img should be mockd since test environment might not have
# large disk space.
self.mox.StubOutWithMock(imagebackend.Image, 'cache')
imagebackend.Image.cache(context=mox.IgnoreArg(),
fetch_func=mox.IgnoreArg(),
filename='otherdisk',
image_id=self.test_instance['image_ref'],
project_id='fake',
size=10737418240L,
user_id=None).AndReturn(None)
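# The backing file name carries a size suffix ('otherdisk_1234567'); the
# driver is expected to cache it under the base name 'otherdisk', which
# the mocked imagebackend call above enforces.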
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.pre_block_migration(self.context, instance_ref,
dummyjson)
self.assertTrue(os.path.exists('%s/%s/' %
(tmpdir, instance_ref['uuid'])))
db.instance_destroy(self.context, instance_ref['uuid'])
def test_get_instance_disk_info_works_correctly(self):
# Test data
instance_ref = db.instance_create(self.context, self.test_instance)
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>"
"<disk type='file'><driver name='qemu' type='raw'/>"
"<source file='/test/disk'/>"
"<target dev='vda' bus='virtio'/></disk>"
"<disk type='file'><driver name='qemu' type='qcow2'/>"
"<source file='/test/disk.local'/>"
"<target dev='vdb' bus='virtio'/></disk>"
"</devices></domain>")
# Preparing mocks
vdmock = self.mox.CreateMock(libvirt.virDomain)
self.mox.StubOutWithMock(vdmock, "XMLDesc")
vdmock.XMLDesc(0).AndReturn(dummyxml)
def fake_lookup(instance_name):
if instance_name == instance_ref['name']:
return vdmock
self.create_fake_libvirt_mock(lookupByName=fake_lookup)
GB = 1024 * 1024 * 1024
fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * GB
fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * GB
fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'
self.mox.StubOutWithMock(os.path, "getsize")
os.path.getsize('/test/disk').AndReturn((10737418240))
os.path.getsize('/test/disk.local').AndReturn((3328599655))
ret = ("image: /test/disk\n"
"file format: raw\n"
"virtual size: 20G (21474836480 bytes)\n"
"disk size: 3.1G\n"
"cluster_size: 2097152\n"
"backing file: /test/dummy (actual path: /backing/file)\n")
self.mox.StubOutWithMock(os.path, "exists")
os.path.exists('/test/disk.local').AndReturn(True)
self.mox.StubOutWithMock(utils, "execute")
utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
'/test/disk.local').AndReturn((ret, ''))
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
info = conn.get_instance_disk_info(instance_ref['name'])
info = jsonutils.loads(info)
self.assertEquals(info[0]['type'], 'raw')
self.assertEquals(info[0]['path'], '/test/disk')
self.assertEquals(info[0]['disk_size'], 10737418240)
self.assertEquals(info[0]['backing_file'], "")
self.assertEquals(info[0]['over_committed_disk_size'], 0)
self.assertEquals(info[1]['type'], 'qcow2')
self.assertEquals(info[1]['path'], '/test/disk.local')
self.assertEquals(info[1]['virt_disk_size'], 21474836480)
self.assertEquals(info[1]['backing_file'], "file")
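# over_committed_disk_size is the virtual size minus the bytes actually
# allocated on disk: 21474836480 - 3328599655 = 18146236825.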
self.assertEquals(info[1]['over_committed_disk_size'], 18146236825)
db.instance_destroy(self.context, instance_ref['uuid'])
def test_spawn_with_network_info(self):
# Preparing mocks
def fake_none(*args, **kwargs):
return
def fake_getLibVersion():
return 9007
def fake_getCapabilities():
return """
<capabilities>
<host>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<cpu>
<arch>x86_64</arch>
<model>Penryn</model>
<vendor>Intel</vendor>
<topology sockets='1' cores='2' threads='1'/>
<feature name='xtpr'/>
</cpu>
</host>
</capabilities>
"""
# _fake_network_info must be called before create_fake_libvirt_mock(),
# as _fake_network_info calls importutils.import_class() and
# create_fake_libvirt_mock() mocks importutils.import_class().
network_info = _fake_network_info(self.stubs, 1)
self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
getCapabilities=fake_getCapabilities)
instance_ref = self.test_instance
instance_ref['image_ref'] = 123456 # we send an int to test sha1 call
instance_type = db.instance_type_get(self.context,
instance_ref['instance_type_id'])
sys_meta = flavors.save_instance_type_info({}, instance_type)
instance_ref['system_metadata'] = sys_meta
instance = db.instance_create(self.context, instance_ref)
# Mock out the get_info method of the LibvirtDriver so that the polling
# in the spawn method of the LibvirtDriver returns immediately
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, 'get_info')
libvirt_driver.LibvirtDriver.get_info(instance
).AndReturn({'state': power_state.RUNNING})
# Start test
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn.firewall_driver,
'setup_basic_filtering',
fake_none)
self.stubs.Set(conn.firewall_driver,
'prepare_instance_filter',
fake_none)
self.stubs.Set(imagebackend.Image,
'cache',
fake_none)
conn.spawn(self.context, instance, None, [], 'herp',
network_info=network_info)
path = os.path.join(CONF.instances_path, instance['name'])
if os.path.isdir(path):
shutil.rmtree(path)
path = os.path.join(CONF.instances_path, CONF.base_dir_name)
if os.path.isdir(path):
shutil.rmtree(os.path.join(CONF.instances_path,
CONF.base_dir_name))
def test_spawn_without_image_meta(self):
self.create_image_called = False
def fake_none(*args, **kwargs):
return
def fake_create_image(*args, **kwargs):
self.create_image_called = True
def fake_get_info(instance):
return {'state': power_state.RUNNING}
instance_ref = self.test_instance
instance_ref['image_ref'] = 1
instance = db.instance_create(self.context, instance_ref)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, 'to_xml', fake_none)
self.stubs.Set(conn, '_create_image', fake_create_image)
self.stubs.Set(conn, '_create_domain_and_network', fake_none)
self.stubs.Set(conn, 'get_info', fake_get_info)
conn.spawn(self.context, instance, None, [], None)
self.assertTrue(self.create_image_called)
conn.spawn(self.context,
instance,
{'id': instance['image_ref']},
[],
None)
self.assertTrue(self.create_image_called)
def test_spawn_from_volume_calls_cache(self):
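        # Image.cache() should only be called for the root disk when the instance
        # actually boots from an image rather than a volume.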
self.cache_called_for_disk = False
def fake_none(*args, **kwargs):
return
def fake_cache(*args, **kwargs):
if kwargs.get('image_id') == 'my_fake_image':
self.cache_called_for_disk = True
def fake_get_info(instance):
return {'state': power_state.RUNNING}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, 'to_xml', fake_none)
self.stubs.Set(imagebackend.Image, 'cache', fake_cache)
self.stubs.Set(conn, '_create_domain_and_network', fake_none)
self.stubs.Set(conn, 'get_info', fake_get_info)
block_device_info = {'root_device_name': '/dev/vda',
'block_device_mapping': [
{'mount_device': 'vda'}]}
# Volume-backed instance created without image
instance_ref = self.test_instance
instance_ref['image_ref'] = ''
instance_ref['root_device_name'] = '/dev/vda'
instance = db.instance_create(self.context, instance_ref)
conn.spawn(self.context, instance, None, [], None,
block_device_info=block_device_info)
self.assertFalse(self.cache_called_for_disk)
db.instance_destroy(self.context, instance['uuid'])
# Booted from volume but with placeholder image
instance_ref = self.test_instance
instance_ref['image_ref'] = 'my_fake_image'
instance_ref['root_device_name'] = '/dev/vda'
instance = db.instance_create(self.context, instance_ref)
conn.spawn(self.context, instance, None, [], None,
block_device_info=block_device_info)
self.assertFalse(self.cache_called_for_disk)
db.instance_destroy(self.context, instance['uuid'])
# Booted from an image
instance_ref['image_ref'] = 'my_fake_image'
instance = db.instance_create(self.context, instance_ref)
conn.spawn(self.context, instance, None, [], None)
self.assertTrue(self.cache_called_for_disk)
db.instance_destroy(self.context, instance['uuid'])
def test_create_image_plain(self):
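        # _create_image should request the root and ephemeral disks with the
        # expected cache filenames and sizes.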
gotFiles = []
def fake_image(self, instance, name, image_type=''):
class FakeImage(imagebackend.Image):
def __init__(self, instance, name):
self.path = os.path.join(instance['name'], name)
def create_image(self, prepare_template, base,
size, *args, **kwargs):
pass
def cache(self, fetch_func, filename, size=None,
*args, **kwargs):
gotFiles.append({'filename': filename,
'size': size})
def snapshot(self, name):
pass
return FakeImage(instance, name)
def fake_none(*args, **kwargs):
return
def fake_get_info(instance):
return {'state': power_state.RUNNING}
# Stop 'libvirt_driver._create_image' touching filesystem
self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
fake_image)
instance_ref = self.test_instance
instance_ref['image_ref'] = 1
instance = db.instance_create(self.context, instance_ref)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, 'to_xml', fake_none)
self.stubs.Set(conn, '_create_domain_and_network', fake_none)
self.stubs.Set(conn, 'get_info', fake_get_info)
image_meta = {'id': instance['image_ref']}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
None,
image_meta)
conn._create_image(context, instance,
disk_info['mapping'])
xml = conn.to_xml(instance, None,
disk_info, image_meta)
wantFiles = [
{'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
'size': 10 * 1024 * 1024 * 1024},
{'filename': 'ephemeral_20_default',
'size': 20 * 1024 * 1024 * 1024},
]
self.assertEquals(gotFiles, wantFiles)
def test_create_image_with_swap(self):
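        # As test_create_image_plain, but with swap configured so a swap file is
        # requested as well.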
gotFiles = []
def fake_image(self, instance, name, image_type=''):
class FakeImage(imagebackend.Image):
def __init__(self, instance, name):
self.path = os.path.join(instance['name'], name)
def create_image(self, prepare_template, base,
size, *args, **kwargs):
pass
def cache(self, fetch_func, filename, size=None,
*args, **kwargs):
gotFiles.append({'filename': filename,
'size': size})
def snapshot(self, name):
pass
return FakeImage(instance, name)
def fake_none(*args, **kwargs):
return
def fake_get_info(instance):
return {'state': power_state.RUNNING}
# Stop 'libvirt_driver._create_image' touching filesystem
self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
fake_image)
instance_ref = self.test_instance
instance_ref['image_ref'] = 1
# Turn on some swap to exercise that codepath in _create_image
instance_ref['system_metadata']['instance_type_swap'] = 500
instance = db.instance_create(self.context, instance_ref)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, 'to_xml', fake_none)
self.stubs.Set(conn, '_create_domain_and_network', fake_none)
self.stubs.Set(conn, 'get_info', fake_get_info)
image_meta = {'id': instance['image_ref']}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
None,
image_meta)
conn._create_image(context, instance,
disk_info['mapping'])
xml = conn.to_xml(instance, None,
disk_info, image_meta)
wantFiles = [
{'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
'size': 10 * 1024 * 1024 * 1024},
{'filename': 'ephemeral_20_default',
'size': 20 * 1024 * 1024 * 1024},
{'filename': 'swap_500',
'size': 500 * 1024 * 1024},
]
self.assertEquals(gotFiles, wantFiles)
def test_get_console_output_file(self):
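        # Console output from a file-backed console should be truncated to the
        # last MAX_CONSOLE_BYTES bytes.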
fake_libvirt_utils.files['console.log'] = '01234567890'
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
instance_ref = self.test_instance
instance_ref['image_ref'] = 123456
instance = db.instance_create(self.context, instance_ref)
console_dir = (os.path.join(tmpdir, instance['name']))
console_log = '%s/console.log' % (console_dir)
fake_dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
<console type='file'>
<source path='%s'/>
<target port='0'/>
</console>
</devices>
</domain>
""" % console_log
def fake_lookup(id):
return FakeVirtDomain(fake_dom_xml)
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
try:
prev_max = libvirt_driver.MAX_CONSOLE_BYTES
libvirt_driver.MAX_CONSOLE_BYTES = 5
output = conn.get_console_output(instance)
finally:
libvirt_driver.MAX_CONSOLE_BYTES = prev_max
self.assertEquals('67890', output)
def test_get_console_output_pty(self):
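        # Same truncation behaviour for a pty-backed console.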
fake_libvirt_utils.files['pty'] = '01234567890'
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
instance_ref = self.test_instance
instance_ref['image_ref'] = 123456
instance = db.instance_create(self.context, instance_ref)
console_dir = (os.path.join(tmpdir, instance['name']))
pty_file = '%s/fake_pty' % (console_dir)
fake_dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
<console type='pty'>
<source path='%s'/>
<target port='0'/>
</console>
</devices>
</domain>
""" % pty_file
def fake_lookup(id):
return FakeVirtDomain(fake_dom_xml)
def _fake_flush(self, fake_pty):
return 'foo'
def _fake_append_to_file(self, data, fpath):
return 'pty'
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
libvirt_driver.LibvirtDriver._flush_libvirt_console = _fake_flush
libvirt_driver.LibvirtDriver._append_to_file = _fake_append_to_file
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
try:
prev_max = libvirt_driver.MAX_CONSOLE_BYTES
libvirt_driver.MAX_CONSOLE_BYTES = 5
output = conn.get_console_output(instance)
finally:
libvirt_driver.MAX_CONSOLE_BYTES = prev_max
self.assertEquals('67890', output)
def test_get_host_ip_addr(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ip = conn.get_host_ip_addr()
self.assertEquals(ip, CONF.my_ip)
def test_broken_connection(self):
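        # getLibVersion() failures with these (error code, error domain) pairs
        # should make _test_connection() report the connection as broken.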
for (error, domain) in (
(libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_REMOTE),
(libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_RPC),
(libvirt.VIR_ERR_INTERNAL_ERROR, libvirt.VIR_FROM_RPC)):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_wrapped_conn")
self.mox.StubOutWithMock(conn._wrapped_conn, "getLibVersion")
self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_code")
self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_domain")
conn._wrapped_conn.getLibVersion().AndRaise(
libvirt.libvirtError("fake failure"))
libvirt.libvirtError.get_error_code().AndReturn(error)
libvirt.libvirtError.get_error_domain().AndReturn(domain)
self.mox.ReplayAll()
self.assertFalse(conn._test_connection())
self.mox.UnsetStubs()
def test_immediate_delete(self):
def fake_lookup_by_name(instance_name):
raise exception.InstanceNotFound(instance_id=instance_name)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
instance = db.instance_create(self.context, self.test_instance)
conn.destroy(instance, {})
def test_destroy_removes_disk(self):
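        # destroy() with the default destroy_disks=True should remove the
        # instance directory and clean up LVM volumes.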
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
'_undefine_domain')
libvirt_driver.LibvirtDriver._undefine_domain(instance)
self.mox.StubOutWithMock(shutil, "rmtree")
shutil.rmtree(os.path.join(CONF.instances_path, instance['name']))
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_cleanup_lvm')
libvirt_driver.LibvirtDriver._cleanup_lvm(instance)
# Start test
self.mox.ReplayAll()
def fake_destroy(instance):
pass
def fake_os_path_exists(path):
return True
def fake_unplug_vifs(instance, network_info):
pass
def fake_unfilter_instance(instance, network_info):
pass
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_destroy', fake_destroy)
self.stubs.Set(conn, 'unplug_vifs', fake_unplug_vifs)
self.stubs.Set(conn.firewall_driver,
'unfilter_instance', fake_unfilter_instance)
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
conn.destroy(instance, [])
def test_destroy_not_removes_disk(self):
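        # destroy() with destroy_disks=False should leave the instance directory
        # and LVM volumes untouched.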
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
'_undefine_domain')
libvirt_driver.LibvirtDriver._undefine_domain(instance)
# Start test
self.mox.ReplayAll()
def fake_destroy(instance):
pass
def fake_os_path_exists(path):
return True
def fake_unplug_vifs(instance, network_info):
pass
def fake_unfilter_instance(instance, network_info):
pass
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_destroy', fake_destroy)
self.stubs.Set(conn, 'unplug_vifs', fake_unplug_vifs)
self.stubs.Set(conn.firewall_driver,
'unfilter_instance', fake_unfilter_instance)
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
conn.destroy(instance, [], None, False)
    def test_destroy_undefines(self):
mock = self.mox.CreateMock(libvirt.virDomain)
mock.ID()
mock.destroy()
mock.undefineFlags(1).AndReturn(1)
self.mox.ReplayAll()
def fake_lookup_by_name(instance_name):
return mock
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN, 'id': -1}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
conn.destroy(instance, [])
def test_destroy_undefines_no_undefine_flags(self):
mock = self.mox.CreateMock(libvirt.virDomain)
mock.ID()
mock.destroy()
mock.undefineFlags(1).AndRaise(libvirt.libvirtError('Err'))
mock.undefine()
self.mox.ReplayAll()
def fake_lookup_by_name(instance_name):
return mock
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN, 'id': -1}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
conn.destroy(instance, [])
def test_destroy_undefines_no_attribute_with_managed_save(self):
mock = self.mox.CreateMock(libvirt.virDomain)
mock.ID()
mock.destroy()
mock.undefineFlags(1).AndRaise(AttributeError())
mock.hasManagedSaveImage(0).AndReturn(True)
mock.managedSaveRemove(0)
mock.undefine()
self.mox.ReplayAll()
def fake_lookup_by_name(instance_name):
return mock
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN, 'id': -1}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
conn.destroy(instance, [])
def test_destroy_undefines_no_attribute_no_managed_save(self):
mock = self.mox.CreateMock(libvirt.virDomain)
mock.ID()
mock.destroy()
mock.undefineFlags(1).AndRaise(AttributeError())
mock.hasManagedSaveImage(0).AndRaise(AttributeError())
mock.undefine()
self.mox.ReplayAll()
def fake_lookup_by_name(instance_name):
return mock
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN, 'id': -1}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
conn.destroy(instance, [])
def test_private_destroy_not_found(self):
mock = self.mox.CreateMock(libvirt.virDomain)
mock.ID()
mock.destroy()
self.mox.ReplayAll()
def fake_lookup_by_name(instance_name):
return mock
def fake_get_info(instance_name):
raise exception.InstanceNotFound(instance_id=instance_name)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
# NOTE(vish): verifies destroy doesn't raise if the instance disappears
conn._destroy(instance)
def test_disk_over_committed_size_total(self):
        # Ensure the over-committed disk sizes reported per instance are summed.
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
def list_instances():
return ['fake1', 'fake2']
self.stubs.Set(conn, 'list_instances', list_instances)
fake_disks = {'fake1': [{'type': 'qcow2', 'path': '/somepath/disk1',
'virt_disk_size': '10737418240',
'backing_file': '/somepath/disk1',
'disk_size': '83886080',
'over_committed_disk_size': '10653532160'}],
'fake2': [{'type': 'raw', 'path': '/somepath/disk2',
'virt_disk_size': '0',
'backing_file': '/somepath/disk2',
'disk_size': '10737418240',
'over_committed_disk_size': '0'}]}
def get_info(instance_name):
return jsonutils.dumps(fake_disks.get(instance_name))
self.stubs.Set(conn, 'get_instance_disk_info', get_info)
result = conn.get_disk_over_committed_size_total()
self.assertEqual(result, 10653532160)
def test_cpu_info(self):
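        # get_cpu_info() should serialize the host CPU vendor, model, arch,
        # features and topology as JSON.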
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
def get_host_capabilities_stub(self):
cpu = vconfig.LibvirtConfigCPU()
cpu.model = "Opteron_G4"
cpu.vendor = "AMD"
cpu.arch = "x86_64"
cpu.cores = 2
cpu.threads = 1
cpu.sockets = 4
cpu.add_feature(vconfig.LibvirtConfigCPUFeature("extapic"))
cpu.add_feature(vconfig.LibvirtConfigCPUFeature("3dnow"))
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
guest = vconfig.LibvirtConfigGuest()
guest.ostype = vm_mode.HVM
guest.arch = "x86_64"
guest.domtype = ["kvm"]
caps.guests.append(guest)
guest = vconfig.LibvirtConfigGuest()
guest.ostype = vm_mode.HVM
guest.arch = "i686"
guest.domtype = ["kvm"]
caps.guests.append(guest)
return caps
self.stubs.Set(libvirt_driver.LibvirtDriver,
'get_host_capabilities',
get_host_capabilities_stub)
want = {"vendor": "AMD",
"features": ["extapic", "3dnow"],
"model": "Opteron_G4",
"arch": "x86_64",
"topology": {"cores": 2, "threads": 1, "sockets": 4}}
got = jsonutils.loads(conn.get_cpu_info())
self.assertEqual(want, got)
def test_diagnostic_vcpus_exception(self):
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
</devices>
</domain>
"""
class DiagFakeDomain(FakeVirtDomain):
def __init__(self):
super(DiagFakeDomain, self).__init__(fake_xml=xml)
def vcpus(self):
raise libvirt.libvirtError('vcpus missing')
def blockStats(self, path):
return (169L, 688640L, 0L, 0L, -1L)
def interfaceStats(self, path):
return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)
def memoryStats(self):
return {'actual': 220160L, 'rss': 200164L}
def maxMemory(self):
return 280160L
def fake_lookup_name(name):
return DiagFakeDomain()
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'vda_read': 688640L,
'vda_read_req': 169L,
'vda_write': 0L,
'vda_write_req': 0L,
'vda_errors': -1L,
'vdb_read': 688640L,
'vdb_read_req': 169L,
'vdb_write': 0L,
'vdb_write_req': 0L,
'vdb_errors': -1L,
'memory': 280160L,
'memory-actual': 220160L,
'memory-rss': 200164L,
'vnet0_rx': 4408L,
'vnet0_rx_drop': 0L,
'vnet0_rx_errors': 0L,
'vnet0_rx_packets': 82L,
'vnet0_tx': 0L,
'vnet0_tx_drop': 0L,
'vnet0_tx_errors': 0L,
'vnet0_tx_packets': 0L,
}
self.assertEqual(actual, expect)
def test_diagnostic_blockstats_exception(self):
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
</devices>
</domain>
"""
class DiagFakeDomain(FakeVirtDomain):
def __init__(self):
super(DiagFakeDomain, self).__init__(fake_xml=xml)
def vcpus(self):
return ([(0, 1, 15340000000L, 0),
(1, 1, 1640000000L, 0),
(2, 1, 3040000000L, 0),
(3, 1, 1420000000L, 0)],
[(True, False),
(True, False),
(True, False),
(True, False)])
def blockStats(self, path):
raise libvirt.libvirtError('blockStats missing')
def interfaceStats(self, path):
return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)
def memoryStats(self):
return {'actual': 220160L, 'rss': 200164L}
def maxMemory(self):
return 280160L
def fake_lookup_name(name):
return DiagFakeDomain()
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'cpu0_time': 15340000000L,
'cpu1_time': 1640000000L,
'cpu2_time': 3040000000L,
'cpu3_time': 1420000000L,
'memory': 280160L,
'memory-actual': 220160L,
'memory-rss': 200164L,
'vnet0_rx': 4408L,
'vnet0_rx_drop': 0L,
'vnet0_rx_errors': 0L,
'vnet0_rx_packets': 82L,
'vnet0_tx': 0L,
'vnet0_tx_drop': 0L,
'vnet0_tx_errors': 0L,
'vnet0_tx_packets': 0L,
}
self.assertEqual(actual, expect)
def test_diagnostic_interfacestats_exception(self):
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
</devices>
</domain>
"""
class DiagFakeDomain(FakeVirtDomain):
def __init__(self):
super(DiagFakeDomain, self).__init__(fake_xml=xml)
def vcpus(self):
return ([(0, 1, 15340000000L, 0),
(1, 1, 1640000000L, 0),
(2, 1, 3040000000L, 0),
(3, 1, 1420000000L, 0)],
[(True, False),
(True, False),
(True, False),
(True, False)])
def blockStats(self, path):
return (169L, 688640L, 0L, 0L, -1L)
def interfaceStats(self, path):
raise libvirt.libvirtError('interfaceStat missing')
def memoryStats(self):
return {'actual': 220160L, 'rss': 200164L}
def maxMemory(self):
return 280160L
def fake_lookup_name(name):
return DiagFakeDomain()
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'cpu0_time': 15340000000L,
'cpu1_time': 1640000000L,
'cpu2_time': 3040000000L,
'cpu3_time': 1420000000L,
'vda_read': 688640L,
'vda_read_req': 169L,
'vda_write': 0L,
'vda_write_req': 0L,
'vda_errors': -1L,
'vdb_read': 688640L,
'vdb_read_req': 169L,
'vdb_write': 0L,
'vdb_write_req': 0L,
'vdb_errors': -1L,
'memory': 280160L,
'memory-actual': 220160L,
'memory-rss': 200164L,
}
self.assertEqual(actual, expect)
def test_diagnostic_memorystats_exception(self):
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
</devices>
</domain>
"""
class DiagFakeDomain(FakeVirtDomain):
def __init__(self):
super(DiagFakeDomain, self).__init__(fake_xml=xml)
def vcpus(self):
return ([(0, 1, 15340000000L, 0),
(1, 1, 1640000000L, 0),
(2, 1, 3040000000L, 0),
(3, 1, 1420000000L, 0)],
[(True, False),
(True, False),
(True, False),
(True, False)])
def blockStats(self, path):
return (169L, 688640L, 0L, 0L, -1L)
def interfaceStats(self, path):
return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)
def memoryStats(self):
raise libvirt.libvirtError('memoryStats missing')
def maxMemory(self):
return 280160L
def fake_lookup_name(name):
return DiagFakeDomain()
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'cpu0_time': 15340000000L,
'cpu1_time': 1640000000L,
'cpu2_time': 3040000000L,
'cpu3_time': 1420000000L,
'vda_read': 688640L,
'vda_read_req': 169L,
'vda_write': 0L,
'vda_write_req': 0L,
'vda_errors': -1L,
'vdb_read': 688640L,
'vdb_read_req': 169L,
'vdb_write': 0L,
'vdb_write_req': 0L,
'vdb_errors': -1L,
'memory': 280160L,
'vnet0_rx': 4408L,
'vnet0_rx_drop': 0L,
'vnet0_rx_errors': 0L,
'vnet0_rx_packets': 82L,
'vnet0_tx': 0L,
'vnet0_tx_drop': 0L,
'vnet0_tx_errors': 0L,
'vnet0_tx_packets': 0L,
}
self.assertEqual(actual, expect)
def test_diagnostic_full(self):
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
</devices>
</domain>
"""
class DiagFakeDomain(FakeVirtDomain):
def __init__(self):
super(DiagFakeDomain, self).__init__(fake_xml=xml)
def vcpus(self):
return ([(0, 1, 15340000000L, 0),
(1, 1, 1640000000L, 0),
(2, 1, 3040000000L, 0),
(3, 1, 1420000000L, 0)],
[(True, False),
(True, False),
(True, False),
(True, False)])
def blockStats(self, path):
return (169L, 688640L, 0L, 0L, -1L)
def interfaceStats(self, path):
return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)
def memoryStats(self):
return {'actual': 220160L, 'rss': 200164L}
def maxMemory(self):
return 280160L
def fake_lookup_name(name):
return DiagFakeDomain()
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'cpu0_time': 15340000000L,
'cpu1_time': 1640000000L,
'cpu2_time': 3040000000L,
'cpu3_time': 1420000000L,
'vda_read': 688640L,
'vda_read_req': 169L,
'vda_write': 0L,
'vda_write_req': 0L,
'vda_errors': -1L,
'vdb_read': 688640L,
'vdb_read_req': 169L,
'vdb_write': 0L,
'vdb_write_req': 0L,
'vdb_errors': -1L,
'memory': 280160L,
'memory-actual': 220160L,
'memory-rss': 200164L,
'vnet0_rx': 4408L,
'vnet0_rx_drop': 0L,
'vnet0_rx_errors': 0L,
'vnet0_rx_packets': 82L,
'vnet0_tx': 0L,
'vnet0_tx_drop': 0L,
'vnet0_tx_errors': 0L,
'vnet0_tx_packets': 0L,
}
self.assertEqual(actual, expect)
def test_failing_vcpu_count(self):
"""Domain can fail to return the vcpu description in case it's
just starting up or shutting down. Make sure None is handled
gracefully.
"""
class DiagFakeDomain(object):
def __init__(self, vcpus):
self._vcpus = vcpus
def vcpus(self):
if self._vcpus is None:
return None
else:
return ([1] * self._vcpus, [True] * self._vcpus)
driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
conn = driver._conn
self.mox.StubOutWithMock(driver, 'list_instance_ids')
conn.lookupByID = self.mox.CreateMockAnything()
driver.list_instance_ids().AndReturn([1, 2])
conn.lookupByID(1).AndReturn(DiagFakeDomain(None))
conn.lookupByID(2).AndReturn(DiagFakeDomain(5))
self.mox.ReplayAll()
self.assertEqual(5, driver.get_vcpu_used())
def test_get_instance_capabilities(self):
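        # The supported (arch, hypervisor type, vm_mode) tuples should be derived
        # from the host capabilities.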
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
def get_host_capabilities_stub(self):
caps = vconfig.LibvirtConfigCaps()
guest = vconfig.LibvirtConfigGuest()
guest.ostype = 'hvm'
guest.arch = 'x86_64'
guest.domtype = ['kvm', 'qemu']
caps.guests.append(guest)
guest = vconfig.LibvirtConfigGuest()
guest.ostype = 'hvm'
guest.arch = 'i686'
guest.domtype = ['kvm']
caps.guests.append(guest)
return caps
self.stubs.Set(libvirt_driver.LibvirtDriver,
'get_host_capabilities',
get_host_capabilities_stub)
want = [('x86_64', 'kvm', 'hvm'),
('x86_64', 'qemu', 'hvm'),
('i686', 'kvm', 'hvm')]
got = conn.get_instance_capabilities()
self.assertEqual(want, got)
def test_event_dispatch(self):
# Validate that the libvirt self-pipe for forwarding
# events between threads is working sanely
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
got_events = []
def handler(event):
got_events.append(event)
conn.register_event_listener(handler)
conn._init_events_pipe()
event1 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_STARTED)
event2 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_PAUSED)
conn._queue_event(event1)
conn._queue_event(event2)
conn._dispatch_events()
want_events = [event1, event2]
self.assertEqual(want_events, got_events)
event3 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_RESUMED)
event4 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_STOPPED)
conn._queue_event(event3)
conn._queue_event(event4)
conn._dispatch_events()
want_events = [event1, event2, event3, event4]
self.assertEqual(want_events, got_events)
def test_event_lifecycle(self):
# Validate that libvirt events are correctly translated
# to Nova events
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
got_events = []
def handler(event):
got_events.append(event)
conn.register_event_listener(handler)
conn._init_events_pipe()
fake_dom_xml = """
<domain type='kvm'>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
</devices>
</domain>
"""
dom = FakeVirtDomain(fake_dom_xml,
"cef19ce0-0ca2-11df-855d-b19fbce37686")
conn._event_lifecycle_callback(conn._conn,
dom,
libvirt.VIR_DOMAIN_EVENT_STOPPED,
0,
conn)
conn._dispatch_events()
self.assertEqual(len(got_events), 1)
self.assertEqual(type(got_events[0]), virtevent.LifecycleEvent)
self.assertEqual(got_events[0].uuid,
"cef19ce0-0ca2-11df-855d-b19fbce37686")
self.assertEqual(got_events[0].transition,
virtevent.EVENT_LIFECYCLE_STOPPED)
def test_set_cache_mode(self):
self.flags(disk_cachemodes=['file=directsync'])
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuestDisk()
fake_conf.source_type = 'file'
conn.set_cache_mode(fake_conf)
self.assertEqual(fake_conf.driver_cache, 'directsync')
def test_set_cache_mode_invalid_mode(self):
self.flags(disk_cachemodes=['file=FAKE'])
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuestDisk()
fake_conf.source_type = 'file'
conn.set_cache_mode(fake_conf)
self.assertEqual(fake_conf.driver_cache, None)
def test_set_cache_mode_invalid_object(self):
self.flags(disk_cachemodes=['file=directsync'])
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuest()
fake_conf.driver_cache = 'fake'
conn.set_cache_mode(fake_conf)
self.assertEqual(fake_conf.driver_cache, 'fake')
def _test_shared_storage_detection(self, is_same):
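        # Shared storage is detected by touching a file over ssh and checking
        # whether it appears locally; cleanup differs depending on the outcome.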
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.mox.StubOutWithMock(conn, 'get_host_ip_addr')
self.mox.StubOutWithMock(utils, 'execute')
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(os, 'unlink')
conn.get_host_ip_addr().AndReturn('bar')
utils.execute('ssh', 'foo', 'touch', mox.IgnoreArg())
os.path.exists(mox.IgnoreArg()).AndReturn(is_same)
if is_same:
os.unlink(mox.IgnoreArg())
else:
utils.execute('ssh', 'foo', 'rm', mox.IgnoreArg())
self.mox.ReplayAll()
return conn._is_storage_shared_with('foo', '/path')
def test_shared_storage_detection_same_host(self):
self.assertTrue(self._test_shared_storage_detection(True))
def test_shared_storage_detection_different_host(self):
self.assertFalse(self._test_shared_storage_detection(False))
def test_shared_storage_detection_easy(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.mox.StubOutWithMock(conn, 'get_host_ip_addr')
self.mox.StubOutWithMock(utils, 'execute')
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(os, 'unlink')
conn.get_host_ip_addr().AndReturn('foo')
self.mox.ReplayAll()
self.assertTrue(conn._is_storage_shared_with('foo', '/path'))
class HostStateTestCase(test.TestCase):
cpu_info = ('{"vendor": "Intel", "model": "pentium", "arch": "i686", '
'"features": ["ssse3", "monitor", "pni", "sse2", "sse", '
'"fxsr", "clflush", "pse36", "pat", "cmov", "mca", "pge", '
'"mtrr", "sep", "apic"], '
'"topology": {"cores": "1", "threads": "1", "sockets": "1"}}')
instance_caps = [("x86_64", "kvm", "hvm"), ("i686", "kvm", "hvm")]
class FakeConnection(object):
"""Fake connection object."""
def get_vcpu_total(self):
return 1
def get_vcpu_used(self):
return 0
def get_cpu_info(self):
return HostStateTestCase.cpu_info
def get_local_gb_info(self):
return {'total': 100, 'used': 20, 'free': 80}
def get_memory_mb_total(self):
return 497
def get_memory_mb_used(self):
return 88
def get_hypervisor_type(self):
return 'QEMU'
def get_hypervisor_version(self):
return 13091
def get_hypervisor_hostname(self):
return 'compute1'
def get_host_uptime(self):
return ('10:01:16 up 1:36, 6 users, '
'load average: 0.21, 0.16, 0.19')
def get_disk_available_least(self):
return 13091
def get_instance_capabilities(self):
return HostStateTestCase.instance_caps
def test_update_status(self):
hs = libvirt_driver.HostState(self.FakeConnection())
stats = hs._stats
self.assertEquals(stats["vcpus"], 1)
self.assertEquals(stats["vcpus_used"], 0)
self.assertEquals(stats["cpu_info"],
{"vendor": "Intel", "model": "pentium", "arch": "i686",
"features": ["ssse3", "monitor", "pni", "sse2", "sse",
"fxsr", "clflush", "pse36", "pat", "cmov",
"mca", "pge", "mtrr", "sep", "apic"],
"topology": {"cores": "1", "threads": "1", "sockets": "1"}
})
self.assertEquals(stats["disk_total"], 100)
self.assertEquals(stats["disk_used"], 20)
self.assertEquals(stats["disk_available"], 80)
self.assertEquals(stats["host_memory_total"], 497)
self.assertEquals(stats["host_memory_free"], 409)
self.assertEquals(stats["hypervisor_type"], 'QEMU')
self.assertEquals(stats["hypervisor_version"], 13091)
self.assertEquals(stats["hypervisor_hostname"], 'compute1')
class NWFilterFakes:
def __init__(self):
self.filters = {}
def nwfilterLookupByName(self, name):
if name in self.filters:
return self.filters[name]
raise libvirt.libvirtError('Filter Not Found')
def filterDefineXMLMock(self, xml):
class FakeNWFilterInternal:
def __init__(self, parent, name, xml):
self.name = name
self.parent = parent
self.xml = xml
def undefine(self):
del self.parent.filters[self.name]
pass
tree = etree.fromstring(xml)
name = tree.get('name')
if name not in self.filters:
self.filters[name] = FakeNWFilterInternal(self, name, xml)
return True
class IptablesFirewallTestCase(test.TestCase):
def setUp(self):
super(IptablesFirewallTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
class FakeLibvirtDriver(object):
def nwfilterDefineXML(*args, **kwargs):
"""setup_basic_rules in nwfilter calls this."""
pass
self.fake_libvirt_connection = FakeLibvirtDriver()
self.fw = firewall.IptablesFirewallDriver(
fake.FakeVirtAPI(),
get_connection=lambda: self.fake_libvirt_connection)
in_rules = [
'# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
'*nat',
':PREROUTING ACCEPT [1170:189210]',
':INPUT ACCEPT [844:71028]',
':OUTPUT ACCEPT [5149:405186]',
':POSTROUTING ACCEPT [5063:386098]',
'# Completed on Tue Dec 18 15:50:25 2012',
      '# Generated by iptables-save v1.4.12 on Tue Dec 18 15:50:25 2012',
'*mangle',
':PREROUTING ACCEPT [241:39722]',
':INPUT ACCEPT [230:39282]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [266:26558]',
':POSTROUTING ACCEPT [267:26590]',
'-A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM '
'--checksum-fill',
'COMMIT',
'# Completed on Tue Dec 18 15:50:25 2012',
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*filter',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
'[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
'[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -o virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'[0:0] -A FORWARD -i virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010',
]
in6_filter_rules = [
'# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
'*filter',
':INPUT ACCEPT [349155:75810423]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [349256:75777230]',
'COMMIT',
'# Completed on Tue Jan 18 23:47:56 2011',
]
def _create_instance_ref(self):
return db.instance_create(self.context,
{'user_id': 'fake',
'project_id': 'fake',
'instance_type_id': 1})
def test_static_filters(self):
instance_ref = self._create_instance_ref()
src_instance_ref = self._create_instance_ref()
admin_ctxt = context.get_admin_context()
secgroup = db.security_group_create(admin_ctxt,
{'user_id': 'fake',
'project_id': 'fake',
'name': 'testgroup',
'description': 'test group'})
src_secgroup = db.security_group_create(admin_ctxt,
{'user_id': 'fake',
'project_id': 'fake',
'name': 'testsourcegroup',
'description': 'src group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': 8,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'cidr': '192.168.10.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'group_id': src_secgroup['id']})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'group_id': src_secgroup['id']})
db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
secgroup['id'])
db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'],
src_secgroup['id'])
instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])
# self.fw.add_instance(instance_ref)
def fake_iptables_execute(*cmd, **kwargs):
process_input = kwargs.get('process_input', None)
if cmd == ('ip6tables-save', '-c'):
return '\n'.join(self.in6_filter_rules), None
if cmd == ('iptables-save', '-c'):
return '\n'.join(self.in_rules), None
if cmd == ('iptables-restore', '-c'):
lines = process_input.split('\n')
if '*filter' in lines:
self.out_rules = lines
return '', ''
if cmd == ('ip6tables-restore', '-c',):
lines = process_input.split('\n')
if '*filter' in lines:
self.out6_rules = lines
return '', ''
network_model = _fake_network_info(self.stubs, 1, spectacular=True)
from nova.network import linux_net
linux_net.iptables_manager.execute = fake_iptables_execute
_fake_stub_out_get_nw_info(self.stubs, lambda *a, **kw: network_model)
network_info = network_model.legacy()
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.apply_instance_filter(instance_ref, network_info)
in_rules = filter(lambda l: not l.startswith('#'),
self.in_rules)
for rule in in_rules:
if 'nova' not in rule:
self.assertTrue(rule in self.out_rules,
'Rule went missing: %s' % rule)
instance_chain = None
for rule in self.out_rules:
# This is pretty crude, but it'll do for now
# last two octets change
if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
instance_chain = rule.split(' ')[-1]
break
self.assertTrue(instance_chain, "The instance chain wasn't added")
security_group_chain = None
for rule in self.out_rules:
# This is pretty crude, but it'll do for now
if '-A %s -j' % instance_chain in rule:
security_group_chain = rule.split(' ')[-1]
break
self.assertTrue(security_group_chain,
"The security group chain wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp '
'-s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"ICMP acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp '
'--icmp-type 8 -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"ICMP Echo Request acceptance rule wasn't added")
for ip in network_model.fixed_ips():
if ip['version'] != 4:
continue
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp -m multiport '
'--dports 80:81 -s %s' % ip['address'])
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -s '
'%s' % ip['address'])
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"Protocol/port-less acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp '
'-m multiport --dports 80:81 -s 192.168.10.0/24')
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
db.instance_destroy(admin_ctxt, instance_ref['uuid'])
def test_filters_for_instance_with_ip_v6(self):
self.flags(use_ipv6=True)
network_info = _fake_network_info(self.stubs, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEquals(len(rulesv4), 2)
self.assertEquals(len(rulesv6), 1)
def test_filters_for_instance_without_ip_v6(self):
self.flags(use_ipv6=False)
network_info = _fake_network_info(self.stubs, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEquals(len(rulesv4), 2)
self.assertEquals(len(rulesv6), 0)
def test_multinic_iptables(self):
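        # Each fixed IP should add one filter rule per address family, plus two
        # extra IPv4 rules for the DHCP request.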
ipv4_rules_per_addr = 1
ipv4_addr_per_network = 2
ipv6_rules_per_addr = 1
ipv6_addr_per_network = 1
networks_count = 5
instance_ref = self._create_instance_ref()
network_info = _fake_network_info(self.stubs, networks_count,
ipv4_addr_per_network)
ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
network_info)
self.fw.prepare_instance_filter(instance_ref, network_info)
ipv4 = self.fw.iptables.ipv4['filter'].rules
ipv6 = self.fw.iptables.ipv6['filter'].rules
ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
# Extra rules are for the DHCP request
rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
networks_count) + 2
self.assertEquals(ipv4_network_rules, rules)
self.assertEquals(ipv6_network_rules,
ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
def test_do_refresh_security_group_rules(self):
instance_ref = self._create_instance_ref()
self.mox.StubOutWithMock(self.fw,
'instance_rules')
self.mox.StubOutWithMock(self.fw,
'add_filters_for_instance',
use_mock_anything=True)
self.fw.instance_rules(instance_ref,
mox.IgnoreArg()).AndReturn((None, None))
self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg(),
mox.IgnoreArg())
self.fw.instance_rules(instance_ref,
mox.IgnoreArg()).AndReturn((None, None))
self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg(),
mox.IgnoreArg())
self.mox.ReplayAll()
self.fw.prepare_instance_filter(instance_ref, mox.IgnoreArg())
self.fw.instances[instance_ref['id']] = instance_ref
self.fw.do_refresh_security_group_rules("fake")
def test_unfilter_instance_undefines_nwfilter(self):
admin_ctxt = context.get_admin_context()
fakefilter = NWFilterFakes()
_xml_mock = fakefilter.filterDefineXMLMock
self.fw.nwfilter._conn.nwfilterDefineXML = _xml_mock
_lookup_name = fakefilter.nwfilterLookupByName
self.fw.nwfilter._conn.nwfilterLookupByName = _lookup_name
instance_ref = self._create_instance_ref()
network_info = _fake_network_info(self.stubs, 1)
self.fw.setup_basic_filtering(instance_ref, network_info)
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.apply_instance_filter(instance_ref, network_info)
original_filter_count = len(fakefilter.filters)
self.fw.unfilter_instance(instance_ref, network_info)
# should undefine just the instance filter
self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
db.instance_destroy(admin_ctxt, instance_ref['uuid'])
def test_provider_firewall_rules(self):
# setup basic instance data
instance_ref = self._create_instance_ref()
# FRAGILE: peeks at how the firewall names chains
chain_name = 'inst-%s' % instance_ref['id']
# create a firewall via setup_basic_filtering like libvirt_conn.spawn
# should have a chain with 0 rules
network_info = _fake_network_info(self.stubs, 1)
self.fw.setup_basic_filtering(instance_ref, network_info)
self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains)
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(0, len(rules))
admin_ctxt = context.get_admin_context()
# add a rule and send the update message, check for 1 rule
provider_fw0 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'tcp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))
# Add another, refresh, and make sure number of rules goes to two
provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'udp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(2, len(rules))
# create the instance filter and make sure it has a jump rule
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.apply_instance_filter(instance_ref, network_info)
inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == chain_name]
jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
provjump_rules = []
# IptablesTable doesn't make rules unique internally
for rule in jump_rules:
if 'provider' in rule.rule and rule not in provjump_rules:
provjump_rules.append(rule)
self.assertEqual(1, len(provjump_rules))
# remove a rule from the db, cast to compute to refresh rule
db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))
class NWFilterTestCase(test.TestCase):
def setUp(self):
super(NWFilterTestCase, self).setUp()
class Mock(object):
pass
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.fake_libvirt_connection = Mock()
self.fw = firewall.NWFilterFirewall(fake.FakeVirtAPI(),
lambda: self.fake_libvirt_connection)
def test_cidr_rule_nwfilter_xml(self):
cloud_controller = cloud.CloudController()
cloud_controller.create_security_group(self.context,
'testgroup',
'test group description')
cloud_controller.authorize_security_group_ingress(self.context,
'testgroup',
from_port='80',
to_port='81',
ip_protocol='tcp',
cidr_ip='0.0.0.0/0')
security_group = db.security_group_get_by_name(self.context,
'fake',
'testgroup')
self.teardown_security_group()
def teardown_security_group(self):
cloud_controller = cloud.CloudController()
cloud_controller.delete_security_group(self.context, 'testgroup')
def setup_and_return_security_group(self):
cloud_controller = cloud.CloudController()
cloud_controller.create_security_group(self.context,
'testgroup',
'test group description')
cloud_controller.authorize_security_group_ingress(self.context,
'testgroup',
from_port='80',
to_port='81',
ip_protocol='tcp',
cidr_ip='0.0.0.0/0')
return db.security_group_get_by_name(self.context, 'fake', 'testgroup')
def _create_instance(self):
return db.instance_create(self.context,
{'user_id': 'fake',
'project_id': 'fake',
'instance_type_id': 1})
def _create_instance_type(self, params=None):
"""Create a test instance."""
if not params:
params = {}
context = self.context.elevated()
inst = {}
inst['name'] = 'm1.small'
inst['memory_mb'] = '1024'
inst['vcpus'] = '1'
inst['root_gb'] = '10'
inst['ephemeral_gb'] = '20'
inst['flavorid'] = '1'
inst['swap'] = '2048'
inst['rxtx_factor'] = 1
inst.update(params)
return db.instance_type_create(context, inst)['id']
def test_creates_base_rule_first(self):
# These come pre-defined by libvirt
self.defined_filters = ['no-mac-spoofing',
'no-ip-spoofing',
'no-arp-spoofing',
'allow-dhcp-server']
self.recursive_depends = {}
for f in self.defined_filters:
self.recursive_depends[f] = []
def _filterDefineXMLMock(xml):
dom = minidom.parseString(xml)
name = dom.firstChild.getAttribute('name')
self.recursive_depends[name] = []
for f in dom.getElementsByTagName('filterref'):
ref = f.getAttribute('filter')
self.assertTrue(ref in self.defined_filters,
('%s referenced filter that does ' +
'not yet exist: %s') % (name, ref))
dependencies = [ref] + self.recursive_depends[ref]
self.recursive_depends[name] += dependencies
self.defined_filters.append(name)
return True
self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock
instance_ref = self._create_instance()
inst_id = instance_ref['id']
inst_uuid = instance_ref['uuid']
def _ensure_all_called(mac, allow_dhcp):
instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'],
mac.translate(None, ':'))
requiredlist = ['no-arp-spoofing', 'no-ip-spoofing',
'no-mac-spoofing']
if allow_dhcp:
requiredlist.append('allow-dhcp-server')
for required in requiredlist:
self.assertTrue(required in
self.recursive_depends[instance_filter],
"Instance's filter does not include %s" %
required)
self.security_group = self.setup_and_return_security_group()
db.instance_add_security_group(self.context, inst_uuid,
self.security_group['id'])
instance = db.instance_get(self.context, inst_id)
network_info = _fake_network_info(self.stubs, 1)
# since there is one (network_info) there is one vif
# pass this vif's mac to _ensure_all_called()
# to set the instance_filter properly
mac = network_info[0][1]['mac']
self.fw.setup_basic_filtering(instance, network_info)
allow_dhcp = False
for (network, mapping) in network_info:
if mapping['dhcp_server']:
allow_dhcp = True
break
_ensure_all_called(mac, allow_dhcp)
db.instance_remove_security_group(self.context, inst_uuid,
self.security_group['id'])
self.teardown_security_group()
db.instance_destroy(context.get_admin_context(), instance_ref['uuid'])
def test_unfilter_instance_undefines_nwfilters(self):
admin_ctxt = context.get_admin_context()
fakefilter = NWFilterFakes()
self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
instance_ref = self._create_instance()
inst_id = instance_ref['id']
inst_uuid = instance_ref['uuid']
self.security_group = self.setup_and_return_security_group()
db.instance_add_security_group(self.context, inst_uuid,
self.security_group['id'])
instance = db.instance_get(self.context, inst_id)
network_info = _fake_network_info(self.stubs, 1)
self.fw.setup_basic_filtering(instance, network_info)
original_filter_count = len(fakefilter.filters)
self.fw.unfilter_instance(instance, network_info)
self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
db.instance_destroy(admin_ctxt, instance_ref['uuid'])
def test_nwfilter_parameters(self):
admin_ctxt = context.get_admin_context()
fakefilter = NWFilterFakes()
self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
instance_ref = self._create_instance()
inst_id = instance_ref['id']
inst_uuid = instance_ref['uuid']
self.security_group = self.setup_and_return_security_group()
db.instance_add_security_group(self.context, inst_uuid,
self.security_group['id'])
instance = db.instance_get(self.context, inst_id)
network_info = _fake_network_info(self.stubs, 1)
self.fw.setup_basic_filtering(instance, network_info)
(network, mapping) = network_info[0]
nic_id = mapping['mac'].replace(':', '')
instance_filter_name = self.fw._instance_filter_name(instance, nic_id)
f = fakefilter.nwfilterLookupByName(instance_filter_name)
tree = etree.fromstring(f.xml)
for fref in tree.findall('filterref'):
parameters = fref.findall('./parameter')
for parameter in parameters:
if parameter.get('name') == 'IP':
self.assertTrue(_ipv4_like(parameter.get('value'),
'192.168'))
elif parameter.get('name') == 'DHCPSERVER':
dhcp_server = mapping['dhcp_server']
self.assertEqual(parameter.get('value'), dhcp_server)
elif parameter.get('name') == 'RASERVER':
ra_server = mapping.get('gateway_v6') + "/128"
self.assertEqual(parameter.get('value'), ra_server)
elif parameter.get('name') == 'PROJNET':
ipv4_cidr = network['cidr']
net, mask = netutils.get_net_and_mask(ipv4_cidr)
self.assertEqual(parameter.get('value'), net)
elif parameter.get('name') == 'PROJMASK':
ipv4_cidr = network['cidr']
net, mask = netutils.get_net_and_mask(ipv4_cidr)
self.assertEqual(parameter.get('value'), mask)
elif parameter.get('name') == 'PROJNET6':
ipv6_cidr = network['cidr_v6']
net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
self.assertEqual(parameter.get('value'), net)
elif parameter.get('name') == 'PROJMASK6':
ipv6_cidr = network['cidr_v6']
net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
self.assertEqual(parameter.get('value'), prefix)
else:
raise exception.InvalidParameterValue('unknown parameter '
'in filter')
db.instance_destroy(admin_ctxt, instance_ref['uuid'])
class LibvirtUtilsTestCase(test.TestCase):
def test_get_iscsi_initiator(self):
self.mox.StubOutWithMock(utils, 'execute')
initiator = 'fake.initiator.iqn'
rval = ("junk\nInitiatorName=%s\njunk\n" % initiator, None)
utils.execute('cat', '/etc/iscsi/initiatorname.iscsi',
run_as_root=True).AndReturn(rval)
# Start test
self.mox.ReplayAll()
result = libvirt_utils.get_iscsi_initiator()
self.assertEqual(initiator, result)
def test_create_image(self):
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('qemu-img', 'create', '-f', 'raw',
'/some/path', '10G')
utils.execute('qemu-img', 'create', '-f', 'qcow2',
'/some/stuff', '1234567891234')
# Start test
self.mox.ReplayAll()
libvirt_utils.create_image('raw', '/some/path', '10G')
libvirt_utils.create_image('qcow2', '/some/stuff', '1234567891234')
def test_create_cow_image(self):
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
rval = ('', '')
os.path.exists('/some/path').AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', '/some/path').AndReturn(rval)
utils.execute('qemu-img', 'create', '-f', 'qcow2',
'-o', 'backing_file=/some/path',
'/the/new/cow')
# Start test
self.mox.ReplayAll()
libvirt_utils.create_cow_image('/some/path', '/the/new/cow')
def test_pick_disk_driver_name(self):
type_map = {'kvm': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']),
'qemu': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']),
'xen': ([True, 'phy'], [False, 'tap'], [None, 'tap']),
'uml': ([True, None], [False, None], [None, None]),
'lxc': ([True, None], [False, None], [None, None])}
for (libvirt_type, checks) in type_map.iteritems():
self.flags(libvirt_type=libvirt_type)
for (is_block_dev, expected_result) in checks:
result = libvirt_utils.pick_disk_driver_name(is_block_dev)
self.assertEquals(result, expected_result)
def test_get_disk_size(self):
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
os.path.exists('/some/path').AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
'/some/path').AndReturn(('''image: 00000001
file format: raw
virtual size: 4.4M (4592640 bytes)
disk size: 4.4M''', ''))
# Start test
self.mox.ReplayAll()
self.assertEquals(disk.get_disk_size('/some/path'), 4592640)
def test_copy_image(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
src_fd, src_path = tempfile.mkstemp()
try:
with os.fdopen(src_fd, 'w') as fp:
fp.write('canary')
libvirt_utils.copy_image(src_path, dst_path)
with open(dst_path, 'r') as fp:
self.assertEquals(fp.read(), 'canary')
finally:
os.unlink(src_path)
finally:
os.unlink(dst_path)
def test_write_to_file(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
libvirt_utils.write_to_file(dst_path, 'hello')
with open(dst_path, 'r') as fp:
self.assertEquals(fp.read(), 'hello')
finally:
os.unlink(dst_path)
def test_write_to_file_with_umask(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
os.unlink(dst_path)
libvirt_utils.write_to_file(dst_path, 'hello', umask=0277)
with open(dst_path, 'r') as fp:
self.assertEquals(fp.read(), 'hello')
mode = os.stat(dst_path).st_mode
self.assertEquals(mode & 0277, 0)
finally:
os.unlink(dst_path)
def test_chown(self):
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('chown', 'soren', '/some/path', run_as_root=True)
self.mox.ReplayAll()
libvirt_utils.chown('/some/path', 'soren')
def _do_test_extract_snapshot(self, dest_format='raw', out_format='raw'):
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('qemu-img', 'convert', '-f', 'qcow2', '-O', out_format,
'-s', 'snap1', '/path/to/disk/image', '/extracted/snap')
# Start test
self.mox.ReplayAll()
libvirt_utils.extract_snapshot('/path/to/disk/image', 'qcow2',
'snap1', '/extracted/snap', dest_format)
def test_extract_snapshot_raw(self):
self._do_test_extract_snapshot()
def test_extract_snapshot_iso(self):
self._do_test_extract_snapshot(dest_format='iso')
def test_extract_snapshot_qcow2(self):
self._do_test_extract_snapshot(dest_format='qcow2', out_format='qcow2')
def test_load_file(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
# We have a test for write_to_file. If that is sound, this suffices
libvirt_utils.write_to_file(dst_path, 'hello')
self.assertEquals(libvirt_utils.load_file(dst_path), 'hello')
finally:
os.unlink(dst_path)
def test_file_open(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
# We have a test for write_to_file. If that is sound, this suffices
libvirt_utils.write_to_file(dst_path, 'hello')
with libvirt_utils.file_open(dst_path, 'r') as fp:
self.assertEquals(fp.read(), 'hello')
finally:
os.unlink(dst_path)
def test_get_fs_info(self):
class FakeStatResult(object):
def __init__(self):
self.f_bsize = 4096
self.f_frsize = 4096
self.f_blocks = 2000
self.f_bfree = 1000
self.f_bavail = 900
self.f_files = 2000
self.f_ffree = 1000
self.f_favail = 900
self.f_flag = 4096
self.f_namemax = 255
self.path = None
def fake_statvfs(path):
self.path = path
return FakeStatResult()
self.stubs.Set(os, 'statvfs', fake_statvfs)
fs_info = libvirt_utils.get_fs_info('/some/file/path')
self.assertEquals('/some/file/path', self.path)
self.assertEquals(8192000, fs_info['total'])
self.assertEquals(3686400, fs_info['free'])
self.assertEquals(4096000, fs_info['used'])
def test_fetch_image(self):
self.mox.StubOutWithMock(images, 'fetch_to_raw')
context = 'opaque context'
target = '/tmp/targetfile'
image_id = '4'
user_id = 'fake'
project_id = 'fake'
images.fetch_to_raw(context, image_id, target, user_id, project_id)
self.mox.ReplayAll()
libvirt_utils.fetch_image(context, target, image_id,
user_id, project_id)
def test_fetch_raw_image(self):
def fake_execute(*cmd, **kwargs):
self.executes.append(cmd)
return None, None
def fake_rename(old, new):
self.executes.append(('mv', old, new))
def fake_unlink(path):
self.executes.append(('rm', path))
def fake_rm_on_errror(path):
self.executes.append(('rm', '-f', path))
def fake_qemu_img_info(path):
class FakeImgInfo(object):
pass
file_format = path.split('.')[-1]
if file_format == 'part':
file_format = path.split('.')[-2]
elif file_format == 'converted':
file_format = 'raw'
if 'backing' in path:
backing_file = 'backing'
else:
backing_file = None
FakeImgInfo.file_format = file_format
FakeImgInfo.backing_file = backing_file
return FakeImgInfo()
self.stubs.Set(utils, 'execute', fake_execute)
self.stubs.Set(os, 'rename', fake_rename)
self.stubs.Set(os, 'unlink', fake_unlink)
self.stubs.Set(images, 'fetch', lambda *_: None)
self.stubs.Set(images, 'qemu_img_info', fake_qemu_img_info)
self.stubs.Set(utils, 'delete_if_exists', fake_rm_on_errror)
context = 'opaque context'
image_id = '4'
user_id = 'fake'
project_id = 'fake'
target = 't.qcow2'
self.executes = []
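        # For a qcow2 source the fetched .part file is converted to raw, the
        # partial download removed, and the converted file moved into place.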
expected_commands = [('qemu-img', 'convert', '-O', 'raw',
't.qcow2.part', 't.qcow2.converted'),
('rm', 't.qcow2.part'),
('mv', 't.qcow2.converted', 't.qcow2')]
images.fetch_to_raw(context, image_id, target, user_id, project_id)
self.assertEqual(self.executes, expected_commands)
target = 't.raw'
self.executes = []
expected_commands = [('mv', 't.raw.part', 't.raw')]
images.fetch_to_raw(context, image_id, target, user_id, project_id)
self.assertEqual(self.executes, expected_commands)
target = 'backing.qcow2'
self.executes = []
expected_commands = [('rm', '-f', 'backing.qcow2.part')]
self.assertRaises(exception.ImageUnacceptable,
images.fetch_to_raw,
context, image_id, target, user_id, project_id)
self.assertEqual(self.executes, expected_commands)
del self.executes
def test_get_disk_backing_file(self):
with_actual_path = False
def fake_execute(*args, **kwargs):
if with_actual_path:
return ("some: output\n"
"backing file: /foo/bar/baz (actual path: /a/b/c)\n"
"...: ...\n"), ''
else:
return ("some: output\n"
"backing file: /foo/bar/baz\n"
"...: ...\n"), ''
def return_true(*args, **kwargs):
return True
self.stubs.Set(utils, 'execute', fake_execute)
self.stubs.Set(os.path, 'exists', return_true)
out = libvirt_utils.get_disk_backing_file('')
self.assertEqual(out, 'baz')
with_actual_path = True
out = libvirt_utils.get_disk_backing_file('')
self.assertEqual(out, 'c')
class LibvirtDriverTestCase(test.TestCase):
"""Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver."""
def setUp(self):
super(LibvirtDriverTestCase, self).setUp()
self.libvirtconnection = libvirt_driver.LibvirtDriver(
fake.FakeVirtAPI(), read_only=True)
def _create_instance(self, params=None):
"""Create a test instance."""
if not params:
params = {}
sys_meta = flavors.save_instance_type_info(
{}, flavors.get_instance_type_by_name('m1.tiny'))
inst = {}
inst['image_ref'] = '1'
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = 'fake'
inst['project_id'] = 'fake'
type_id = flavors.get_instance_type_by_name('m1.tiny')['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst['host'] = 'host1'
inst['root_gb'] = 10
inst['ephemeral_gb'] = 20
inst['config_drive'] = 1
inst['kernel_id'] = 2
inst['ramdisk_id'] = 3
inst['config_drive_id'] = 1
inst['key_data'] = 'ABCDEFG'
inst['system_metadata'] = sys_meta
inst.update(params)
return db.instance_create(context.get_admin_context(), inst)
def test_migrate_disk_and_power_off_exception(self):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.migrate_disk_and_power_off. """
self.counter = 0
self.checked_shared_storage = False
def fake_get_instance_disk_info(instance, xml=None):
return '[]'
def fake_destroy(instance):
pass
def fake_get_host_ip_addr():
return '10.0.0.1'
def fake_execute(*args, **kwargs):
self.counter += 1
if self.counter == 1:
assert False, "intentional failure"
def fake_os_path_exists(path):
return True
def fake_is_storage_shared(dest, inst_base):
self.checked_shared_storage = True
return False
self.stubs.Set(self.libvirtconnection, 'get_instance_disk_info',
fake_get_instance_disk_info)
self.stubs.Set(self.libvirtconnection, '_destroy', fake_destroy)
self.stubs.Set(self.libvirtconnection, 'get_host_ip_addr',
fake_get_host_ip_addr)
self.stubs.Set(self.libvirtconnection, '_is_storage_shared_with',
fake_is_storage_shared)
self.stubs.Set(utils, 'execute', fake_execute)
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
ins_ref = self._create_instance()
self.assertRaises(AssertionError,
self.libvirtconnection.migrate_disk_and_power_off,
None, ins_ref, '10.0.0.2', None, None)
def test_migrate_disk_and_power_off(self):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.migrate_disk_and_power_off. """
disk_info = [{'type': 'qcow2', 'path': '/test/disk',
'virt_disk_size': '10737418240',
'backing_file': '/base/disk',
'disk_size': '83886080'},
{'type': 'raw', 'path': '/test/disk.local',
'virt_disk_size': '10737418240',
'backing_file': '/base/disk.local',
'disk_size': '83886080'}]
disk_info_text = jsonutils.dumps(disk_info)
def fake_get_instance_disk_info(instance, xml=None):
return disk_info_text
def fake_destroy(instance):
pass
def fake_get_host_ip_addr():
return '10.0.0.1'
def fake_execute(*args, **kwargs):
pass
self.stubs.Set(self.libvirtconnection, 'get_instance_disk_info',
fake_get_instance_disk_info)
self.stubs.Set(self.libvirtconnection, '_destroy', fake_destroy)
self.stubs.Set(self.libvirtconnection, 'get_host_ip_addr',
fake_get_host_ip_addr)
self.stubs.Set(utils, 'execute', fake_execute)
ins_ref = self._create_instance()
# dest is different host case
out = self.libvirtconnection.migrate_disk_and_power_off(
None, ins_ref, '10.0.0.2', None, None)
self.assertEquals(out, disk_info_text)
# dest is same host case
out = self.libvirtconnection.migrate_disk_and_power_off(
None, ins_ref, '10.0.0.1', None, None)
self.assertEquals(out, disk_info_text)
def test_wait_for_running(self):
def fake_get_info(instance):
if instance['name'] == "not_found":
raise exception.NotFound
elif instance['name'] == "running":
return {'state': power_state.RUNNING}
else:
return {'state': power_state.SHUTDOWN}
self.stubs.Set(self.libvirtconnection, 'get_info',
fake_get_info)
# instance not found case
self.assertRaises(exception.NotFound,
self.libvirtconnection._wait_for_running,
{'name': 'not_found',
'uuid': 'not_found_uuid'})
# instance is running case
self.assertRaises(loopingcall.LoopingCallDone,
self.libvirtconnection._wait_for_running,
{'name': 'running',
'uuid': 'running_uuid'})
# else case
self.libvirtconnection._wait_for_running({'name': 'else',
'uuid': 'other_uuid'})
def test_finish_migration(self):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.finish_migration. """
disk_info = [{'type': 'qcow2', 'path': '/test/disk',
'local_gb': 10, 'backing_file': '/base/disk'},
{'type': 'raw', 'path': '/test/disk.local',
'local_gb': 10, 'backing_file': '/base/disk.local'}]
disk_info_text = jsonutils.dumps(disk_info)
def fake_can_resize_fs(path, size, use_cow=False):
return False
def fake_extend(path, size):
pass
def fake_to_xml(instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None, write_to_disk=False):
return ""
def fake_plug_vifs(instance, network_info):
pass
def fake_create_image(context, inst,
disk_mapping, suffix='',
disk_images=None, network_info=None,
block_device_info=None):
pass
def fake_create_domain(xml, instance=None):
return None
def fake_enable_hairpin(instance):
pass
def fake_execute(*args, **kwargs):
pass
def fake_get_info(instance):
return {'state': power_state.RUNNING}
self.flags(use_cow_images=True)
self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend)
self.stubs.Set(libvirt_driver.disk, 'can_resize_fs',
fake_can_resize_fs)
self.stubs.Set(self.libvirtconnection, 'to_xml', fake_to_xml)
self.stubs.Set(self.libvirtconnection, 'plug_vifs', fake_plug_vifs)
self.stubs.Set(self.libvirtconnection, '_create_image',
fake_create_image)
self.stubs.Set(self.libvirtconnection, '_create_domain',
fake_create_domain)
self.stubs.Set(self.libvirtconnection, '_enable_hairpin',
fake_enable_hairpin)
self.stubs.Set(utils, 'execute', fake_execute)
fw = base_firewall.NoopFirewallDriver()
self.stubs.Set(self.libvirtconnection, 'firewall_driver', fw)
self.stubs.Set(self.libvirtconnection, 'get_info',
fake_get_info)
ins_ref = self._create_instance()
self.libvirtconnection.finish_migration(
context.get_admin_context(), None, ins_ref,
disk_info_text, None, None, None)
def test_finish_revert_migration(self):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.finish_revert_migration. """
def fake_execute(*args, **kwargs):
pass
def fake_plug_vifs(instance, network_info):
pass
def fake_create_domain(xml, instance=None):
return None
def fake_enable_hairpin(instance):
pass
def fake_get_info(instance):
return {'state': power_state.RUNNING}
def fake_to_xml(instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None):
return ""
self.stubs.Set(self.libvirtconnection, 'to_xml', fake_to_xml)
self.stubs.Set(self.libvirtconnection, 'plug_vifs', fake_plug_vifs)
self.stubs.Set(utils, 'execute', fake_execute)
fw = base_firewall.NoopFirewallDriver()
self.stubs.Set(self.libvirtconnection, 'firewall_driver', fw)
self.stubs.Set(self.libvirtconnection, '_create_domain',
fake_create_domain)
self.stubs.Set(self.libvirtconnection, '_enable_hairpin',
fake_enable_hairpin)
self.stubs.Set(self.libvirtconnection, 'get_info',
fake_get_info)
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
ins_ref = self._create_instance()
os.mkdir(os.path.join(tmpdir, ins_ref['name']))
libvirt_xml_path = os.path.join(tmpdir,
ins_ref['name'],
'libvirt.xml')
f = open(libvirt_xml_path, 'w')
f.close()
self.libvirtconnection.finish_revert_migration(ins_ref, None)
def _test_finish_revert_migration_after_crash(self, backup_made, new_made):
class FakeLoopingCall:
def start(self, *a, **k):
return self
def wait(self):
return None
self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(shutil, 'rmtree')
self.mox.StubOutWithMock(utils, 'execute')
self.stubs.Set(blockinfo, 'get_disk_info', lambda *a: None)
self.stubs.Set(self.libvirtconnection, 'to_xml', lambda *a, **k: None)
self.stubs.Set(self.libvirtconnection, '_create_domain_and_network',
lambda *a: None)
self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
lambda *a, **k: FakeLoopingCall())
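        # Simulate a crash during resize: the instance's *_resize backup dir may or
        # may not exist, and the new instance dir may or may not have been created.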
libvirt_utils.get_instance_path({}).AndReturn('/fake/foo')
os.path.exists('/fake/foo_resize').AndReturn(backup_made)
if backup_made:
os.path.exists('/fake/foo').AndReturn(new_made)
if new_made:
shutil.rmtree('/fake/foo')
utils.execute('mv', '/fake/foo_resize', '/fake/foo')
self.mox.ReplayAll()
self.libvirtconnection.finish_revert_migration({}, [])
def test_finish_revert_migration_after_crash(self):
self._test_finish_revert_migration_after_crash(True, True)
def test_finish_revert_migration_after_crash_before_new(self):
self._test_finish_revert_migration_after_crash(True, False)
def test_finish_revert_migration_after_crash_before_backup(self):
self._test_finish_revert_migration_after_crash(False, False)
def test_cleanup_failed_migration(self):
self.mox.StubOutWithMock(shutil, 'rmtree')
shutil.rmtree('/fake/inst')
self.mox.ReplayAll()
self.libvirtconnection._cleanup_failed_migration('/fake/inst')
def test_confirm_migration(self):
ins_ref = self._create_instance()
self.mox.StubOutWithMock(self.libvirtconnection, "_cleanup_resize")
self.libvirtconnection._cleanup_resize(ins_ref,
_fake_network_info(self.stubs, 1))
self.mox.ReplayAll()
self.libvirtconnection.confirm_migration("migration_ref", ins_ref,
_fake_network_info(self.stubs, 1))
def test_cleanup_resize_same_host(self):
ins_ref = self._create_instance({'host': CONF.host})
def fake_os_path_exists(path):
return True
def fake_shutil_rmtree(target):
pass
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
self.stubs.Set(shutil, 'rmtree', fake_shutil_rmtree)
self.mox.ReplayAll()
self.libvirtconnection._cleanup_resize(ins_ref,
_fake_network_info(self.stubs, 1))
def test_cleanup_resize_not_same_host(self):
host = 'not' + CONF.host
ins_ref = self._create_instance({'host': host})
def fake_os_path_exists(path):
return True
def fake_shutil_rmtree(target):
pass
def fake_undefine_domain(instance):
pass
def fake_unplug_vifs(instance, network_info):
pass
def fake_unfilter_instance(instance, network_info):
pass
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
self.stubs.Set(shutil, 'rmtree', fake_shutil_rmtree)
self.stubs.Set(self.libvirtconnection, '_undefine_domain',
fake_undefine_domain)
self.stubs.Set(self.libvirtconnection, 'unplug_vifs',
fake_unplug_vifs)
self.stubs.Set(self.libvirtconnection.firewall_driver,
'unfilter_instance', fake_unfilter_instance)
self.mox.ReplayAll()
self.libvirtconnection._cleanup_resize(ins_ref,
_fake_network_info(self.stubs, 1))
def test_get_instance_disk_info_exception(self):
instance_name = "fake-instance-name"
class FakeExceptionDomain(FakeVirtDomain):
def __init__(self):
super(FakeExceptionDomain, self).__init__()
def XMLDesc(self, *args):
raise libvirt.libvirtError("Libvirt error")
def fake_lookup_by_name(instance_name):
return FakeExceptionDomain()
self.stubs.Set(self.libvirtconnection, '_lookup_by_name',
fake_lookup_by_name)
self.assertRaises(exception.InstanceNotFound,
self.libvirtconnection.get_instance_disk_info,
instance_name)
def test_get_cpuset_ids(self):
# correct syntax
self.flags(vcpu_pin_set="1")
cpuset_ids = self.libvirtconnection._get_cpuset_ids()
self.assertEqual([1], cpuset_ids)
self.flags(vcpu_pin_set="1,2")
cpuset_ids = self.libvirtconnection._get_cpuset_ids()
self.assertEqual([1, 2], cpuset_ids)
self.flags(vcpu_pin_set=", , 1 , ,, 2, ,")
cpuset_ids = self.libvirtconnection._get_cpuset_ids()
self.assertEqual([1, 2], cpuset_ids)
self.flags(vcpu_pin_set="1-1")
cpuset_ids = self.libvirtconnection._get_cpuset_ids()
self.assertEqual([1], cpuset_ids)
self.flags(vcpu_pin_set=" 1 - 1, 1 - 2 , 1 -3")
cpuset_ids = self.libvirtconnection._get_cpuset_ids()
self.assertEqual([1, 2, 3], cpuset_ids)
self.flags(vcpu_pin_set="1,^2")
cpuset_ids = self.libvirtconnection._get_cpuset_ids()
self.assertEqual([1], cpuset_ids)
self.flags(vcpu_pin_set="1-2, ^1")
cpuset_ids = self.libvirtconnection._get_cpuset_ids()
self.assertEqual([2], cpuset_ids)
self.flags(vcpu_pin_set="1-3,5,^2")
cpuset_ids = self.libvirtconnection._get_cpuset_ids()
self.assertEqual([1, 3, 5], cpuset_ids)
self.flags(vcpu_pin_set=" 1 - 3 , ^2, 5")
cpuset_ids = self.libvirtconnection._get_cpuset_ids()
self.assertEqual([1, 3, 5], cpuset_ids)
# invalid syntax
self.flags(vcpu_pin_set=" -1-3,5,^2")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set="1-3-,5,^2")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set="-3,5,^2")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set="1-,5,^2")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set="1-3,5,^2^")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set="1-3,5,^2-")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set="--13,^^5,^2")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set="a-3,5,^2")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set="1-a,5,^2")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set="1-3,b,^2")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set="1-3,5,^c")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set="3 - 1, 5 , ^ 2 ")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set=" 1,1, ^1")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set=" 1,^1,^1,2, ^2")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
self.flags(vcpu_pin_set="^2")
self.assertRaises(exception.Invalid,
self.libvirtconnection._get_cpuset_ids)
class LibvirtVolumeUsageTestCase(test.TestCase):
"""Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver
.get_all_volume_usage"""
def setUp(self):
super(LibvirtVolumeUsageTestCase, self).setUp()
self.conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.c = context.get_admin_context()
# creating instance
inst = {}
inst['uuid'] = '875a8070-d0b9-4949-8b31-104d125c9a64'
self.ins_ref = db.instance_create(self.c, inst)
# verify bootable volume device path also
self.bdms = [{'volume_id': 1,
'device_name': '/dev/vde'},
{'volume_id': 2,
'device_name': 'vda'}]
def test_get_all_volume_usage(self):
def fake_block_stats(instance_name, disk):
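            # (rd_req, rd_bytes, wr_req, wr_bytes, flush_operations), as mapped
            # into expected_usage below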
return (169L, 688640L, 0L, 0L, -1L)
self.stubs.Set(self.conn, 'block_stats', fake_block_stats)
vol_usage = self.conn.get_all_volume_usage(self.c,
[dict(instance=self.ins_ref, instance_bdms=self.bdms)])
expected_usage = [{'volume': 1,
'instance': self.ins_ref,
'rd_bytes': 688640L, 'wr_req': 0L,
'flush_operations': -1L, 'rd_req': 169L,
'wr_bytes': 0L},
{'volume': 2,
'instance': self.ins_ref,
'rd_bytes': 688640L, 'wr_req': 0L,
'flush_operations': -1L, 'rd_req': 169L,
'wr_bytes': 0L}]
self.assertEqual(vol_usage, expected_usage)
def test_get_all_volume_usage_device_not_found(self):
def fake_lookup(instance_name):
raise libvirt.libvirtError('invalid path')
self.stubs.Set(self.conn, '_lookup_by_name', fake_lookup)
vol_usage = self.conn.get_all_volume_usage(self.c,
[dict(instance=self.ins_ref, instance_bdms=self.bdms)])
self.assertEqual(vol_usage, [])
class LibvirtNonblockingTestCase(test.TestCase):
"""Test libvirt_nonblocking option."""
def setUp(self):
super(LibvirtNonblockingTestCase, self).setUp()
self.flags(libvirt_nonblocking=True, libvirt_uri="test:///default")
def test_connection_to_primitive(self):
# Test bug 962840.
import nova.virt.libvirt.driver as libvirt_driver
connection = libvirt_driver.LibvirtDriver('')
jsonutils.to_primitive(connection._conn, convert_instances=True)
| test_destroy_undefines |
utils_test.go | package main
import (
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"strings"
"testing"
uuid "github.com/hashicorp/go-uuid"
)
func TestUtilUUID(t *testing.T) {
for id := 1; id < 11; id++ {
recordUUID, err := uuid.GenerateUUID()
t.Logf("Checking[%d]: %s\n", id, recordUUID)
if err != nil {
t.Fatalf("Failed to generate UUID %s: %s ", recordUUID, err)
} else if isValidUUID(recordUUID) == false {
t.Fatalf("Failed to validate UUID: %s ", recordUUID)
}
}
}
func TestUtilAppNames(t *testing.T) {
goodApps := []string{"penn", "teller", "a123", "good_app"}
for _, value := range goodApps {
if isValidApp(value) == false {
t.Fatalf("Failed to validate good app name: %s ", value)
}
}
badApps := []string{"P1", "4as", "_a", "a.a", "a a", "a!b"}
for _, value := range badApps {
if isValidApp(value) == true {
			t.Fatalf("Bad app name was accepted as valid: %s", value)
}
}
}
func TestUtilStringPatternMatch(t *testing.T) |
func TestUtilGetJSONPost(t *testing.T) {
goodJsons := []string{
`{"login":"abc","name": "tom", "pass": "mylittlepony", "admin": true}`,
`{"login":1,"name": "tom", "pass": "mylittlepony", "admin": true}`,
`{"login":123,"name": "tom", "pass": "mylittlepony", "admin": true}`,
`{"login":"1234","name": "tom", "pass": "mylittlepony", "admin": true}`,
}
for _, value := range goodJsons {
request := httptest.NewRequest("POST", "/user", strings.NewReader(value))
request.Header.Set("Content-Type", "application/json")
result, err := getJSONPost(request, "IL")
if err != nil {
t.Fatalf("Failed to parse json: %s, err: %s\n", value, err)
}
if len(result.loginIdx) == 0 {
t.Fatalf("Failed to parse login index from json: %s ", value)
}
}
badJsons := []string{
`{"login":true,"name": "tom", "pass": "mylittlepony", "admin": true}`,
`{"login":null,"name": "tom", "pass": "mylittlepony", "admin": true}`,
}
for _, value := range badJsons {
request := httptest.NewRequest("POST", "/user", strings.NewReader(value))
request.Header.Set("Content-Type", "application/json")
result, err := getJSONPost(request, "IL")
if err != nil {
t.Fatalf("Failed to parse json: %s, err: %s\n", value, err)
}
if len(result.loginIdx) != 0 {
			t.Fatalf("Unexpectedly parsed login index from bad json: %s", value)
}
}
}
func TestUtilSMS(t *testing.T) {
server := httptest.NewServer(reqMiddleware(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
rw.Header().Set("Content-Type", "application/json")
rw.WriteHeader(200)
defer req.Body.Close()
bodyBytes, _ := ioutil.ReadAll(req.Body)
fmt.Printf("body: %s\n", string(bodyBytes))
if string(bodyBytes) != "Body=Data+Bunker+code+1234&From=from1234&To=4444" {
t.Fatalf("bad request: %s", string(bodyBytes))
}
})))
// Close the server when test finishes
defer server.Close()
client := server.Client()
domain := server.URL
var cfg Config
sendCodeByPhoneDo(domain, client, 1234, "4444", cfg)
}
func TestUtilNotifyConsentChange(t *testing.T) {
q := make(chan string)
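	// The handler runs on the server goroutine, so it reports its verdict through q
	// and any failure is raised on the test goroutine below.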
server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
rw.Header().Set("Content-Type", "application/json")
rw.WriteHeader(200)
defer req.Body.Close()
bodyBytes, _ := ioutil.ReadAll(req.Body)
fmt.Printf("body: %s\n", string(bodyBytes))
if string(bodyBytes) != `{"action":"consentchange","identity":"[email protected]","brief":"brief","mode":"email","status":"no"}` {
q <- fmt.Sprintf("bad request in notifyConsentChange: %s", string(bodyBytes))
} else {
q <- "ok"
}
}))
// Close the server when test finishes
defer server.Close()
notifyConsentChange(server.URL, "brief", "no", "email", "[email protected]")
response := <-q
if response != "ok" {
t.Fatal(response)
}
}
func TestUtilNotifyProfileNew(t *testing.T) {
q := make(chan string)
server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
rw.Header().Set("Content-Type", "application/json")
rw.WriteHeader(200)
defer req.Body.Close()
bodyBytes, _ := ioutil.ReadAll(req.Body)
fmt.Printf("body: %s\n", string(bodyBytes))
if string(bodyBytes) != `{"action":"profilenew","identity":"[email protected]","mode":"email","profile":{"name":"alex"}}` {
			q <- fmt.Sprintf("bad request in notifyProfileNew: %s", string(bodyBytes))
} else {
q <- "ok"
}
}))
// Close the server when test finishes
defer server.Close()
profile := []byte(`{"name":"alex"}`)
notifyProfileNew(server.URL, profile, "email", "[email protected]")
response := <-q
if response != "ok" {
t.Fatal(response)
}
}
func TestUtilNotifyForgetMe(t *testing.T) {
q := make(chan string)
server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
rw.Header().Set("Content-Type", "application/json")
rw.WriteHeader(200)
defer req.Body.Close()
bodyBytes, _ := ioutil.ReadAll(req.Body)
fmt.Printf("body: %s\n", string(bodyBytes))
if string(bodyBytes) != `{"action":"forgetme","identity":"[email protected]","mode":"email","profile":{"name":"alex"}}` {
			q <- fmt.Sprintf("bad request in notifyForgetMe: %s", string(bodyBytes))
} else {
q <- "ok"
}
}))
// Close the server when test finishes
defer server.Close()
profile := []byte(`{"name":"alex"}`)
notifyForgetMe(server.URL, profile, "email", "[email protected]")
response := <-q
if response != "ok" {
t.Fatal(response)
}
}
func TestUtilNotifyProfileChange(t *testing.T) {
q := make(chan string)
server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
rw.Header().Set("Content-Type", "application/json")
rw.WriteHeader(200)
defer req.Body.Close()
bodyBytes, _ := ioutil.ReadAll(req.Body)
fmt.Printf("body: %s\n", string(bodyBytes))
if string(bodyBytes) != `{"action":"profilechange","identity":"[email protected]","mode":"email","old":{"name":"alex2"},"profile":{"name":"alex3"}}` {
			q <- fmt.Sprintf("bad request in notifyProfileChange: %s", string(bodyBytes))
} else {
q <- "ok"
}
}))
// Close the server when test finishes
defer server.Close()
profile := []byte(`{"name":"alex2"}`)
profile2 := []byte(`{"name":"alex3"}`)
notifyProfileChange(server.URL, profile, profile2, "email", "[email protected]")
response := <-q
if response != "ok" {
t.Fatal(response)
}
}
| {
goodJsons := []map[string]interface{}{
{"pattern": "*", "name": "tom", "result": true},
{"pattern": "aa", "name": "tom", "result": false},
{"pattern": "", "name": "aa", "result": false},
{"pattern": "test*", "name": "123testabc", "result": false},
{"pattern": "test*", "name": "testabc", "result": true},
{"pattern": "*test*", "name": "test1", "result": true},
{"pattern": "*test", "name": "123testabc", "result": false},
{"pattern": "*test", "name": "123test", "result": true},
}
for _, value := range goodJsons {
if stringPatternMatch(value["pattern"].(string), value["name"].(string)) != value["result"].(bool) {
t.Fatalf("Failed in %s match %s\n", value["pattern"].(string), value["name"].(string))
}
}
} |
parse.js | 'use strict';
var MailParser = require('mailparser').MailParser;
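// Middleware factory: parses mail.raw with MailParser, stores the parsed message on
// mail.json, and base64-encodes attachment contents before calling next().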
module.exports = function parser() {
return function(mail, smtp, next) {
var mailParser = new MailParser();
mailParser.on('end', function(mailObject) {
mail.json = mailObject;
if (mail.json.attachments) {
mail.json.attachments.forEach(function(attachment, idx, attachments) {
if (!attachment.content) {
return; | attachments[idx].content = attachment.content.toString('base64');
});
}
next();
});
mailParser.write(mail.raw);
mailParser.end();
};
} | } |
smiles_vocab_creator.py | #!/usr/bin/env python3
"""Export vocabulary of SMILESLanguage from .smi files in directory."""
import argparse
import os
from pytoda.smiles.smiles_language import SMILESLanguage
# define the parser arguments
parser = argparse.ArgumentParser()
parser.add_argument('smi_path', type=str, help='path to a folder with .smi files')
parser.add_argument(
'pretrained_path',
type=str,
help='path to a folder to store the language as text files.',
)
def | (smi_path: str, pretrained_path: str) -> None:
"""
Create a SMILESLanguage object and save it to disk.
Args:
smi_path (str): path to a folder containing .smi files.
pretrained_path (str): directory to store the language as text files.
"""
os.makedirs(pretrained_path, exist_ok=True)
smiles_language = SMILESLanguage()
smiles_language.add_smis(
[
os.path.join(smi_path, smi_filename)
for smi_filename in os.listdir(smi_path)
if smi_filename.endswith('.smi')
]
)
smiles_language.save_pretrained(pretrained_path)
if __name__ == '__main__':
# parse arguments
args = parser.parse_args()
# run the creation and export
create_smiles_language(args.smi_path, args.pretrained_path)
| create_smiles_language |
consensus.go | package commandline
import (
"fmt"
"github.com/ningxin18/go-bcos-sdk/precompiled/consensus"
"github.com/spf13/cobra"
)
var addObserver = &cobra.Command{
Use: "addObserver",
Short: "[nodeID] Add an observer node",
Long: `Add an observer node from sealer list or free node list.
Arguments:
[nodeID]: string
For example:
[addObserver] [67f01658fe24d9cc24dce0af580a4646b8e4a229d9cb7f445b16253232a0f4013426ca16d587b610bc0c70a1f741ce7448abef58d98ef73557b669de29fa3f26]
For more information please refer:
https://fisco-bcos-documentation.readthedocs.io/zh_CN/latest/docs/manual/console.html#addobserver`,
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
nodeID := args[0]
consensusService, err := consensus.NewConsensusService(RPC)
if err != nil {
fmt.Printf("addObserver failed, consensus.NewConsensusService err: %v\n", err)
return
}
result, err := consensusService.AddObserver(nodeID)
if err != nil {
fmt.Printf("addObserver failed, consensusService.AddObserver err: %v\n", err)
return
}
if result != 1 {
fmt.Println("addObserver failed")
return
}
fmt.Println(DefaultSuccessMessage)
},
}
var addSealer = &cobra.Command{
Use: "addSealer",
Short: "[nodeID] Add a sealer node",
Long: `Add a sealer node from observer list in group.
Arguments:
[nodeID]: string
For example:
[addSealer] [67f01658fe24d9cc24dce0af580a4646b8e4a229d9cb7f445b16253232a0f4013426ca16d587b610bc0c70a1f741ce7448abef58d98ef73557b669de29fa3f26]
For more information please refer:
https://fisco-bcos-documentation.readthedocs.io/zh_CN/latest/docs/manual/console.html#addsealer`,
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
nodeID := args[0]
consensusService, err := consensus.NewConsensusService(RPC)
if err != nil {
fmt.Printf("addSealer failed, consensus.NewConsensusService err: %v\n", err)
return
}
result, err := consensusService.AddSealer(nodeID)
if err != nil {
fmt.Printf("addSealer failed, consensusService.AddSealer err: %v\n", err)
return
}
if result != 1 {
fmt.Println("addSealer failed")
return
}
fmt.Println(DefaultSuccessMessage)
},
}
var removeNode = &cobra.Command{
Use: "removeNode",
Short: "[nodeID] Remove a node",
Long: `Remove a node from sealer list or observer list in group.
Arguments:
[nodeID]: string
For example:
[removeNode] [67f01658fe24d9cc24dce0af580a4646b8e4a229d9cb7f445b16253232a0f4013426ca16d587b610bc0c70a1f741ce7448abef58d98ef73557b669de29fa3f26]
For more information please refer:
https://fisco-bcos-documentation.readthedocs.io/zh_CN/latest/docs/manual/console.html#removenode`,
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
nodeID := args[0]
consensusService, err := consensus.NewConsensusService(RPC)
if err != nil {
fmt.Printf("removeNode failed, consensus.NewConsensusService err:%v\n", err)
return
}
result, err := consensusService.RemoveNode(nodeID)
if err != nil {
fmt.Printf("removeNode failed, consensusService.RemoveNode err: %v\n", err)
return | fmt.Println("removeNode failed")
return
}
fmt.Println(DefaultSuccessMessage)
},
}
func init() {
rootCmd.AddCommand(addObserver, addSealer, removeNode)
} | }
if result != 1 { |
startQiskit_noisy2320.py | # qubit number=4
# total number=37
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
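# Helpers for defining the oracle: bitwise_xor XORs two bit strings (result reversed),
# bitwise_dot computes their inner product modulo 2.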
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def | (s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=19
prog.cz(input_qubit[0],input_qubit[3]) # number=20
prog.h(input_qubit[3]) # number=21
prog.h(input_qubit[3]) # number=24
prog.cz(input_qubit[0],input_qubit[3]) # number=25
prog.h(input_qubit[3]) # number=26
prog.cx(input_qubit[0],input_qubit[3]) # number=31
prog.x(input_qubit[3]) # number=32
prog.h(input_qubit[3]) # number=34
prog.cz(input_qubit[0],input_qubit[3]) # number=35
prog.h(input_qubit[3]) # number=36
prog.cx(input_qubit[0],input_qubit[3]) # number=18
prog.cx(input_qubit[0],input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.y(input_qubit[1]) # number=29
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[1]) # number=30
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.y(input_qubit[2]) # number=11
prog.swap(input_qubit[3],input_qubit[0]) # number=22
prog.swap(input_qubit[3],input_qubit[0]) # number=23
prog.swap(input_qubit[1],input_qubit[0]) # number=27
prog.swap(input_qubit[1],input_qubit[0]) # number=28
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
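    # Hidden-string oracle f(x) = (a . x mod 2) XOR b, passed via make_circuit to build_oracle.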
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = FakeVigo()
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy2320.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| bitwise_dot |
reconciler.go | package bucketclass
import (
"context"
"fmt"
"time"
nbv1 "github.com/noobaa/noobaa-operator/v2/pkg/apis/noobaa/v1alpha1"
"github.com/noobaa/noobaa-operator/v2/pkg/bundle"
"github.com/noobaa/noobaa-operator/v2/pkg/nb"
"github.com/noobaa/noobaa-operator/v2/pkg/options"
"github.com/noobaa/noobaa-operator/v2/pkg/system"
"github.com/noobaa/noobaa-operator/v2/pkg/util"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
// Reconciler is the context for loading or reconciling a noobaa system
type Reconciler struct {
Request types.NamespacedName
Client client.Client
Scheme *runtime.Scheme
Ctx context.Context
Logger *logrus.Entry
Recorder record.EventRecorder
NBClient nb.Client
SystemInfo *nb.SystemInfo
BucketClass *nbv1.BucketClass
NooBaa *nbv1.NooBaa
}
// NewReconciler initializes a reconciler to be used for loading or reconciling a bucket class
func NewReconciler(
req types.NamespacedName,
client client.Client,
scheme *runtime.Scheme,
recorder record.EventRecorder,
) *Reconciler |
// Reconcile reads that state of the cluster for a System object,
// and makes changes based on the state read and what is in the System.Spec.
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (r *Reconciler) Reconcile() (reconcile.Result, error) {
res := reconcile.Result{}
log := r.Logger
log.Infof("Start ...")
util.KubeCheck(r.BucketClass)
if r.BucketClass.UID == "" {
log.Infof("BucketClass %q not found or deleted. Skip reconcile.", r.BucketClass.Name)
return reconcile.Result{}, nil
}
if util.EnsureCommonMetaFields(r.BucketClass, nbv1.Finalizer) {
if !util.KubeUpdate(r.BucketClass) {
			log.Errorf("❌ BucketClass %q failed to add mandatory meta fields", r.BucketClass.Name)
res.RequeueAfter = 3 * time.Second
return res, nil
}
}
system.CheckSystem(r.NooBaa)
var err error
if r.BucketClass.DeletionTimestamp != nil {
err = r.ReconcileDeletion()
} else {
err = r.ReconcilePhases()
}
if err != nil {
if perr, isPERR := err.(*util.PersistentError); isPERR {
r.SetPhase(nbv1.BucketClassPhaseRejected, perr.Reason, perr.Message)
			log.Errorf("❌ Persistent Error: %s", err)
if r.Recorder != nil {
r.Recorder.Eventf(r.BucketClass, corev1.EventTypeWarning, perr.Reason, perr.Message)
}
} else {
res.RequeueAfter = 3 * time.Second
// leave current phase as is
r.SetPhase("", "TemporaryError", err.Error())
			log.Warnf("⏳ Temporary Error: %s", err)
}
} else {
if r.BucketClass.Status.Mode != "OPTIMAL" && r.BucketClass.Status.Mode != "" {
if r.Recorder != nil {
r.Recorder.Eventf(r.BucketClass, corev1.EventTypeWarning, r.BucketClass.Status.Mode, r.BucketClass.Status.Mode)
}
}
r.SetPhase(
nbv1.BucketClassPhaseReady,
"BucketClassPhaseReady",
"noobaa operator completed reconcile - bucket class is ready",
)
		log.Infof("✅ Done")
}
err = r.UpdateStatus()
	// If UpdateStatus fails to update the CR for any reason, keep requeueing the
	// reconcile until the CR status reflects the actual status of the bucketclass.
if err != nil {
res.RequeueAfter = 3 * time.Second
		log.Warnf("⏳ Temporary Error: %s", err)
}
return res, nil
}
// ReconcilePhases runs the reconcile flow and populates System.Status.
func (r *Reconciler) ReconcilePhases() error {
if err := r.ReconcilePhaseVerifying(); err != nil {
return err
}
if err := r.ReconcilePhaseConfiguring(); err != nil {
return err
}
return nil
}
// SetPhase updates the status phase and conditions
func (r *Reconciler) SetPhase(phase nbv1.BucketClassPhase, reason string, message string) {
c := &r.BucketClass.Status.Conditions
if phase == "" {
r.Logger.Infof("SetPhase: temporary error during phase %q", r.BucketClass.Status.Phase)
util.SetProgressingCondition(c, reason, message)
return
}
r.Logger.Infof("SetPhase: %s", phase)
r.BucketClass.Status.Phase = phase
switch phase {
case nbv1.BucketClassPhaseReady:
util.SetAvailableCondition(c, reason, message)
case nbv1.BucketClassPhaseRejected:
util.SetErrorCondition(c, reason, message)
default:
util.SetProgressingCondition(c, reason, message)
}
}
// UpdateStatus updates the bucket class status in kubernetes from the memory
func (r *Reconciler) UpdateStatus() error {
err := r.Client.Status().Update(r.Ctx, r.BucketClass)
if err != nil {
r.Logger.Errorf("UpdateStatus: %s", err)
return err
}
r.Logger.Infof("UpdateStatus: Done")
return nil
}
// ReconcilePhaseVerifying checks that we have the system and secret needed to reconcile
func (r *Reconciler) ReconcilePhaseVerifying() error {
r.SetPhase(
nbv1.BucketClassPhaseVerifying,
"BucketClassPhaseVerifying",
"noobaa operator started phase 1/2 - \"Verifying\"",
)
if r.NooBaa.UID == "" {
return util.NewPersistentError("MissingSystem",
fmt.Sprintf("NooBaa system %q not found or deleted", r.NooBaa.Name))
}
numTiers := len(r.BucketClass.Spec.PlacementPolicy.Tiers)
if numTiers != 1 && numTiers != 2 {
return util.NewPersistentError("UnsupportedNumberOfTiers",
"BucketClass supports only 1 or 2 tiers")
}
for i := range r.BucketClass.Spec.PlacementPolicy.Tiers {
tier := &r.BucketClass.Spec.PlacementPolicy.Tiers[i]
for _, backingStoreName := range tier.BackingStores {
backStore := &nbv1.BackingStore{
TypeMeta: metav1.TypeMeta{Kind: "BackingStore"},
ObjectMeta: metav1.ObjectMeta{
Name: backingStoreName,
Namespace: r.NooBaa.Namespace,
},
}
if !util.KubeCheck(backStore) {
return util.NewPersistentError("MissingBackingStore",
fmt.Sprintf("NooBaa BackingStore %q not found or deleted", backingStoreName))
}
if backStore.Status.Phase == nbv1.BackingStorePhaseRejected {
return util.NewPersistentError("RejectedBackingStore",
fmt.Sprintf("NooBaa BackingStore %q is in rejected phase", backingStoreName))
}
if backStore.Status.Phase != nbv1.BackingStorePhaseReady {
return fmt.Errorf("NooBaa BackingStore %q is not yet ready", backingStoreName)
}
}
}
return nil
}
// ReconcilePhaseConfiguring updates existing buckets to match the changes in bucket class
func (r *Reconciler) ReconcilePhaseConfiguring() error {
r.SetPhase(
nbv1.BucketClassPhaseConfiguring,
"BucketClassPhaseConfiguring",
"noobaa operator started phase 2/2 - \"Configuring\"",
)
objectBuckets := &nbv1.ObjectBucketList{}
obcSelector, _ := labels.Parse("noobaa-domain=" + options.SubDomainNS())
util.KubeList(objectBuckets, &client.ListOptions{LabelSelector: obcSelector})
var bucketNames []string
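	// Collect buckets that belong to this bucket class but were provisioned from an
	// older generation of it and therefore need to be updated.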
for i := range objectBuckets.Items {
ob := &objectBuckets.Items[i]
bucketClass := ob.Spec.AdditionalState["bucketclass"]
bucketClassGeneration := ob.Spec.AdditionalState["bucketclassgeneration"]
bucketName := ob.Spec.Endpoint.BucketName
if bucketClass != r.BucketClass.Name {
continue
}
if bucketClassGeneration == fmt.Sprintf("%d", r.BucketClass.Generation) {
continue
}
bucketNames = append(bucketNames, bucketName)
}
if len(bucketNames) == 0 {
return nil
}
sysClient, err := system.Connect(false)
if err != nil {
return err
}
r.NBClient = sysClient.NBClient
if err := r.UpdateBucketClass(); err != nil {
return err
}
return nil
}
// ReconcileDeletion handles the deletion of a bucket class using the noobaa api
func (r *Reconciler) ReconcileDeletion() error {
// Set the phase to let users know the operator has noticed the deletion request
if r.BucketClass.Status.Phase != nbv1.BucketClassPhaseDeleting {
r.SetPhase(
nbv1.BucketClassPhaseDeleting,
"BucketClassPhaseDeleting",
"noobaa operator started deletion",
)
err := r.UpdateStatus()
if err != nil {
return err
}
}
if r.NooBaa.UID == "" {
r.Logger.Infof("BucketClass %q remove finalizer because NooBaa system is already deleted", r.BucketClass.Name)
return r.FinalizeDeletion()
}
return r.FinalizeDeletion()
}
// FinalizeDeletion removed the finalizer and updates in order to let the bucket class get reclaimed by kubernetes
func (r *Reconciler) FinalizeDeletion() error {
util.RemoveFinalizer(r.BucketClass, nbv1.Finalizer)
if !util.KubeUpdate(r.BucketClass) {
return fmt.Errorf("BucketClass %q failed to remove finalizer %q", r.BucketClass.Name, nbv1.Finalizer)
}
return nil
}
// UpdateBucketClass updates all buckets that are assigned to a BucketClass
func (r *Reconciler) UpdateBucketClass() error {
log := r.Logger
if r.BucketClass == nil {
return fmt.Errorf("BucketClass not loaded %#v", r)
}
policyTiers := []nb.TierItem{}
tiers := []nb.TierInfo{}
for i := range r.BucketClass.Spec.PlacementPolicy.Tiers {
tier := &r.BucketClass.Spec.PlacementPolicy.Tiers[i]
// Tier is irrelevant and will be populated in the BE
policyTiers = append(policyTiers, nb.TierItem{Order: int64(i), Tier: "TEMP"})
		// We assume either mirror or spread, but not a mix; the bucket class controller rejects mixed classes.
placement := "SPREAD"
if tier.Placement == nbv1.TierPlacementMirror {
placement = "MIRROR"
}
// Name is irrelevant and will be populated in the BE
tiers = append(tiers, nb.TierInfo{Name: "TEMP", AttachedPools: tier.BackingStores, DataPlacement: placement})
}
result, err := r.NBClient.UpdateBucketClass(nb.UpdateBucketClassParams{
Name: r.BucketClass.Name,
// Name is irrelevant and will be populated in the BE
Policy: nb.TieringPolicyInfo{Name: "TEMP", Tiers: policyTiers},
Tiers: tiers,
})
if err != nil {
return fmt.Errorf("Failed to update bucket class %q with error: %v - Can't revert changes", r.BucketClass.Name, err)
}
if result.ShouldRevert {
r.BucketClass.Spec.PlacementPolicy.Tiers = []nbv1.Tier{}
for _, t := range result.RevertToPolicy.Tiers {
placement := nbv1.TierPlacementSpread
if t.DataPlacement == "MIRROR" {
placement = nbv1.TierPlacementMirror
}
r.BucketClass.Spec.PlacementPolicy.Tiers = append(r.BucketClass.Spec.PlacementPolicy.Tiers,
nbv1.Tier{Placement: placement, BackingStores: t.AttachedPools})
}
util.KubeUpdate(r.BucketClass)
return util.NewPersistentError("InvalidConfReverting", fmt.Sprintf("Unable to change bucketclass due to error: %v", result.ErrorMessage))
// return fmt.Errorf("Failed to update bucket class %q with error: %v - Reverting back", r.BucketClass.Name, result.ErrorMessage)
}
	log.Infof("✅ Successfully updated bucket class %q", r.BucketClass.Name)
return nil
}
| {
r := &Reconciler{
Request: req,
Client: client,
Scheme: scheme,
Recorder: recorder,
Ctx: context.TODO(),
Logger: logrus.WithField("bucketclass", req.Namespace+"/"+req.Name),
BucketClass: util.KubeObject(bundle.File_deploy_crds_noobaa_io_v1alpha1_bucketclass_cr_yaml).(*nbv1.BucketClass),
NooBaa: util.KubeObject(bundle.File_deploy_crds_noobaa_io_v1alpha1_noobaa_cr_yaml).(*nbv1.NooBaa),
}
// Set Namespace
r.BucketClass.Namespace = r.Request.Namespace
r.NooBaa.Namespace = r.Request.Namespace
// Set Names
r.BucketClass.Name = r.Request.Name
r.NooBaa.Name = options.SystemName
return r
} |
test_dist_mnist_lars.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from test_dist_base import TestDistBase
class TestDistMnist2x2Lars(TestDistBase):
def _setup_config(self):
self._sync_mode = True
self._use_reduce = False
    def test_dist_mnist_lars(self):
self.check_with_place("dist_mnist_lars.py", delta=1e-5)
if __name__ == "__main__":
unittest.main() | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
# |
|
mergeSort_test.go | package sorting_test
import (
"github.com/mrdulin/go-datastructure-algorithm/sorting"
"github.com/mrdulin/go-datastructure-algorithm/util"
"reflect"
"testing"
)
func TestMergeSort(t *testing.T) {
t.Run("should sort int slice correctly", func(t *testing.T) {
a := util.GenerateSeries(5)
got := sorting.MergeSort(a)
want := []int{1, 2, 3, 4, 5}
if !reflect.DeepEqual(got, want) {
t.Errorf("got: %v, want: %v", got, want)
}
if !reflect.DeepEqual(a, []int{5, 4, 3, 2, 1}) {
			t.Errorf("should not mutate input slice, got: %v", a)
}
})
}
// cd sorting &&
// go test -bench=MergeSort -cpu=1 -count=5
//goos: darwin
//goarch: amd64
//pkg: github.com/mrdulin/go-datastructure-algorithm/sorting
//BenchmarkMergeSort 8228 167722 ns/op
//BenchmarkMergeSort 8144 163918 ns/op
//BenchmarkMergeSort 8353 164401 ns/op
//BenchmarkMergeSort 8174 173985 ns/op
//BenchmarkMergeSort 7459 201798 ns/op
//PASS
//ok github.com/mrdulin/go-datastructure-algorithm/sorting 9.218s
func | (b *testing.B) {
a := util.GenerateSeries(1000)
for i := 0; i < b.N; i++ {
sorting.MergeSort(a)
}
}
| BenchmarkMergeSort |
bracketClose.ts | import Bracket from "./bracket";
import BracketPointer from "./bracketPointer";
import Token from "./token";
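/** A closing bracket that keeps a pointer back to its matching open bracket. */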
export default class | extends Bracket {
public readonly openBracketPointer: BracketPointer;
constructor(token: Token, openBracket: BracketPointer) {
super(token, openBracket.bracket.colorIndex, openBracket.bracket.color);
this.openBracketPointer = openBracket;
}
}
| BracketClose |
panos_ha.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
---
module: panos_ha
short_description: Configures High Availability on PAN-OS
description:
- Configures High Availability on PAN-OS in A/S and A/A modes including
all HA interface configuration. Assumes physical interfaces are of
type HA already using panos_interface.
    This module has the following limitations due to missing support in pandevice -
    * No peer_backup_ip, which prevents full configuration of ha1_backup links
    * Speed and Duplex of ports were intentionally skipped
author:
- Patrick Avery (@unknown)
version_added: '1.0.0'
requirements:
- pan-python can be obtained from PyPI U(https://pypi.python.org/pypi/pan-python)
- pandevice can be obtained from PyPI U(https://pypi.python.org/pypi/pandevice)
- currently requires specific pandevice release 0.13
extends_documentation_fragment:
- paloaltonetworks.panos.fragments.transitional_provider
- paloaltonetworks.panos.fragments.state
- paloaltonetworks.panos.fragments.vsys_import
- paloaltonetworks.panos.fragments.full_template_support
- paloaltonetworks.panos.fragments.deprecated_commit
notes:
- Checkmode is supported.
- Panorama is supported.
options:
# ha.HighAvailability
ha_enabled:
description:
- Enable HA
default: true
type: bool
ha_group_id:
description:
- The group identifier
default: 1
type: int
ha_config_sync:
description: Enabled configuration synchronization
default: true
type: bool
ha_peer_ip:
description: HA Peer HA1 IP address
type: str
ha_peer_ip_backup:
description: HA Peer HA1 Backup IP address
type: str
ha_mode:
description: Mode of HA
type: str
choices:
- active-passive
- active-active
default: active-passive
ha_passive_link_state:
description: Passive link state
type: str
choices:
- shutdown
- auto
default: auto
ha_state_sync:
description: Enabled state synchronization
type: bool
default: false
ha_ha2_keepalive:
description: Enable HA2 keepalives
type: bool
default: True
ha_ha2_keepalive_action:
description: HA2 keepalive action
type: str
ha_ha2_keepalive_threshold:
description: HA2 keepalive threshold
type: int
# Active/Active
ha_device_id:
description: HA3 device id (0 or 1)
type: int
choices:
- 0
- 1
ha_session_owner_selection:
description: active-active session owner mode
type: str
choices:
- primary-device
- first-packet
ha_session_setup:
description: active-active session setup mode
type: str
choices:
- primary-device
- first-packet
- ip-modulo
- ip-hash
ha_tentative_hold_time:
description: active-active tentative hold timer
type: int
ha_sync_qos:
description: active-active network sync qos
type: bool
ha_sync_virtual_router:
description: active-active network sync virtual router
type: bool
ha_ip_hash_key:
description: active-active hash key used by ip-hash algorithm
type: str
choices:
- source
- source-and-destination
# ha.HA1
ha1_ip_address:
description: IP of the HA1 interface
type: str
ha1_netmask:
description: Netmask of the HA1 interface
type: str
ha1_port:
description: Interface to use for this HA1 interface (eg. ethernet1/5)
type: str
ha1_gateway:
description: Default gateway of the HA1 interface
type: str
# ha.HA1Backup
ha1b_ip_address:
description: IP of the HA1Backup interface
type: str
ha1b_netmask:
description: Netmask of the HA1Backup interface
type: str
ha1b_port:
description: Interface to use for this HA1Backup interface (eg. ethernet1/5)
type: str
ha1b_gateway:
description: Default gateway of the HA1Backup interface
type: str
# ha.HA2
ha2_ip_address:
description: IP of the HA2 interface
type: str
ha2_netmask:
description: Netmask of the HA2 interface
type: str
ha2_port:
description: Interface to use for this HA2 interface (eg. ethernet1/5)
type: str
default: ha2-a
ha2_gateway:
description: Default gateway of the HA2 interface
type: str
# ha.HA2Backup
ha2b_ip_address:
description: IP of the HA2Backup interface
type: str
ha2b_netmask:
description: Netmask of the HA2Backup interface
type: str
ha2b_port:
description: Interface to use for this HA2Backup interface (eg. ethernet1/5)
type: str
ha2b_gateway:
description: Default gateway of the HA2Backup interface
type: str
# ha.HA3
ha3_port:
description: Interface to use for this HA3 interface (eg. ethernet1/5, ae1)
type: str
"""
EXAMPLES = """
- name: set ports to HA mode
panos_interface:
provider: '{{ provider }}'
if_name: "{{ item }}"
mode: "ha"
enable_dhcp: false
with_items:
- ethernet1/1
- ethernet1/2
- ethernet1/3
- ethernet1/4
- ethernet1/5
- name: Configure Active/Standby HA
panos_ha:
provider: '{{ provider }}'
state: present
ha_peer_ip: "192.168.50.1"
ha1_ip_address: "192.168.50.2"
ha1_netmask: "255.255.255.252"
ha1_port: "ethernet1/1"
ha2_port: "ethernet1/3"
- name: Configure Active/Active HA
panos_ha:
provider: "{{ provider }}"
state: present
ha_mode: "active-active"
ha_device_id: 0
ha_session_owner_selection: "first-packet"
ha_session_setup: "first-packet"
ha_peer_ip: "192.168.50.1"
ha_peer_ip_backup: "192.168.50.5"
ha1_port: "ethernet1/1"
ha1_ip_address: "192.168.50.2"
ha1_netmask: "255.255.255.252"
ha1b_port: "ethernet1/2"
ha1b_ip_address: "192.168.50.6"
ha1b_netmask: "255.255.255.252"
ha2_port: "ethernet1/3"
ha2b_port: "ethernet1/4"
ha3_port: "ethernet1/5"
"""
RETURN = """
# Default return values
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.paloaltonetworks.panos.plugins.module_utils.panos import (
get_connection,
)
try:
from panos.errors import PanDeviceError
from panos.ha import HA1, HA2, HA3, HA1Backup, HA2Backup, HighAvailability
except ImportError:
try:
from pandevice.errors import PanDeviceError
from pandevice.ha import HA1, HA2, HA3, HA1Backup, HA2Backup, HighAvailability
except ImportError:
pass
def setup_args():
return dict(
commit=dict(type="bool", default=False),
ha_enabled=dict(type="bool", default=True),
ha_group_id=dict(type="int", default=1),
ha_config_sync=dict(type="bool", default=True),
ha_peer_ip=dict(type="str"),
ha_peer_ip_backup=dict(type="str"),
ha_mode=dict(
type="str",
choices=["active-passive", "active-active"],
default="active-passive",
),
ha_passive_link_state=dict(
type="str", choices=["shutdown", "auto"], default="auto"
),
ha_state_sync=dict(type="bool", default=False),
ha_ha2_keepalive=dict(type="bool", default=True),
ha_ha2_keepalive_action=dict(type="str"),
ha_ha2_keepalive_threshold=dict(type="int"),
ha_device_id=dict(type="int", choices=[0, 1]),
ha_session_owner_selection=dict(
type="str", choices=["primary-device", "first-packet"]
),
ha_session_setup=dict(
type="str",
choices=["primary-device", "first-packet", "ip-modulo", "ip-hash"],
),
ha_tentative_hold_time=dict(type="int"),
ha_sync_qos=dict(type="bool"),
ha_sync_virtual_router=dict(type="bool"),
ha_ip_hash_key=dict(type="str", choices=["source", "source-and-destination"]),
ha1_ip_address=dict(type="str"),
ha1_netmask=dict(type="str"),
ha1_port=dict(type="str"),
ha1_gateway=dict(type="str"),
ha1b_ip_address=dict(type="str"),
ha1b_netmask=dict(type="str"),
ha1b_port=dict(type="str"),
ha1b_gateway=dict(type="str"),
ha2_ip_address=dict(type="str"),
ha2_netmask=dict(type="str"),
ha2_port=dict(type="str", default="ha2-a"),
ha2_gateway=dict(type="str"),
ha2b_ip_address=dict(type="str"),
ha2b_netmask=dict(type="str"),
ha2b_port=dict(type="str"),
ha2b_gateway=dict(type="str"),
ha3_port=dict(type="str"),
)
def | ():
helper = get_connection(
vsys_importable=True,
template=True,
template_stack=True,
with_state=True,
min_pandevice_version=(0, 13, 0),
with_classic_provider_spec=True,
argument_spec=setup_args(),
)
module = AnsibleModule(
argument_spec=helper.argument_spec,
supports_check_mode=True,
required_one_of=helper.required_one_of,
)
# Verify imports, build pandevice object tree.
parent = helper.get_pandevice_parent(module)
# Retrieve current HA configuration.
try:
listing = HighAvailability.refreshall(parent, add=False)
except PanDeviceError as e:
module.fail_json(msg="Failed refresh: {0}".format(e))
# Exclude non-object items from kwargs passed to the object.
exclude_list = [
"ip_address",
"username",
"password",
"api_key",
"state",
"commit",
"provider",
"template",
"template_stack",
"vsys",
"port",
]
# Remove excluded items from spec
spec_included = dict(
(k, module.params[k])
for k in helper.argument_spec.keys()
if k not in exclude_list
)
# Generate the kwargs for ha.HighAvailability
ha_obj_spec = {
k.replace("ha_", ""): spec_included[k]
for k in spec_included
if k.startswith("ha_")
}
# Generate the kwargs for ha.HA1
ha1_obj_spec = {
k.replace("ha1_", ""): spec_included[k]
for k in spec_included
if k.startswith("ha1_")
}
# Generate the kwargs for ha.HA1Backup
ha1b_obj_spec = {
k.replace("ha1b_", ""): spec_included[k]
for k in spec_included
if k.startswith("ha1b_")
}
# Generate the kwargs for ha.HA2
ha2_obj_spec = {
k.replace("ha2_", ""): spec_included[k]
for k in spec_included
if k.startswith("ha2_")
}
# Generate the kwargs for ha.HA2Backup
ha2b_obj_spec = {
k.replace("ha2b_", ""): spec_included[k]
for k in spec_included
if k.startswith("ha2b_")
}
# Generate the kwargs for ha.HA3
ha3_obj_spec = {
k.replace("ha3_", ""): spec_included[k]
for k in spec_included
if k.startswith("ha3_")
}
state = module.params["state"]
commit = module.params["commit"]
# Create the new state object.
obj = HighAvailability(**ha_obj_spec)
# Add sub-objects only if at least one param for that type is specified.
"""
obj.add(HA1(**ha1_obj_spec))
obj.add(HA1Backup(**ha1b_obj_spec))
obj.add(HA2(**ha2_obj_spec))
obj.add(HA2Backup(**ha2b_obj_spec))
obj.add(HA3(**ha3_obj_spec))
"""
class_specs = [
(HA1, ha1_obj_spec),
(HA1Backup, ha1b_obj_spec),
(HA2, ha2_obj_spec),
(HA2Backup, ha2b_obj_spec),
(HA3, ha3_obj_spec),
]
for cls_type, cls_spec in class_specs:
if any(x is not None for x in cls_spec.values()):
sub_obj = cls_type(**cls_spec)
obj.add(sub_obj)
# Add ha object to parent
parent.add(obj)
# HighAvailability.refreshall() is not working for these in pandevice.ha
    # removing until this is fixed to prevent `changed` from always being True
if listing:
# TODO(shinmog): Not sure if this is still needed or not
listing[0].session_owner_selection = obj.session_owner_selection
listing[0].session_setup = obj.session_setup
# Apply the state.
changed, diff = helper.apply_state(obj, listing, module)
if commit and changed:
helper.commit(module)
module.exit_json(msg="Done", changed=changed, diff=diff)
if __name__ == "__main__":
main()
| main |
pert.rs | use distribution;
use source::Source;
/// A PERT distribution.
#[derive(Clone, Copy, Debug)]
pub struct Pert {
a: f64,
b: f64,
c: f64,
alpha: f64,
beta: f64,
ln_beta: f64,
}
impl Pert {
/// Create a PERT distribution with parameters `a`, `b`, and `c`.
///
/// It should hold that `a < b < c`.
#[inline]
pub fn new(a: f64, b: f64, c: f64) -> Self {
use special::Beta as SpecialBeta;
should!(a < b && b < c);
let alpha = (4.0 * b + c - 5.0 * a) / (c - a);
let beta = (5.0 * c - a - 4.0 * b) / (c - a);
Pert {
a,
b,
c,
alpha,
beta,
ln_beta: alpha.ln_beta(beta),
}
} | #[inline(always)]
pub fn a(&self) -> f64 {
self.a
}
/// Return the second parameter.
#[inline(always)]
pub fn b(&self) -> f64 {
self.b
}
/// Return the third parameter.
#[inline(always)]
pub fn c(&self) -> f64 {
self.c
}
/// Return the first shape parameter of the corresponding Beta distribution.
#[inline(always)]
pub fn alpha(&self) -> f64 {
self.alpha
}
/// Return the second shape parameter of the corresponding Beta distribution.
#[inline(always)]
pub fn beta(&self) -> f64 {
self.beta
}
}
impl distribution::Continuous for Pert {
fn density(&self, x: f64) -> f64 {
if x < self.a || x > self.c {
0.0
} else {
let scale = self.c - self.a;
let x = (x - self.a) / scale;
((self.alpha - 1.0) * x.ln() + (self.beta - 1.0) * (-x).ln_1p() - self.ln_beta).exp()
/ scale
}
}
}
impl distribution::Distribution for Pert {
type Value = f64;
fn distribution(&self, x: f64) -> f64 {
use special::Beta;
if x <= self.a {
0.0
} else if x >= self.c {
1.0
} else {
((x - self.a) / (self.c - self.a)).inc_beta(self.alpha, self.beta, self.ln_beta)
}
}
}
impl distribution::Entropy for Pert {
fn entropy(&self) -> f64 {
use special::Gamma;
let sum = self.alpha + self.beta;
(self.c - self.a).ln() + self.ln_beta
- (self.alpha - 1.0) * self.alpha.digamma()
- (self.beta - 1.0) * self.beta.digamma()
+ (sum - 2.0) * sum.digamma()
}
}
impl distribution::Inverse for Pert {
#[inline]
fn inverse(&self, p: f64) -> f64 {
use special::Beta;
should!((0.0..=1.0).contains(&p));
self.a + (self.c - self.a) * p.inv_inc_beta(self.alpha, self.beta, self.ln_beta)
}
}
impl distribution::Kurtosis for Pert {
fn kurtosis(&self) -> f64 {
let sum = self.alpha + self.beta;
let delta = self.alpha - self.beta;
let product = self.alpha * self.beta;
6.0 * (delta * delta * (sum + 1.0) - product * (sum + 2.0))
/ (product * (sum + 2.0) * (sum + 3.0))
}
}
impl distribution::Mean for Pert {
#[inline]
fn mean(&self) -> f64 {
(self.a + self.b * 4.0 + self.c) / 6.0
}
}
impl distribution::Median for Pert {
fn median(&self) -> f64 {
use distribution::Inverse;
self.inverse(0.5)
}
}
impl distribution::Modes for Pert {
fn modes(&self) -> Vec<f64> {
vec![self.b]
}
}
impl distribution::Sample for Pert {
#[inline]
fn sample<S>(&self, source: &mut S) -> f64
where
S: Source,
{
use distribution::gamma;
let x = gamma::sample(self.alpha, source);
let y = gamma::sample(self.beta, source);
self.a + (self.c - self.a) * x / (x + y)
}
}
impl distribution::Skewness for Pert {
fn skewness(&self) -> f64 {
let sum = self.alpha + self.beta;
2.0 * (self.beta - self.alpha) * (sum + 1.0).sqrt()
/ ((sum + 2.0) * (self.alpha * self.beta).sqrt())
}
}
impl distribution::Variance for Pert {
fn variance(&self) -> f64 {
use distribution::Mean;
(self.mean() - self.a) * (self.c - self.mean()) / 7.0
}
}
#[cfg(test)]
mod tests {
use assert;
use prelude::*;
macro_rules! new(
($a:expr, $b:expr, $c:expr) => (Pert::new($a, $b, $c));
);
#[test]
fn density() {
let d = new!(-1.0, 0.5, 2.0);
let beta = Beta::new(3.0, 3.0, -1.0, 2.0);
let x = vec![-1.15, -1.0, -0.85, -0.5, 0.0, 0.5, 1.0, 1.5, 1.85, 2.0];
let p = vec![
0.0,
0.0,
0.022562499999999996,
0.19290123456790118,
0.4938271604938269,
0.6249999999999999,
0.49382716049382713,
0.1929012345679011,
0.022562499999999933,
0.0,
];
assert::close(
&x.iter().map(|&x| d.density(x)).collect::<Vec<_>>(),
&x.iter().map(|&x| beta.density(x)).collect::<Vec<_>>(),
1e-14,
);
assert::close(
&x.iter().map(|&x| d.density(x)).collect::<Vec<_>>(),
&p,
1e-14,
);
}
#[test]
fn distribution() {
let d = new!(-1.0, 0.5, 2.0);
let beta = Beta::new(3.0, 3.0, -1.0, 2.0);
let x = vec![-1.15, -1.0, -0.85, -0.5, 0.0, 0.5, 1.0, 1.5, 1.85, 2.0];
let p = vec![
0.0,
0.0,
0.001158125,
0.03549382716049382,
0.20987654320987656,
0.5,
0.7901234567901234,
0.9645061728395061,
0.998841875,
1.0,
];
assert::close(
&x.iter().map(|&x| d.distribution(x)).collect::<Vec<_>>(),
&x.iter().map(|&x| beta.distribution(x)).collect::<Vec<_>>(),
1e-14,
);
assert::close(
&x.iter().map(|&x| d.distribution(x)).collect::<Vec<_>>(),
&p,
1e-14,
);
}
#[test]
fn entropy() {
use std::f64::consts::E;
let d = vec![
new!(0.0, 0.5, 1.0),
new!(0.0, 0.5, E),
new!(0.0, 0.3, 1.0),
new!(-1.0, 1.0, 2.0),
];
assert::close(
&d.iter().map(|d| d.entropy()).collect::<Vec<_>>(),
&d.iter()
.map(|d| Beta::new(d.alpha(), d.beta(), d.a(), d.c()).entropy())
.collect::<Vec<_>>(),
1e-15,
);
}
#[test]
fn inverse() {
let d = new!(-1.0, 0.5, 2.0);
let p = vec![0.0, 0.2, 0.4, 0.6, 0.8, 1.0];
let x = vec![
-1.0,
-0.020206186475766774,
0.33876229245942,
0.6612377075405802,
1.0202061864757672,
2.0,
];
assert::close(
&p.iter().map(|&p| d.inverse(p)).collect::<Vec<_>>(),
&x,
1e-14,
);
}
#[test]
fn kurtosis() {
assert::close(new!(0.0, 0.5, 1.0).kurtosis(), -2.0 / 3.0, 1e-14);
}
#[test]
fn mean() {
assert::close(new!(0.0, 0.5, 1.0).mean(), 0.5, 1e-14);
assert::close(
new!(-1.0, 1.5, 2.0).mean(),
(1.5 * 4.0 - 1.0 + 2.0) / 6.0,
1e-14,
);
assert::close(
Beta::new(3.0, 3.0, -1.0, 2.0).mean(),
(0.5 * 4.0 - 1.0 + 2.0) / 6.0,
1e-14,
);
}
#[test]
fn median() {
assert::close(new!(0.0, 0.5, 1.0).median(), 0.5, 1e-14);
assert::close(new!(0.0, 0.3, 1.0).median(), 0.3509994849491181, 1e-14);
}
#[test]
fn modes() {
assert::close(new!(-1.0, 0.5, 2.0).modes(), vec![0.5], 1e-14);
}
#[test]
fn sample() {
for x in Independent(&new!(7.0, 20.0, 42.0), &mut source::default()).take(100) {
assert!(7.0 <= x && x <= 42.0);
}
}
#[test]
fn skewness() {
assert::close(new!(0.0, 0.5, 1.0).skewness(), 0.0, 1e-14);
assert::close(new!(-1.0, 0.2, 2.0).skewness(), 0.17797249266332246, 1e-14);
assert::close(new!(-1.0, 0.8, 2.0).skewness(), -0.17797249266332246, 1e-14);
}
#[test]
fn variance() {
assert::close(new!(0.0, 0.5, 1.0).variance(), 0.25 / 7.0, 1e-14);
assert::close(new!(0.0, 0.3, 1.0).variance(), 0.033174603174603176, 1e-14);
assert::close(new!(0.0, 0.9, 1.0).variance(), 0.02555555555555556, 1e-14);
}
} |
/// Return the first parameter. |
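The `Pert::new` constructor above re-expresses the PERT parameters (a, b, c) as Beta shape parameters, which the density, CDF and moments then rely on. A small, self-contained Python sketch of that mapping (illustrative only; the helper names are not part of the crate):

# Sketch of the PERT -> Beta(alpha, beta) mapping used by Pert::new above.
# Assumption: same formulas as the Rust code; names here are illustrative only.
def pert_shape_params(a, b, c):
    assert a < b < c
    alpha = (4.0 * b + c - 5.0 * a) / (c - a)
    beta = (5.0 * c - a - 4.0 * b) / (c - a)
    return alpha, beta

def pert_mean(a, b, c):
    # Mean of Beta(alpha, beta) rescaled to [a, c]; reduces to (a + 4b + c) / 6.
    alpha, beta = pert_shape_params(a, b, c)
    return a + (c - a) * alpha / (alpha + beta)

print(pert_shape_params(-1.0, 0.5, 2.0))  # (3.0, 3.0), matching Beta::new(3.0, 3.0, -1.0, 2.0) in the tests
print(pert_mean(-1.0, 0.5, 2.0))          # 0.5, i.e. (a + 4*b + c) / 6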
routes.go | package gisproxy
import (
"net/http"
"github.com/gorilla/mux"
)
//SetRouter set proxy router
func (proxyServer *GISProxy) SetRouter() *GISProxy {
proxyServer.Router = mux.NewRouter()
return proxyServer
}
//SetRoutes set proxy routes
func (proxyServer *GISProxy) SetRoutes() *GISProxy { | } | proxyServer.Router.HandleFunc("/token-auth", proxyServer.generateTokenHandler).Methods("POST")
proxyServer.Router.HandleFunc("/api/users", proxyServer.userResource).Methods("GET")
proxyServer.Router.PathPrefix("/geoserver").Handler(http.StripPrefix("/geoserver", http.HandlerFunc(proxyServer.authMiddleware(proxyServer.geoserverHandler))))
return proxyServer |
uvAnalyser.py | import csv
import os
# ====================
# Default variables:
default_avg_cnt = 10
default_exc_thr = 0.02
default_low_wvl = 300
default_hig_wvl = 1014
default_delimit = '\t'
default_exc_fin = True
# ====================
def welcome():
# Print a welcome screen and ask for user input. Check if input is valid.
# If so return input, if not return default values.
print("Welcome.\nThis script will merge all files in this directory, " +
"normalize them\nand make suggestions for the location of the " +
"first exciton.\nPress 'Enter' for default values (i.e. %d, %.2f).\n"
% (default_avg_cnt, default_exc_thr)
)
avg_cnt = raw_input("Number of baseline values for average: ")
if avg_cnt and valid_input(avg_cnt):
        avg_cnt = int(float(avg_cnt))
else:
avg_cnt = default_avg_cnt
exc_thr = raw_input("Exciton absorbance threshold: ")
if exc_thr and valid_input(exc_thr):
exc_thr = float(exc_thr)
else:
exc_thr = default_exc_thr
print
return avg_cnt, exc_thr
def valid_input(x):
    # Check if the value can be parsed as a number (int or float).
    try:
        float(x)
        return True
    except ValueError:
        return False
def read_data():
# Returns data from csv-files in current directory in form of a 2d list.
|
def normalize_data(data):
# Takes a 2d list, normalizes the values and returns it.
print "Normalizing data..."
dif = default_hig_wvl - default_low_wvl + 1
for col in range(1, len(data[0])):
avg = 0
for x in range(avg_cnt):
avg += float(data[dif-x][col])
avg /= avg_cnt
for row in range(1, dif+1):
data[row][col] = str(float(data[row][col])-avg)
return data
def write_data(data, delim):
# Takes a 2d list and a delimiter and writes a csv-file.
print "Writing data..."
with open("merged_files.txt", 'w') as output_file:
writer = csv.writer(output_file, delimiter=delim)
writer.writerows(data)
def exciton_finder(data):
# Takes a 2d list and writes a file with estimates for the first excitons.
if default_exc_fin:
with open("first_exciton.txt", 'w') as writer:
writer.write("sample\tfirst exciton [nm]")
exc_found = 0
for col in range(1, len(data[0])):
prev = 0
for row in range(len(data)-1, 0, -1):
if float(data[row][col]) > exc_thr:
if float(data[row][col]) < prev:
writer.write("\n%s\t%d" % (data[0][col],
row+default_low_wvl)
)
exc_found += 1
break
prev = float(data[row][col])
if exc_found == 0:
os.remove("first_exciton.txt")
print "%d of %d excitons found." % (exc_found, len(data[0])-1)
else:
print "Exciton finder disabled."
avg_cnt, exc_thr = welcome()
data = read_data()
data = normalize_data(data)
write_data(data, default_delimit)
exciton_finder(data)
raw_input("Press 'Enter' to close window...")
| print "Reading data..."
data = [["name"]]
for i in range(default_low_wvl, default_hig_wvl+1):
data.append([i])
for filename in os.listdir(os.getcwd()):
if filename.endswith("C.txt"):
data[0].append(':'.join([filename[11:13], filename[13:15]]))
with open(filename) as csvfile:
csvfile.next()
reader = csv.reader(csvfile, delimiter='\t')
for index, row in enumerate(reader):
data[index+1].append(row[1])
return data |
employee-links.component.ts | import { Component, Input } from '@angular/core';
import { Router } from '@angular/router';
@Component({
selector: 'ngx-employee-links',
template: `
<ng-container *ngIf="value">
<a
*ngIf="value?.name"
(click)="navigateToEmployee()" | >
<img
*ngIf="value.imageUrl"
width="18px"
height="18px"
[src]="value.imageUrl"
/>
{{ value.name }}
</a>
</ng-container>
`,
styles: [
`
.link-text {
cursor: pointer;
text-decoration: none;
}
.link-text:hover {
text-decoration: underline;
}
`
],
styleUrls: ['./employee-links.component.scss']
})
export class EmployeeLinksComponent {
@Input()
rowData: any;
@Input()
value: any;
constructor(private readonly _router: Router) {}
navigateToEmployee() {
if (!this.value) {
return;
}
this._router.navigate(['/pages/employees/edit/' + this.value.id]);
}
} | class="link-text" |
widget-base.js | 'use strict';
System.register(['./events', '../common/util', '../common/decorators'], function (_export, _context) {
"use strict";
var getEventOption, Util, delayed, _dec, _desc, _value, _class, firstValue, WidgetBase;
function _classCallCheck(instance, Constructor) {
if (!(instance instanceof Constructor)) {
throw new TypeError("Cannot call a class as a function");
}
}
function _applyDecoratedDescriptor(target, property, decorators, descriptor, context) {
var desc = {};
Object['ke' + 'ys'](descriptor).forEach(function (key) {
desc[key] = descriptor[key];
});
desc.enumerable = !!desc.enumerable;
desc.configurable = !!desc.configurable;
if ('value' in desc || desc.initializer) {
desc.writable = true;
}
desc = decorators.slice().reverse().reduce(function (desc, decorator) {
return decorator(target, property, desc) || desc;
}, desc);
if (context && desc.initializer !== void 0) {
desc.value = desc.initializer ? desc.initializer.call(context) : void 0;
desc.initializer = undefined;
}
if (desc.initializer === void 0) {
Object['define' + 'Property'](target, property, desc);
desc = null;
}
return desc;
}
return {
setters: [function (_events) {
getEventOption = _events.getEventOption;
}, function (_commonUtil) {
Util = _commonUtil.Util;
}, function (_commonDecorators) {
delayed = _commonDecorators.delayed;
}],
execute: function () {
firstValue = {};
_export('WidgetBase', WidgetBase = (_dec = delayed(), (_class = function () {
function | () {
_classCallCheck(this, WidgetBase);
}
WidgetBase.prototype.createWidget = function createWidget(option) {
var _this = this;
this.allOption = this.getWidgetOptions(option.element);
if (!this.ejOptions && !this.isEditor) {
this.createTwoWays();
}
this.eWidget = this.widget = jQuery($(option.element))[this.controlName](this.allOption).data(this.controlName);
if (this.templateProcessor) {
this.templateProcessor.initWidgetDependancies();
}
if (this.isEditor) {
this.widget.model._change = function (evt) {
if ('eValue' in _this) {
_this[_this.util.getBindablePropertyName('value')] = evt.value;
}
};
}
};
WidgetBase.prototype.bind = function bind(ctx, overrideCtx) {
this.parentCtx = overrideCtx;
if (this.widget && this.widget.element && this.isEditor) {
this.widget.option('value', this.eValue === undefined ? null : this.eValue);
}
};
WidgetBase.prototype.createTwoWays = function createTwoWays() {
var model = this.allOption;
var twoWays = this.twoWays;
var len = twoWays.length;
for (var i = 0; i < len; i++) {
var prop = twoWays[i];
ej.createObject(prop, this.addTwoways(prop), model);
}
};
WidgetBase.prototype.addTwoways = function addTwoways(prop) {
var model = this;
var value = firstValue;
return function (newVal, isApp) {
if (value === firstValue) {
var viewModelProp = model.util.getBindablePropertyName(prop);
value = model[viewModelProp];
if (value === undefined) {
value = this.defaults[prop];
}
return value;
}
if (newVal === undefined) {
return value;
}
if (value === newVal) {
return null;
}
value = newVal;
if (!isApp && model.util.hasValue(newVal)) {
var _viewModelProp = model.util.getBindablePropertyName(prop);
model[_viewModelProp] = newVal;
}
return null;
};
};
WidgetBase.prototype.getWidgetOptions = function getWidgetOptions(element) {
var propOptions = void 0;
if (this.ejOptions) {
propOptions = this.ejOptions;
} else {
propOptions = this.util.getOptions(this, this.controlProperties);
}
var eventOption = getEventOption(element);
if (this.hasChildProperty) {
this.getChildProperties(propOptions);
}
return Object.assign({}, propOptions, eventOption);
};
WidgetBase.prototype.getChildProperties = function getChildProperties(options) {
var PropertyName = this.childPropertyName;
var childCollection = this[PropertyName];
var len = childCollection.length;
if (len) {
options[PropertyName] = [];
var childProperties = childCollection[0].controlProperties;
for (var i = 0; i < len; i++) {
options[PropertyName].push(this.util.getOptions(childCollection[i], childProperties));
}
}
};
WidgetBase.prototype.attached = function attached() {
if (this.templateProcessor) {
this[this.childPropertyName].forEach(function (template) {
return template.setTemplates();
});
}
this.util = new Util();
this.createWidget({ element: this.element });
};
WidgetBase.prototype.unsubscribe = function unsubscribe() {
if (this.subscription) {
this.subscription.dispose();
this.subscription = null;
}
};
WidgetBase.prototype.unbind = function unbind() {
this.unsubscribe();
};
WidgetBase.prototype.propertyChanged = function propertyChanged(property, newValue, oldValue) {
var _this2 = this;
if (this.widget) {
var modelValue = void 0;
var prop = this.util.getControlPropertyName(this, property);
this.unsubscribe();
if (this.arrayObserver) {
this.arrayObserver.forEach(function (arrayProp) {
if (_this2[arrayProp] instanceof Array) {
_this2.subscription = _this2.bindingInstance.collectionObserver(_this2[arrayProp]).subscribe(function (e) {
_this2.update(e);
});
}
});
}
if (prop) {
if (prop === 'widget') {
return;
} else if (prop !== 'options') {
modelValue = this.widget.model[prop];
var isTwoway = typeof modelValue === 'function';
if (isTwoway) {
modelValue = modelValue();
}
if (modelValue !== newValue) {
if (isTwoway) {
newValue = this.addTwoways(prop);
}
this.widget.option(prop, newValue);
}
} else {
this.widget.option(newValue);
}
}
}
};
WidgetBase.prototype.update = function update(e) {
var _this3 = this;
var modelValue = void 0;
var newVal = void 0;
if (e.length) {
this.arrayObserver.forEach(function (arrayProp) {
if (_this3[arrayProp] instanceof Array) {
var prop = _this3.util.getControlPropertyName(_this3, arrayProp);
modelValue = _this3.widget.model[prop];
if (typeof modelValue === 'function') {
modelValue = modelValue();
newVal = modelValue;
newVal = _this3.addTwoways(prop);
_this3.widget.option(prop, newVal);
} else {
_this3.widget.option(prop, modelValue);
}
}
});
}
};
WidgetBase.prototype.detached = function detached() {
if (this.templateProcessor) {
this.templateProcessor.clearTempalte();
}
if (this.widget) {
this.widget.destroy();
}
};
return WidgetBase;
}(), (_applyDecoratedDescriptor(_class.prototype, 'attached', [_dec], Object.getOwnPropertyDescriptor(_class.prototype, 'attached'), _class.prototype)), _class)));
_export('WidgetBase', WidgetBase);
}
};
});
//# sourceMappingURL=../dist/dev/common/widget-base.js.map
| WidgetBase |
config.go | package vsphereprivate
import (
"context"
"fmt"
"log"
"net/url"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/terraform-providers/terraform-provider-vsphere/vsphere"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/vapi/rest"
)
// VSphereClient - The VIM/govmomi client.
type VSphereClient struct {
// vim client
vimClient *govmomi.Client
// rest client for tags
restClient *rest.Client
}
// ConfigWrapper - wrapping the terraform-provider-vsphere Config struct
type ConfigWrapper struct {
config *vsphere.Config
}
// NewConfig function
func NewConfig(d *schema.ResourceData) (*ConfigWrapper, error) {
config, err := vsphere.NewConfig(d)
if err != nil {
return nil, err
}
return &ConfigWrapper{config}, nil
}
// vimURL returns a URL to pass to the VIM SOAP client.
func (cw *ConfigWrapper) vimURL() (*url.URL, error) {
u, err := url.Parse("https://" + cw.config.VSphereServer + "/sdk")
if err != nil {
return nil, fmt.Errorf("error parse url: %s", err)
}
u.User = url.UserPassword(cw.config.User, cw.config.Password)
return u, nil
}
// Client returns a new client for accessing VMWare vSphere.
func (cw *ConfigWrapper) Client() (*VSphereClient, error) {
client := new(VSphereClient)
u, err := cw.vimURL()
if err != nil {
return nil, fmt.Errorf("error generating SOAP endpoint url: %s", err)
}
err = cw.config.EnableDebug()
if err != nil {
return nil, fmt.Errorf("error setting up client debug: %s", err)
}
// Set up the VIM/govmomi client connection, or load a previous session
client.vimClient, err = cw.config.SavedVimSessionOrNew(u)
if err != nil {
return nil, err
}
ctx, cancel := context.WithTimeout(context.TODO(), defaultAPITimeout)
defer cancel()
client.restClient, err = cw.config.SavedRestSessionOrNew(ctx, client.vimClient)
if err != nil { | log.Printf("[DEBUG] VMWare vSphere Client configured for URL: %s", cw.config.VSphereServer)
return client, nil
} | return nil, err
}
|
authcode_keyboard_test.go | package authentication
import (
"context"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/google/go-cmp/cmp"
"github.com/int128/kubelogin/pkg/adaptors/oidcclient"
"github.com/int128/kubelogin/pkg/adaptors/oidcclient/mock_oidcclient"
"github.com/int128/kubelogin/pkg/adaptors/reader/mock_reader"
"github.com/int128/kubelogin/pkg/domain/jwt"
"github.com/int128/kubelogin/pkg/testing/logger"
)
var nonNil = gomock.Not(gomock.Nil())
func | (t *testing.T) {
dummyTokenClaims := jwt.Claims{
Subject: "YOUR_SUBJECT",
Expiry: time.Date(2019, 1, 2, 3, 4, 5, 0, time.UTC),
Pretty: "PRETTY_JSON",
}
timeout := 5 * time.Second
t.Run("Success", func(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel()
o := &AuthCodeKeyboardOption{
AuthRequestExtraParams: map[string]string{"ttl": "86400", "reauth": "true"},
}
mockOIDCClient := mock_oidcclient.NewMockInterface(ctrl)
mockOIDCClient.EXPECT().SupportedPKCEMethods()
mockOIDCClient.EXPECT().
GetAuthCodeURL(nonNil).
Do(func(in oidcclient.AuthCodeURLInput) {
if diff := cmp.Diff(o.AuthRequestExtraParams, in.AuthRequestExtraParams); diff != "" {
t.Errorf("AuthRequestExtraParams mismatch (-want +got):\n%s", diff)
}
}).
Return("https://issuer.example.com/auth")
mockOIDCClient.EXPECT().
ExchangeAuthCode(nonNil, nonNil).
Do(func(_ context.Context, in oidcclient.ExchangeAuthCodeInput) {
if in.Code != "YOUR_AUTH_CODE" {
t.Errorf("Code wants YOUR_AUTH_CODE but was %s", in.Code)
}
}).
Return(&oidcclient.TokenSet{
IDToken: "YOUR_ID_TOKEN",
IDTokenClaims: dummyTokenClaims,
RefreshToken: "YOUR_REFRESH_TOKEN",
}, nil)
mockReader := mock_reader.NewMockInterface(ctrl)
mockReader.EXPECT().
ReadString(authCodeKeyboardPrompt).
Return("YOUR_AUTH_CODE", nil)
u := AuthCodeKeyboard{
Reader: mockReader,
Logger: logger.New(t),
}
got, err := u.Do(ctx, o, mockOIDCClient)
if err != nil {
t.Errorf("Do returned error: %+v", err)
}
want := &Output{
IDToken: "YOUR_ID_TOKEN",
IDTokenClaims: dummyTokenClaims,
RefreshToken: "YOUR_REFRESH_TOKEN",
}
if diff := cmp.Diff(want, got); diff != "" {
t.Errorf("mismatch (-want +got):\n%s", diff)
}
})
}
| TestAuthCodeKeyboard_Do |
OAuth.ts | import DataStore from './index';
import UserModel from '@/models/User';
import { OAuthTable, OAuth } from '@/typings/database';
class | extends DataStore<OAuthTable> {
constructor() {
super('oauth');
}
async getConnectedOAuth(type: OAuth, id: string) {
const { entities: connect } = await this.find([{ key: 'id', op: '=', value: id }, { key: 'type', op: '=', value: type }]);
return connect[0];
}
async connectToUser(type: OAuth, id: string, user: string): Promise<boolean> {
// User exist check
const existUser = await UserModel.getUserInfoByPk(user);
if (!existUser) throw new Error('Not found user');
// Duplication check
const existConnect = await this.getConnectedOAuth(type, id);
if (existConnect) throw new Error('Already connect account');
// Connect to user
const newConnect = await this.create({ type, id, user });
if (!newConnect) throw new Error('Fail connect account');
return true;
}
async getConnectOAuthUser(type: OAuth, id: string): Promise<string> {
const connect = await this.getConnectedOAuth(type, id);
if (!connect) return null;
return connect.user.toString();
}
}
export default new OAuthDatabase();
| OAuthDatabase |
main.go | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"errors"
"flag"
"os"
"github.com/sirupsen/logrus"
"k8s.io/test-infra/prow/pjutil/pprof"
"sigs.k8s.io/controller-runtime/pkg/manager"
prowapi "k8s.io/test-infra/prow/apis/prowjobs/v1"
"k8s.io/test-infra/prow/config"
"k8s.io/test-infra/prow/config/secret"
"k8s.io/test-infra/prow/crier"
gcsreporter "k8s.io/test-infra/prow/crier/reporters/gcs"
k8sgcsreporter "k8s.io/test-infra/prow/crier/reporters/gcs/kubernetes"
gerritreporter "k8s.io/test-infra/prow/crier/reporters/gerrit"
githubreporter "k8s.io/test-infra/prow/crier/reporters/github"
pubsubreporter "k8s.io/test-infra/prow/crier/reporters/pubsub"
slackreporter "k8s.io/test-infra/prow/crier/reporters/slack"
prowflagutil "k8s.io/test-infra/prow/flagutil"
configflagutil "k8s.io/test-infra/prow/flagutil/config"
gerritclient "k8s.io/test-infra/prow/gerrit/client"
"k8s.io/test-infra/prow/interrupts"
"k8s.io/test-infra/prow/io"
"k8s.io/test-infra/prow/logrusutil"
"k8s.io/test-infra/prow/metrics"
slackclient "k8s.io/test-infra/prow/slack"
)
type options struct {
client prowflagutil.KubernetesOptions
cookiefilePath string
gerritProjects gerritclient.ProjectsFlag
github prowflagutil.GitHubOptions
githubEnablement prowflagutil.GitHubEnablementOptions
config configflagutil.ConfigOptions
gerritWorkers int
pubsubWorkers int
githubWorkers int
slackWorkers int
gcsWorkers int
k8sGCSWorkers int
blobStorageWorkers int
k8sBlobStorageWorkers int
slackTokenFile string
additionalSlackTokenFiles slackclient.HostsFlag
storage prowflagutil.StorageClientOptions
instrumentationOptions prowflagutil.InstrumentationOptions
k8sReportFraction float64
dryrun bool
reportAgent string
}
func (o *options) validate() error {
// TODO(krzyzacy): gerrit && github report are actually stateful..
// Need a better design to re-enable parallel reporting
if o.gerritWorkers > 1 {
logrus.Warn("gerrit reporter only supports one worker")
o.gerritWorkers = 1
}
if o.gerritWorkers+o.pubsubWorkers+o.githubWorkers+o.slackWorkers+o.gcsWorkers+o.k8sGCSWorkers+o.blobStorageWorkers+o.k8sBlobStorageWorkers <= 0 {
return errors.New("crier need to have at least one report worker to start")
}
if o.k8sReportFraction < 0 || o.k8sReportFraction > 1 {
return errors.New("--kubernetes-report-fraction must be a float between 0 and 1")
}
if o.gerritWorkers > 0 {
if len(o.gerritProjects) == 0 {
logrus.Info("--gerrit-projects is not set, using global config")
}
if o.cookiefilePath == "" {
logrus.Info("--cookiefile is not set, using anonymous authentication")
}
}
if o.githubWorkers > 0 {
if err := o.github.Validate(o.dryrun); err != nil {
return err
}
}
if o.slackWorkers > 0 {
if o.slackTokenFile == "" && len(o.additionalSlackTokenFiles) == 0 {
return errors.New("one of --slack-token-file or --additional-slack-token-files must be set")
}
}
if o.gcsWorkers > 0 {
logrus.Warn("--gcs-workers is deprecated and will be removed in August 2020. Use --blob-storage-workers instead.")
// return an error when the old and new flags are both set
if o.blobStorageWorkers != 0 {
return errors.New("only one of --gcs-workers or --blog-storage-workers can be set at the same time")
}
// use gcsWorkers if blobStorageWorkers is not set
o.blobStorageWorkers = o.gcsWorkers
}
if o.k8sGCSWorkers > 0 {
logrus.Warn("--kubernetes-gcs-workers is deprecated and will be removed in August 2020. Use --kubernetes-blob-storage-workers instead.")
// return an error when the old and new flags are both set
if o.k8sBlobStorageWorkers != 0 {
return errors.New("only one of --kubernetes-gcs-workers or --kubernetes-blog-storage-workers can be set at the same time")
}
// use k8sGCSWorkers if k8sBlobStorageWorkers is not set
o.k8sBlobStorageWorkers = o.k8sGCSWorkers
}
for _, opt := range []interface{ Validate(bool) error }{&o.client, &o.githubEnablement, &o.config} {
if err := opt.Validate(o.dryrun); err != nil {
return err
}
}
return nil
}
func (o *options) parseArgs(fs *flag.FlagSet, args []string) error {
o.gerritProjects = gerritclient.ProjectsFlag{}
fs.StringVar(&o.cookiefilePath, "cookiefile", "", "Path to git http.cookiefile, leave empty for anonymous")
fs.Var(&o.gerritProjects, "gerrit-projects", "Set of gerrit repos to monitor on a host example: --gerrit-host=https://android.googlesource.com=platform/build,toolchain/llvm, repeat flag for each host")
fs.IntVar(&o.gerritWorkers, "gerrit-workers", 0, "Number of gerrit report workers (0 means disabled)")
fs.IntVar(&o.pubsubWorkers, "pubsub-workers", 0, "Number of pubsub report workers (0 means disabled)")
fs.IntVar(&o.githubWorkers, "github-workers", 0, "Number of github report workers (0 means disabled)")
fs.IntVar(&o.slackWorkers, "slack-workers", 0, "Number of Slack report workers (0 means disabled)")
fs.Var(&o.additionalSlackTokenFiles, "additional-slack-token-files", "Map of additional slack token files. example: --additional-slack-token-files=foo=/etc/foo-slack-tokens/token, repeat flag for each host")
fs.IntVar(&o.gcsWorkers, "gcs-workers", 0, "Number of GCS report workers (0 means disabled)")
fs.IntVar(&o.k8sGCSWorkers, "kubernetes-gcs-workers", 0, "Number of Kubernetes-specific GCS report workers (0 means disabled)")
fs.IntVar(&o.blobStorageWorkers, "blob-storage-workers", 0, "Number of blob storage report workers (0 means disabled)")
fs.IntVar(&o.k8sBlobStorageWorkers, "kubernetes-blob-storage-workers", 0, "Number of Kubernetes-specific blob storage report workers (0 means disabled)")
fs.Float64Var(&o.k8sReportFraction, "kubernetes-report-fraction", 1.0, "Approximate portion of jobs to report pod information for, if kubernetes-gcs-workers are enabled (0 - > none, 1.0 -> all)")
fs.StringVar(&o.slackTokenFile, "slack-token-file", "", "Path to a Slack token file")
fs.StringVar(&o.reportAgent, "report-agent", "", "Only report specified agent - empty means report to all agents (effective for github and Slack only)")
// TODO(krzyzacy): implement dryrun for gerrit/pubsub
fs.BoolVar(&o.dryrun, "dry-run", false, "Run in dry-run mode, not doing actual report (effective for github and Slack only)")
o.config.AddFlags(fs)
o.github.AddFlags(fs)
o.client.AddFlags(fs)
o.storage.AddFlags(fs)
o.instrumentationOptions.AddFlags(fs)
o.githubEnablement.AddFlags(fs)
fs.Parse(args)
return o.validate()
}
func parseOptions() options {
var o options
if err := o.parseArgs(flag.CommandLine, os.Args[1:]); err != nil {
logrus.WithError(err).Fatal("Invalid flag options")
}
return o
}
func main() | {
logrusutil.ComponentInit()
o := parseOptions()
defer interrupts.WaitForGracefulShutdown()
pprof.Instrument(o.instrumentationOptions)
configAgent, err := o.config.ConfigAgent()
if err != nil {
logrus.WithError(err).Fatal("Error starting config agent.")
}
cfg := configAgent.Config
restCfg, err := o.client.InfrastructureClusterConfig(o.dryrun)
if err != nil {
logrus.WithError(err).Fatal("Failed to get kubeconfig")
}
mgr, err := manager.New(restCfg, manager.Options{
Namespace: cfg().ProwJobNamespace,
MetricsBindAddress: "0",
})
if err != nil {
logrus.WithError(err).Fatal("failed to create manager")
}
// The watch apimachinery doesn't support restarts, so just exit the binary if a kubeconfig changes
// to make the kubelet restart us.
if err := o.client.AddKubeconfigChangeCallback(func() {
logrus.Info("Kubeconfig changed, exiting to trigger a restart")
interrupts.Terminate()
}); err != nil {
logrus.WithError(err).Fatal("Failed to register kubeconfig change callback")
}
var hasReporter bool
if o.slackWorkers > 0 {
if cfg().SlackReporterConfigs == nil {
logrus.Fatal("slackreporter is enabled but has no config")
}
slackConfig := func(refs *prowapi.Refs) config.SlackReporter {
return cfg().SlackReporterConfigs.GetSlackReporter(refs)
}
tokensMap := make(map[string]func() []byte)
if o.slackTokenFile != "" {
tokensMap[slackreporter.DefaultHostName] = secret.GetTokenGenerator(o.slackTokenFile)
if err := secret.Add(o.slackTokenFile); err != nil {
logrus.WithError(err).Fatal("could not read slack token")
}
}
hasReporter = true
for host, additionalTokenFile := range o.additionalSlackTokenFiles {
tokensMap[host] = secret.GetTokenGenerator(additionalTokenFile)
if err := secret.Add(additionalTokenFile); err != nil {
logrus.WithError(err).Fatal("could not read slack token")
}
}
slackReporter := slackreporter.New(slackConfig, o.dryrun, tokensMap)
if err := crier.New(mgr, slackReporter, o.slackWorkers, o.githubEnablement.EnablementChecker()); err != nil {
logrus.WithError(err).Fatal("failed to construct slack reporter controller")
}
}
if o.gerritWorkers > 0 {
gerritReporter, err := gerritreporter.NewReporter(cfg, o.cookiefilePath, o.gerritProjects, mgr.GetCache())
if err != nil {
logrus.WithError(err).Fatal("Error starting gerrit reporter")
}
hasReporter = true
if err := crier.New(mgr, gerritReporter, o.gerritWorkers, o.githubEnablement.EnablementChecker()); err != nil {
logrus.WithError(err).Fatal("failed to construct gerrit reporter controller")
}
}
if o.pubsubWorkers > 0 {
hasReporter = true
if err := crier.New(mgr, pubsubreporter.NewReporter(cfg), o.pubsubWorkers, o.githubEnablement.EnablementChecker()); err != nil {
logrus.WithError(err).Fatal("failed to construct pubsub reporter controller")
}
}
if o.githubWorkers > 0 {
if o.github.TokenPath != "" {
if err := secret.Add(o.github.TokenPath); err != nil {
logrus.WithError(err).Fatal("Error reading GitHub credentials")
}
}
githubClient, err := o.github.GitHubClient(o.dryrun)
if err != nil {
logrus.WithError(err).Fatal("Error getting GitHub client.")
}
hasReporter = true
githubReporter := githubreporter.NewReporter(githubClient, cfg, prowapi.ProwJobAgent(o.reportAgent), mgr.GetCache())
if err := crier.New(mgr, githubReporter, o.githubWorkers, o.githubEnablement.EnablementChecker()); err != nil {
logrus.WithError(err).Fatal("failed to construct github reporter controller")
}
}
if o.blobStorageWorkers > 0 || o.k8sBlobStorageWorkers > 0 {
opener, err := io.NewOpener(context.Background(), o.storage.GCSCredentialsFile, o.storage.S3CredentialsFile)
if err != nil {
logrus.WithError(err).Fatal("Error creating opener")
}
hasReporter = true
if err := crier.New(mgr, gcsreporter.New(cfg, opener, o.dryrun), o.blobStorageWorkers, o.githubEnablement.EnablementChecker()); err != nil {
logrus.WithError(err).Fatal("failed to construct gcsreporter controller")
}
if o.k8sBlobStorageWorkers > 0 {
coreClients, err := o.client.BuildClusterCoreV1Clients(o.dryrun)
if err != nil {
logrus.WithError(err).Fatal("Error building pod client sets for Kubernetes GCS workers")
}
k8sGcsReporter := k8sgcsreporter.New(cfg, opener, coreClients, float32(o.k8sReportFraction), o.dryrun)
if err := crier.New(mgr, k8sGcsReporter, o.k8sBlobStorageWorkers, o.githubEnablement.EnablementChecker()); err != nil {
logrus.WithError(err).Fatal("failed to construct k8sgcsreporter controller")
}
}
}
if !hasReporter {
logrus.Fatalf("should have at least one controller to start crier.")
}
// Push metrics to the configured prometheus pushgateway endpoint or serve them
metrics.ExposeMetrics("crier", cfg().PushGateway, o.instrumentationOptions.MetricsPort)
if err := mgr.Start(interrupts.Context()); err != nil {
logrus.WithError(err).Fatal("controller manager failed")
}
logrus.Info("Ended gracefully")
} |
|
package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Opendx(AutotoolsPackage):
"""Open Visualization Data Explorer."""
homepage = "https://github.com/Mwoolsey/OpenDX"
git = "https://github.com/Mwoolsey/OpenDX.git"
version('master', branch='master')
depends_on('motif') # lesstif also works, but exhibits odd behaviors
depends_on('gl')
@run_before('autoreconf')
def distclean(self):
| make('distclean') |
|
ExploitFrame.py |
class ExploitFrame(object):
"""Exploit object"""
def __init__(self, serviceInfo):
self.serviceInfo = serviceInfo
def exploit(self):
raise NotImplementedError()
def | (self):
raise NotImplementedError()
| exploitSuccess |
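A minimal sketch of how a concrete exploit might subclass the frame above; the serviceInfo keys and the probing logic are hypothetical, included only to show the intended override points:

class ExampleExploit(ExploitFrame):
    """Illustrative subclass only; 'host'/'port' keys are assumed, not defined by the frame."""
    def __init__(self, serviceInfo):
        super(ExampleExploit, self).__init__(serviceInfo)
        self.result = None
    def exploit(self):
        # A real implementation would interact with the target service here;
        # this sketch just records the hypothetical target address.
        self.result = "probed %s:%s" % (self.serviceInfo.get("host"),
                                        self.serviceInfo.get("port"))
    def exploitSuccess(self):
        return self.result is not None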
precise_stats.rs | use super::super::{
ComplexNumberSpace, Domain, DspVec, ErrorReason, RealNumberSpace, ScalarResult, Statistics,
Stats, StatsVec, ToSlice, Vector, STATS_VEC_CAPACTIY,
};
use super::{kahan_sum, kahan_sumb};
use crate::array_to_complex;
use crate::multicore_support::*;
use num_complex::Complex64;
use crate::numbers::*;
/// Offers the same functionality as the `StatisticsOps` trait but
/// the statistics are calculated in a more precise (and slower) way.
pub trait PreciseStatisticsOps<T> {
type Result;
/// Calculates the statistics of the data contained in the vector using
/// a more precise but slower algorithm.
///
/// # Example
///
/// ```
/// # extern crate num_complex;
/// # extern crate basic_dsp_vector;
/// # use num_complex::Complex64;
/// use basic_dsp_vector::*;
/// # fn main() {
/// let vector: Vec<f32> = vec!(1.0, 2.0, 3.0, 4.0, 5.0, 6.0);
/// let vector = vector.to_complex_time_vec();
/// let result = vector.statistics_prec();
/// assert_eq!(result.sum, Complex64::new(9.0, 12.0));
/// assert_eq!(result.count, 3);
/// assert_eq!(result.average, Complex64::new(3.0, 4.0));
/// assert!((result.rms - Complex64::new(3.4027193, 4.3102784)).norm() < 1e-4);
/// assert_eq!(result.min, Complex64::new(1.0, 2.0));
/// assert_eq!(result.min_index, 0);
/// assert_eq!(result.max, Complex64::new(5.0, 6.0));
/// assert_eq!(result.max_index, 2);
/// }
/// ```
fn statistics_prec(&self) -> Self::Result;
}
/// Offers the same functionality as the `StatisticsOps` trait but
/// the statistics are calculated in a more precise (and slower) way.
pub trait PreciseStatisticsSplitOps<T> {
type Result;
    /// Calculates the statistics of the data contained in the vector as if the vector
    /// had been split into `len` pieces,
    /// using a more precise but slower algorithm. `self.len` should be divisible by
    /// `len` without a remainder,
    /// but this isn't enforced by the implementation.
/// For implementation reasons `len <= 16` must be true.
///
/// # Example
///
/// ```
/// # extern crate num_complex;
/// # extern crate basic_dsp_vector;
/// # use num_complex::Complex64;
/// use basic_dsp_vector::*;
/// # fn main() {
/// let vector: Vec<f32> = vec!(1.0, 2.0, 3.0, 4.0, 5.0, 6.0);
/// let vector = vector.to_complex_time_vec();
/// let result = vector.statistics_split_prec(2).expect("Ignoring error handling in examples");
/// assert_eq!(result[0].sum, Complex64::new(6.0, 8.0));
/// assert_eq!(result[1].sum, Complex64::new(3.0, 4.0));
/// }
/// ```
fn statistics_split_prec(&self, len: usize) -> ScalarResult<Self::Result>;
}
/// Offers the same functionality as the `SumOps` trait but
/// the sums are calculated in a more precise (and slower) way.
pub trait PreciseSumOps<T>: Sized
where
T: Sized,
{
/// Calculates the sum of the data contained in the vector
/// using a more precise but slower algorithm.
/// # Example
///
/// ```
/// # extern crate num_complex;
/// # extern crate basic_dsp_vector;
/// # use num_complex::Complex64;
/// use basic_dsp_vector::*;
/// # fn main() {
/// let vector = vec!(1.0, 2.0, 3.0, 4.0, 5.0, 6.0).to_complex_time_vec();
/// let result = vector.sum_prec();
/// assert_eq!(result, Complex64::new(9.0, 12.0));
/// }
/// ```
fn sum_prec(&self) -> T;
/// Calculates the sum of the squared data contained in the vector
/// using a more precise but slower algorithm.
/// # Example
///
/// ```
/// # extern crate num_complex;
/// # extern crate basic_dsp_vector;
/// # use num_complex::Complex64;
/// use basic_dsp_vector::*;
/// # fn main() {
/// let vector = vec!(1.0, 2.0, 3.0, 4.0, 5.0, 6.0).to_complex_time_vec();
/// let result = vector.sum_sq_prec();
/// assert_eq!(result, Complex64::new(-21.0, 88.0));
/// }
/// ```
fn sum_sq_prec(&self) -> T;
}
/// A trait for statistics that allows adding new values in a way that reduces the
/// impact of numerical round-off on the final results.
pub trait PreciseStats<T>: Sized {
/// Adds a new values to the statistics using the Kahan summation algorithm
/// described here: https://en.wikipedia.org/wiki/Kahan_summation_algorithm
fn add_prec(&mut self, elem: T, index: usize, sumc: &mut T, rmsc: &mut T);
}
impl<S, N, D> PreciseStatisticsOps<f64> for DspVec<S, f32, N, D>
where
S: ToSlice<f32>,
N: RealNumberSpace,
D: Domain,
{
type Result = Statistics<f64>;
fn statistics_prec(&self) -> Statistics<f64> {
let data_length = self.len();
let array = self.data.to_slice();
let chunks = Chunk::get_chunked_results(
Complexity::Small,
&self.multicore_settings,
&array[0..data_length],
1,
(),
|array, range, _arg| {
let mut stats = Statistics::empty();
let mut j = range.start;
for num in array {
stats.add(f64::from(*num), j);
j += 1;
}
stats
},
);
Statistics::merge(&chunks[..])
}
}
impl<S, N, D> PreciseStatisticsSplitOps<f64> for DspVec<S, f32, N, D>
where
S: ToSlice<f32>,
N: RealNumberSpace,
D: Domain,
{
type Result = StatsVec<Statistics<f64>>;
fn statistics_split_prec(&self, len: usize) -> ScalarResult<StatsVec<Statistics<f64>>> {
if len == 0 {
return Ok(StatsVec::new());
}
if len > STATS_VEC_CAPACTIY {
return Err(ErrorReason::InvalidArgumentLength);
}
let data_length = self.len();
let array = self.data.to_slice();
let chunks = Chunk::get_chunked_results(
Complexity::Small,
&self.multicore_settings,
&array[0..data_length],
1,
len,
|array, range, len| {
let mut results = Statistics::empty_vec(len);
let mut j = range.start;
for num in array {
let stats = &mut results[j % len];
stats.add(f64::from(*num), j / len);
j += 1;
}
results
},
);
Ok(Statistics::merge_cols(&chunks[..]))
}
}
impl<S, N, D> PreciseStatisticsOps<f64> for DspVec<S, f64, N, D>
where
S: ToSlice<f64>,
N: RealNumberSpace,
D: Domain,
{
type Result = Statistics<f64>;
fn statistics_prec(&self) -> Statistics<f64> {
let data_length = self.len();
let array = self.data.to_slice();
let chunks = Chunk::get_chunked_results(
Complexity::Small,
&self.multicore_settings,
&array[0..data_length],
1,
(),
|array, range, _arg| {
let mut stats = Statistics::empty();
let mut j = range.start;
let mut sumc = 0.0;
let mut rmsc = 0.0;
for num in array {
stats.add_prec(*num, j, &mut sumc, &mut rmsc);
j += 1;
}
stats
},
);
Statistics::merge(&chunks[..])
}
}
impl<S, N, D> PreciseStatisticsSplitOps<f64> for DspVec<S, f64, N, D>
where
S: ToSlice<f64>,
N: RealNumberSpace,
D: Domain,
{
type Result = StatsVec<Statistics<f64>>;
fn statistics_split_prec(&self, len: usize) -> ScalarResult<StatsVec<Statistics<f64>>> {
if len == 0 {
return Ok(StatsVec::new());
}
if len > STATS_VEC_CAPACTIY {
return Err(ErrorReason::InvalidArgumentLength);
}
let data_length = self.len();
let array = self.data.to_slice();
let chunks = Chunk::get_chunked_results(
Complexity::Small,
&self.multicore_settings,
&array[0..data_length],
1,
len,
|array, range, len| {
let mut results = Statistics::empty_vec(len);
let mut j = range.start;
let mut sumc = 0.0;
let mut rmsc = 0.0;
for num in array {
let stats = &mut results[j % len];
stats.add_prec(*num, j / len, &mut sumc, &mut rmsc);
j += 1;
}
results
},
);
Ok(Statistics::merge_cols(&chunks[..]))
}
}
impl<S, N, D> PreciseSumOps<f64> for DspVec<S, f32, N, D>
where
S: ToSlice<f32>,
N: RealNumberSpace,
D: Domain,
{
fn sum_prec(&self) -> f64 {
let data_length = self.len();
let array = self.data.to_slice();
let chunks = Chunk::get_chunked_results(
Complexity::Small,
&self.multicore_settings,
&array[0..data_length],
1,
(),
move |array, _, _| {
let mut sum = 0.0;
for n in array {
sum += f64::from(*n);
} | );
(&chunks[..]).iter().fold(0.0, |a, b| a + b)
}
fn sum_sq_prec(&self) -> f64 {
let data_length = self.len();
let array = self.data.to_slice();
let chunks = Chunk::get_chunked_results(
Complexity::Small,
&self.multicore_settings,
&array[0..data_length],
1,
(),
move |array, _, _| {
let mut sum = 0.0;
for n in array {
let t = f64::from(*n);
sum += t * t;
}
sum
},
);
(&chunks[..]).iter().fold(0.0, |a, b| a + b)
}
}
impl<S, N, D> PreciseSumOps<f64> for DspVec<S, f64, N, D>
where
S: ToSlice<f64>,
N: RealNumberSpace,
D: Domain,
{
fn sum_prec(&self) -> f64 {
let data_length = self.len();
let array = self.data.to_slice();
let chunks = Chunk::get_chunked_results(
Complexity::Small,
&self.multicore_settings,
&array[0..data_length],
1,
(),
move |array, _, _| kahan_sumb(array.iter()),
);
(&chunks[..]).iter().fold(0.0, |a, b| a + b)
}
fn sum_sq_prec(&self) -> f64 {
let data_length = self.len();
let array = self.data.to_slice();
let chunks = Chunk::get_chunked_results(
Complexity::Small,
&self.multicore_settings,
&array[0..data_length],
1,
(),
move |array, _, _| kahan_sum(array.iter().map(|x| x * x)),
);
(&chunks[..]).iter().fold(0.0, |a, b| a + b)
}
}
impl<S, N, D> PreciseStatisticsOps<Complex<f64>> for DspVec<S, f32, N, D>
where
S: ToSlice<f32>,
N: ComplexNumberSpace,
D: Domain,
{
type Result = Statistics<Complex<f64>>;
fn statistics_prec(&self) -> Statistics<Complex<f64>> {
let data_length = self.len();
let array = self.data.to_slice();
let chunks = Chunk::get_chunked_results(
Complexity::Small,
&self.multicore_settings,
&array[0..data_length],
2,
(),
|array, range, _arg| {
let mut stat = Statistics::<Complex64>::empty();
let mut j = range.start / 2;
let array = array_to_complex(array);
for num in array {
stat.add(Complex64::new(f64::from(num.re), f64::from(num.im)), j);
j += 1;
}
stat
},
);
Statistics::merge(&chunks[..])
}
}
impl<S, N, D> PreciseStatisticsSplitOps<Complex<f64>> for DspVec<S, f32, N, D>
where
S: ToSlice<f32>,
N: ComplexNumberSpace,
D: Domain,
{
type Result = StatsVec<Statistics<Complex<f64>>>;
fn statistics_split_prec(
&self,
len: usize,
) -> ScalarResult<StatsVec<Statistics<Complex<f64>>>> {
if len == 0 {
return Ok(StatsVec::new());
}
if len > STATS_VEC_CAPACTIY {
return Err(ErrorReason::InvalidArgumentLength);
}
let data_length = self.len();
let array = self.data.to_slice();
let chunks = Chunk::get_chunked_results(
Complexity::Small,
&self.multicore_settings,
&array[0..data_length],
2,
len,
|array, range, len| {
let mut results = Statistics::<Complex<f64>>::empty_vec(len);
let mut j = range.start / 2;
let array = array_to_complex(array);
for num in array {
let stat = &mut results[j % len];
stat.add(
Complex64::new(f64::from(num.re), f64::from(num.im)),
j / len,
);
j += 1;
}
results
},
);
Ok(Statistics::merge_cols(&chunks[..]))
}
}
impl<S, N, D> PreciseStatisticsOps<Complex<f64>> for DspVec<S, f64, N, D>
where
S: ToSlice<f64>,
N: ComplexNumberSpace,
D: Domain,
{
type Result = Statistics<Complex<f64>>;
fn statistics_prec(&self) -> Statistics<Complex<f64>> {
let data_length = self.len();
let array = self.data.to_slice();
let chunks = Chunk::get_chunked_results(
Complexity::Small,
&self.multicore_settings,
&array[0..data_length],
2,
(),
|array, range, _arg| {
let mut stat = Statistics::<Complex64>::empty();
let mut j = range.start / 2;
let array = array_to_complex(array);
let mut sumc = Complex64::zero();
let mut rmsc = Complex64::zero();
for num in array {
stat.add_prec(*num, j, &mut sumc, &mut rmsc);
j += 1;
}
stat
},
);
Statistics::merge(&chunks[..])
}
}
impl<S, N, D> PreciseStatisticsSplitOps<Complex<f64>> for DspVec<S, f64, N, D>
where
S: ToSlice<f64>,
N: ComplexNumberSpace,
D: Domain,
{
type Result = StatsVec<Statistics<Complex<f64>>>;
fn statistics_split_prec(
&self,
len: usize,
) -> ScalarResult<StatsVec<Statistics<Complex<f64>>>> {
if len == 0 {
return Ok(StatsVec::new());
}
if len > STATS_VEC_CAPACTIY {
return Err(ErrorReason::InvalidArgumentLength);
}
let data_length = self.len();
let array = self.data.to_slice();
let chunks = Chunk::get_chunked_results(
Complexity::Small,
&self.multicore_settings,
&array[0..data_length],
2,
len,
|array, range, len| {
let mut results = Statistics::<Complex<f64>>::empty_vec(len);
let mut j = range.start / 2;
let array = array_to_complex(array);
let mut sumc = Complex64::zero();
let mut rmsc = Complex64::zero();
for num in array {
let stat = &mut results[j % len];
stat.add_prec(*num, j / len, &mut sumc, &mut rmsc);
j += 1;
}
results
},
);
Ok(Statistics::merge_cols(&chunks[..]))
}
}
impl<S, N, D> PreciseSumOps<Complex<f64>> for DspVec<S, f32, N, D>
where
S: ToSlice<f32>,
N: ComplexNumberSpace,
D: Domain,
{
fn sum_prec(&self) -> Complex<f64> {
let data_length = self.len();
let array = self.data.to_slice();
let chunks = Chunk::get_chunked_results(
Complexity::Small,
&self.multicore_settings,
&array[0..data_length],
2,
(),
move |array, _, _| {
let mut sum = Complex64::zero();
let array = array_to_complex(&array[0..array.len()]);
for n in array {
sum += Complex64::new(f64::from(n.re), f64::from(n.im));
}
sum
},
);
(&chunks[..]).iter().fold(Complex64::zero(), |a, b| a + b)
}
fn sum_sq_prec(&self) -> Complex<f64> {
let data_length = self.len();
let array = self.data.to_slice();
let chunks = Chunk::get_chunked_results(
Complexity::Small,
&self.multicore_settings,
&array[0..data_length],
2,
(),
move |array, _, _| {
let mut sum = Complex::<f64>::zero();
let array = array_to_complex(&array[0..array.len()]);
for n in array {
let t = Complex64::new(f64::from(n.re), f64::from(n.im));
sum += t * t;
}
sum
},
);
(&chunks[..]).iter().fold(Complex64::zero(), |a, b| a + b)
}
}
impl<S, N, D> PreciseSumOps<Complex<f64>> for DspVec<S, f64, N, D>
where
S: ToSlice<f64>,
N: ComplexNumberSpace,
D: Domain,
{
fn sum_prec(&self) -> Complex<f64> {
let data_length = self.len();
let array = self.data.to_slice();
let chunks = Chunk::get_chunked_results(
Complexity::Small,
&self.multicore_settings,
&array[0..data_length],
2,
(),
move |array, _, _| {
let array = array_to_complex(&array[0..array.len()]);
kahan_sumb(array.iter())
},
);
(&chunks[..]).iter().fold(Complex64::zero(), |a, b| a + b)
}
fn sum_sq_prec(&self) -> Complex<f64> {
let data_length = self.len();
let array = self.data.to_slice();
let chunks = Chunk::get_chunked_results(
Complexity::Small,
&self.multicore_settings,
&array[0..data_length],
2,
(),
move |array, _, _| {
let array = array_to_complex(&array[0..array.len()]);
kahan_sum(array.iter().map(|x| x * x))
},
);
(&chunks[..]).iter().fold(Complex64::zero(), |a, b| a + b)
}
}
impl<T> PreciseStats<T> for Statistics<T>
where
T: RealNumber,
{
#[inline]
fn add_prec(&mut self, elem: T, index: usize, sumc: &mut T, rmsc: &mut T) {
let y = elem - *sumc;
let t = self.sum + y;
*sumc = (t - self.sum) - y;
self.sum = t;
self.count += 1;
let y = elem * elem - *rmsc;
let t = self.rms + y;
*rmsc = (t - self.rms) - y;
self.rms = t;
if elem > self.max {
self.max = elem;
self.max_index = index;
}
if elem < self.min {
self.min = elem;
self.min_index = index;
}
}
}
impl<T> PreciseStats<Complex<T>> for Statistics<Complex<T>>
where
T: RealNumber,
{
#[inline]
fn add_prec(
&mut self,
elem: Complex<T>,
index: usize,
sumc: &mut Complex<T>,
rmsc: &mut Complex<T>,
) {
let y = elem - *sumc;
let t = self.sum + y;
*sumc = (t - self.sum) - y;
self.sum = t;
self.count += 1;
let y = elem * elem - *rmsc;
let t = self.rms + y;
*rmsc = (t - self.rms) - y;
self.rms = t;
if elem.norm() > self.max.norm() {
self.max = elem;
self.max_index = index;
}
if elem.norm() < self.min.norm() {
self.min = elem;
self.min_index = index;
}
}
} | sum
}, |
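The `add_prec` implementations above are Kahan compensated summation (the algorithm referenced in the `PreciseStats` documentation): a carry term keeps the low-order bits that a plain floating-point add would discard. A short Python sketch of the same idea, for illustration only:

def kahan_sum(values):
    total = 0.0
    c = 0.0  # running compensation for lost low-order bits
    for x in values:
        y = x - c
        t = total + y
        c = (t - total) - y
        total = t
    return total

# Adding many small terms to one large term: the naive sum drops them all,
# the compensated sum keeps them (within an ulp or two).
values = [1e16] + [1.0] * 1000
print(sum(values))        # 1e16 -- every 1.0 is rounded away
print(kahan_sum(values))  # ~1.0000000000001e16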
exceptions.py | class NoTrezorFoundError(Exception):
"""No plugged Trezor wallet was found."""
pass
class | (Exception):
"""The file is invalid and cannot be parsed."""
pass
| InvalidCofrFileError |
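A brief usage sketch for these exception types; the load_wallet helper below is hypothetical and exists only to show how callers might raise and handle them:

def load_wallet(path, trezor_client=None):
    # Hypothetical caller; only the exception types above come from the module.
    if trezor_client is None:
        raise NoTrezorFoundError("no plugged Trezor wallet was found")
    if not path.endswith(".cofr"):
        raise InvalidCofrFileError("%s cannot be parsed as a cofr file" % path)
    return {"path": path}

try:
    load_wallet("backup.txt", trezor_client=object())
except (NoTrezorFoundError, InvalidCofrFileError) as exc:
    print("cannot load wallet: %s" % exc)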
trap.py | import asyncio
import aiosnmp
async def handler(host: str, port: int, message: aiosnmp.SnmpV2TrapMessage) -> None: | print(f"oid: {d.oid}, value: {d.value}")
async def main():
p = aiosnmp.SnmpV2TrapServer(
host="127.0.0.1", port=162, communities=("public",), handler=handler
)
await p.run()
loop = asyncio.get_event_loop()
loop.run_until_complete(main()) | print(f"got packet from {host}:{port}")
for d in message.data.varbinds: |
save.go | package rewards
import (
"context"
"github.com/jackc/pgx/v4"
"github.com/pkg/errors"
"github.com/barnbridge/meminero/processor/storables/smartyield" | )
func (s *Storable) SaveToDatabase(ctx context.Context, tx pgx.Tx) error {
err := s.saveStakingActions(ctx, tx)
if err != nil {
return errors.Wrap(err, "could not save staking actions")
}
err = s.saveTxHistory(ctx, tx)
if err != nil {
return errors.Wrap(err, "could not save transaction history")
}
err = s.saveClaimEvents(ctx, tx)
if err != nil {
return errors.Wrap(err, "could not save claim events")
}
err = s.saveRewardPools(ctx, tx)
if err != nil {
return errors.Wrap(err, "could not save reward pools")
}
return nil
}
func (s *Storable) saveStakingActions(ctx context.Context, tx pgx.Tx) error {
if len(s.processed.Deposits) == 0 && len(s.processed.Withdrawals) == 0 {
return nil
}
var rows [][]interface{}
for _, e := range s.processed.Deposits {
p := s.state.SmartYield.RewardPoolByAddress(e.Raw.Address.String())
rows = append(rows, []interface{}{
utils.NormalizeAddress(e.User.String()),
e.AmountDecimal(0),
e.BalanceAfterDecimal(0),
smartyield.JuniorStake,
p.PoolAddress,
s.block.BlockCreationTime,
s.block.Number,
utils.NormalizeAddress(e.Raw.TxHash.String()),
e.Raw.TxIndex,
e.Raw.Index,
})
}
for _, e := range s.processed.Withdrawals {
p := s.state.SmartYield.RewardPoolByAddress(e.Raw.Address.String())
rows = append(rows, []interface{}{
utils.NormalizeAddress(e.User.String()),
e.AmountDecimal(0),
e.BalanceAfterDecimal(0),
smartyield.JuniorUnstake,
p.PoolAddress,
s.block.BlockCreationTime,
s.block.Number,
utils.NormalizeAddress(e.Raw.TxHash.String()),
e.Raw.TxIndex,
e.Raw.Index,
})
}
_, err := tx.CopyFrom(
ctx,
pgx.Identifier{"smart_yield", "rewards_staking_actions"},
[]string{
"user_address",
"amount",
"balance_after",
"action_type",
"pool_address",
"block_timestamp",
"included_in_block",
"tx_hash",
"tx_index",
"log_index",
},
pgx.CopyFromRows(rows),
)
if err != nil {
return errors.Wrap(err, "could not copy into rewards_staking_actions")
}
return nil
}
func (s *Storable) saveTxHistory(ctx context.Context, tx pgx.Tx) error {
if len(s.processed.Deposits) == 0 && len(s.processed.Withdrawals) == 0 {
return nil
}
var rows [][]interface{}
for _, e := range s.processed.Deposits {
rows = append(rows, s.txHistory(e.User.String(), e.AmountDecimal(0), smartyield.JuniorTranche, smartyield.JuniorStake, e.Raw))
}
for _, e := range s.processed.Withdrawals {
rows = append(rows, s.txHistory(e.User.String(), e.AmountDecimal(0), smartyield.JuniorTranche, smartyield.JuniorUnstake, e.Raw))
}
_, err := tx.CopyFrom(
ctx,
pgx.Identifier{"smart_yield", "transaction_history"},
[]string{
"protocol_id",
"pool_address",
"underlying_token_address",
"user_address",
"amount",
"tranche",
"transaction_type",
"block_timestamp",
"included_in_block",
"tx_hash",
"tx_index",
"log_index",
},
pgx.CopyFromRows(rows),
)
if err != nil {
return errors.Wrap(err, "could not copy into transaction_history")
}
return nil
}
func (s *Storable) saveClaimEvents(ctx context.Context, tx pgx.Tx) error {
if len(s.processed.Claims) == 0 && len(s.processed.ClaimsMulti) == 0 {
return nil
}
var rows [][]interface{}
for _, e := range s.processed.Claims {
rp := s.state.SmartYield.RewardPoolByAddress(e.Raw.Address.String())
rows = append(rows, []interface{}{
utils.NormalizeAddress(e.User.String()),
e.AmountDecimal(0),
rp.PoolAddress,
rp.RewardTokenAddresses[0],
s.block.BlockCreationTime,
s.block.Number,
utils.NormalizeAddress(e.Raw.TxHash.String()),
e.Raw.TxIndex,
e.Raw.Index,
})
}
for _, e := range s.processed.ClaimsMulti {
rp := s.state.SmartYield.RewardPoolByAddress(e.Raw.Address.String())
rows = append(rows, []interface{}{
utils.NormalizeAddress(e.User.String()),
e.AmountDecimal(0),
rp.PoolAddress,
utils.NormalizeAddress(e.Token.String()),
s.block.BlockCreationTime,
s.block.Number,
utils.NormalizeAddress(e.Raw.TxHash.String()),
e.Raw.TxIndex,
e.Raw.Index,
})
}
_, err := tx.CopyFrom(
ctx,
pgx.Identifier{"smart_yield", "rewards_claims"},
[]string{
"user_address",
"amount",
"pool_address",
"reward_token_address",
"block_timestamp",
"included_in_block",
"tx_hash",
"tx_index",
"log_index",
},
pgx.CopyFromRows(rows),
)
if err != nil {
return errors.Wrap(err, "could not copy into rewards_claims")
}
return nil
}
func (s *Storable) saveRewardPools(ctx context.Context, tx pgx.Tx) error {
if len(s.processed.Pools) == 0 {
return nil
}
for _, p := range s.processed.Pools {
_, err := tx.Exec(ctx, `
insert into smart_yield.reward_pools
(pool_type, pool_address, pool_token_address, reward_token_addresses, start_at_block)
values ($1, $2, $3, $4, $5)
on conflict do nothing
`, p.PoolType, p.PoolAddress, p.PoolTokenAddress, p.RewardTokenAddresses, p.StartAtBlock)
if err != nil {
return errors.Wrap(err, "could not insert pool into db")
}
}
return nil
} | "github.com/barnbridge/meminero/utils" |
model_persistent_volume_claim_metadata.go | package model
import (
"github.com/huaweicloud/huaweicloud-sdk-go-v3/core/utils"
"strings"
)
// PersistentVolumeClaimMetadata is the metadata definition of the cluster object; it is a
// collection-like element type that contains a group of attributes defined by different names.
type PersistentVolumeClaimMetadata struct {
	// PersistentVolumeClaim name. May contain lowercase letters, digits, hyphens and dots;
	// it must start and end with a letter or digit, be at most 253 characters long, and the
	// name must not be duplicated within the same namespace.
	Name string `json:"name"`
	// PersistentVolumeClaim labels, in key/value pair format.
	// - Key: must start with a letter or digit; may contain letters, digits, hyphens,
	//   underscores and dots; at most 63 characters. A DNS subdomain may also be used as a
	//   prefix, e.g. example.com/my-key; the DNS subdomain is at most 253 characters long.
	// - Value: may be empty or a non-empty string; a non-empty string must start with a
	//   letter or digit and may contain letters, digits, hyphens, underscores and dots;
	//   at most 63 characters.
	Labels *string `json:"labels,omitempty"`
}
func (o PersistentVolumeClaimMetadata) String() string {
data, err := utils.Marshal(o)
if err != nil {
return "PersistentVolumeClaimMetadata struct{}"
}
return strings.Join([]string{"PersistentVolumeClaimMetadata", string(data)}, " ")
}
| ||
get_global_address.py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetGlobalAddressResult',
'AwaitableGetGlobalAddressResult',
'get_global_address',
'get_global_address_output',
]
@pulumi.output_type
class GetGlobalAddressResult:
def __init__(__self__, address=None, address_type=None, creation_timestamp=None, description=None, ip_version=None, kind=None, name=None, network=None, network_tier=None, prefix_length=None, purpose=None, region=None, self_link=None, status=None, subnetwork=None, users=None):
if address and not isinstance(address, str):
raise TypeError("Expected argument 'address' to be a str")
pulumi.set(__self__, "address", address)
if address_type and not isinstance(address_type, str):
raise TypeError("Expected argument 'address_type' to be a str")
pulumi.set(__self__, "address_type", address_type)
if creation_timestamp and not isinstance(creation_timestamp, str):
raise TypeError("Expected argument 'creation_timestamp' to be a str")
pulumi.set(__self__, "creation_timestamp", creation_timestamp)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if ip_version and not isinstance(ip_version, str):
raise TypeError("Expected argument 'ip_version' to be a str")
pulumi.set(__self__, "ip_version", ip_version)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if network and not isinstance(network, str):
raise TypeError("Expected argument 'network' to be a str")
pulumi.set(__self__, "network", network)
if network_tier and not isinstance(network_tier, str):
raise TypeError("Expected argument 'network_tier' to be a str")
pulumi.set(__self__, "network_tier", network_tier)
if prefix_length and not isinstance(prefix_length, int):
raise TypeError("Expected argument 'prefix_length' to be a int")
pulumi.set(__self__, "prefix_length", prefix_length)
if purpose and not isinstance(purpose, str):
raise TypeError("Expected argument 'purpose' to be a str")
pulumi.set(__self__, "purpose", purpose)
if region and not isinstance(region, str):
raise TypeError("Expected argument 'region' to be a str")
pulumi.set(__self__, "region", region)
if self_link and not isinstance(self_link, str):
raise TypeError("Expected argument 'self_link' to be a str")
pulumi.set(__self__, "self_link", self_link)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if subnetwork and not isinstance(subnetwork, str):
raise TypeError("Expected argument 'subnetwork' to be a str")
pulumi.set(__self__, "subnetwork", subnetwork)
if users and not isinstance(users, list):
raise TypeError("Expected argument 'users' to be a list")
pulumi.set(__self__, "users", users)
@property
@pulumi.getter
def address(self) -> str:
"""
The static IP address represented by this resource.
"""
return pulumi.get(self, "address")
@property
@pulumi.getter(name="addressType")
def address_type(self) -> str:
"""
The type of address to reserve, either INTERNAL or EXTERNAL. If unspecified, defaults to EXTERNAL.
"""
return pulumi.get(self, "address_type")
@property
@pulumi.getter(name="creationTimestamp")
def creation_timestamp(self) -> str:
"""
Creation timestamp in RFC3339 text format.
"""
return pulumi.get(self, "creation_timestamp")
@property
@pulumi.getter
def description(self) -> str:
"""
An optional description of this resource. Provide this field when you create the resource.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="ipVersion")
def ip_version(self) -> str:
"""
The IP version that will be used by this address. Valid options are IPV4 or IPV6. This can only be specified for a global address.
"""
return pulumi.get(self, "ip_version")
@property
@pulumi.getter
def kind(self) -> str:
"""
Type of the resource. Always compute#address for addresses.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def network(self) -> str:
"""
The URL of the network in which to reserve the address. This field can only be used with INTERNAL type with the VPC_PEERING purpose.
"""
return pulumi.get(self, "network")
@property
@pulumi.getter(name="networkTier")
def network_tier(self) -> str:
"""
This signifies the networking tier used for configuring this address and can only take the following values: PREMIUM or STANDARD. Internal IP addresses are always Premium Tier; global external IP addresses are always Premium Tier; regional external IP addresses can be either Standard or Premium Tier. If this field is not specified, it is assumed to be PREMIUM.
"""
return pulumi.get(self, "network_tier")
@property
@pulumi.getter(name="prefixLength")
def prefix_length(self) -> int:
"""
The prefix length if the resource represents an IP range.
"""
return pulumi.get(self, "prefix_length")
@property
@pulumi.getter
def purpose(self) -> str:
"""
The purpose of this resource, which can be one of the following values: - GCE_ENDPOINT for addresses that are used by VM instances, alias IP ranges, load balancers, and similar resources. - DNS_RESOLVER for a DNS resolver address in a subnetwork for a Cloud DNS inbound forwarder IP addresses (regional internal IP address in a subnet of a VPC network) - VPC_PEERING for global internal IP addresses used for private services access allocated ranges. - NAT_AUTO for the regional external IP addresses used by Cloud NAT when allocating addresses using automatic NAT IP address allocation. - IPSEC_INTERCONNECT for addresses created from a private IP range that are reserved for a VLAN attachment in an *IPsec-encrypted Cloud Interconnect* configuration. These addresses are regional resources. Not currently available publicly. - `SHARED_LOADBALANCER_VIP` for an internal IP address that is assigned to multiple internal forwarding rules. - `PRIVATE_SERVICE_CONNECT` for a private network address that is used to configure Private Service Connect. Only global internal addresses can use this purpose.
"""
return pulumi.get(self, "purpose")
@property
@pulumi.getter
def region(self) -> str:
"""
The URL of the region where a regional address resides. For regional addresses, you must specify the region as a path parameter in the HTTP request URL. *This field is not applicable to global addresses.*
"""
return pulumi.get(self, "region")
@property
@pulumi.getter(name="selfLink")
def self_link(self) -> str:
"""
Server-defined URL for the resource.
"""
return pulumi.get(self, "self_link")
@property
@pulumi.getter
def status(self) -> str:
"""
The status of the address, which can be one of RESERVING, RESERVED, or IN_USE. An address that is RESERVING is currently in the process of being reserved. A RESERVED address is currently reserved and available to use. An IN_USE address is currently being used by another resource and is not available.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def subnetwork(self) -> str:
"""
The URL of the subnetwork in which to reserve the address. If an IP address is specified, it must be within the subnetwork's IP range. This field can only be used with INTERNAL type with a GCE_ENDPOINT or DNS_RESOLVER purpose.
"""
return pulumi.get(self, "subnetwork")
@property
@pulumi.getter
def users(self) -> Sequence[str]:
"""
The URLs of the resources that are using this address.
"""
return pulumi.get(self, "users")
class AwaitableGetGlobalAddressResult(GetGlobalAddressResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetGlobalAddressResult(
address=self.address, | address_type=self.address_type,
creation_timestamp=self.creation_timestamp,
description=self.description,
ip_version=self.ip_version,
kind=self.kind,
name=self.name,
network=self.network,
network_tier=self.network_tier,
prefix_length=self.prefix_length,
purpose=self.purpose,
region=self.region,
self_link=self.self_link,
status=self.status,
subnetwork=self.subnetwork,
users=self.users)
def get_global_address(address: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGlobalAddressResult:
"""
Returns the specified address resource. Gets a list of available addresses by making a list() request.
"""
__args__ = dict()
__args__['address'] = address
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:compute/v1:getGlobalAddress', __args__, opts=opts, typ=GetGlobalAddressResult).value
return AwaitableGetGlobalAddressResult(
address=__ret__.address,
address_type=__ret__.address_type,
creation_timestamp=__ret__.creation_timestamp,
description=__ret__.description,
ip_version=__ret__.ip_version,
kind=__ret__.kind,
name=__ret__.name,
network=__ret__.network,
network_tier=__ret__.network_tier,
prefix_length=__ret__.prefix_length,
purpose=__ret__.purpose,
region=__ret__.region,
self_link=__ret__.self_link,
status=__ret__.status,
subnetwork=__ret__.subnetwork,
users=__ret__.users)
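# Illustrative usage only (the resource names below are hypothetical, not part of the generated
# module): from a Pulumi program this data source would typically be invoked as
#
#   result = get_global_address(address="my-global-address", project="my-project")
#   pulumi.export("reserved_ip", result.address)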
@_utilities.lift_output_func(get_global_address)
def get_global_address_output(address: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGlobalAddressResult]:
"""
Returns the specified address resource. Gets a list of available addresses by making a list() request.
"""
... | |
interpreter_table_drop.rs | // Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use common_exception::Result;
use common_planners::DropTablePlan;
use common_streams::DataBlockStream;
use common_streams::SendableDataBlockStream;
use crate::catalogs::Catalog;
use crate::interpreters::Interpreter;
use crate::interpreters::InterpreterPtr;
use crate::sessions::QueryContext;
pub struct DropTableInterpreter {
ctx: Arc<QueryContext>,
plan: DropTablePlan,
}
impl DropTableInterpreter {
pub fn try_create(ctx: Arc<QueryContext>, plan: DropTablePlan) -> Result<InterpreterPtr> {
Ok(Arc::new(DropTableInterpreter { ctx, plan }))
}
async fn truncate_history(&self) -> Result<()> {
let db_name = self.plan.db.as_str();
let tbl_name = self.plan.table.as_str();
let tbl = self.ctx.get_table(db_name, tbl_name).await?;
let keep_last_snapshot = false;
tbl.optimize(self.ctx.clone(), keep_last_snapshot).await
}
}
#[async_trait::async_trait]
impl Interpreter for DropTableInterpreter {
fn name(&self) -> &str {
"DropTableInterpreter"
}
async fn | (
&self,
_input_stream: Option<SendableDataBlockStream>,
) -> Result<SendableDataBlockStream> {
// potential errs of truncate_history are ignored
let _ = self.truncate_history().await;
let catalog = self.ctx.get_catalog();
catalog.drop_table(self.plan.clone().into()).await?;
Ok(Box::pin(DataBlockStream::create(
self.plan.schema(),
None,
vec![],
)))
}
}
| execute |
lib.rs | //! The Rust client library for writing streaming applications with Fluvio
//!
//! Fluvio is a high performance, low latency data streaming platform built for developers.
//!
//! When writing streaming applications, two of your core behaviors are producing messages
//! and consuming messages. When you produce a message, you send it to a Fluvio cluster
//! where it is recorded and saved for later usage. When you consume a message, you are
//! reading a previously-stored message from that same Fluvio cluster. Let's get started
//! with a quick example where we produce and consume some messages.
//!
//! # Prerequisites
//!
//! [Install Fluvio](#installation)
//!
//! # Fluvio Echo
//!
//! The easiest way to see Fluvio in action is to produce some messages and to consume
//! them right away. In this sense, we can use Fluvio to make an "echo service".
//!
//! All messages in Fluvio are sent in a sort of category called a `Topic`. You can think
//! of a Topic as a named folder where you want to store some files, which would be your
//! messages. If you're familiar with relational databases, you can think of a Topic as
//! being similar to a database table, but for streaming.
//!
//! As the application developer, you get to decide what Topics you create and which
//! messages you send to them. We need to set up a Topic before running our code. For the
//! echo example, we'll call our topic `echo`.
//!
//! # Example
//!
//! The easiest way to create a Fluvio Topic is by using the [Fluvio CLI].
//!
//! ```bash
//! $ fluvio topic create echo
//! topic "echo" created
//! ```
//!
//! There are convenience methods that let you get up-and-started quickly using default
//! configurations. Later if you want to customize your setup, you can directly use the
//! [`Fluvio`] client object.
//!
//! ```no_run
//! # mod futures {
//! # pub use futures_util::stream::StreamExt;
//! # }
//! use std::time::Duration;
//! use fluvio::{Offset, FluvioError};
//! use futures::StreamExt;
//!
//! async_std::task::spawn(produce_records());
//! if let Err(e) = async_std::task::block_on(consume_records()) {
//! println!("Error: {}", e);
//! }
//!
//! async fn produce_records() -> Result<(), FluvioError> {
//! let producer = fluvio::producer("echo").await?;
//! for i in 0..10u8 {
//! producer.send_record(format!("Hello, Fluvio {}!", i), 0).await?;
//! async_std::task::sleep(Duration::from_secs(1)).await;
//! }
//! Ok(())
//! }
//!
//! async fn consume_records() -> Result<(), FluvioError> {
//! let consumer = fluvio::consumer("echo", 0).await?;
//! let mut stream = consumer.stream(Offset::beginning()).await?;
//!
//! while let Some(Ok(record)) = stream.next().await {
//! let string = String::from_utf8_lossy(record.as_ref());
//! println!("Got record: {}", string);
//! }
//! Ok(())
//! }
//! ```
//!
//! [Fluvio CLI]: https://nightly.fluvio.io/docs/cli/
//! [`Fluvio`]: ./struct.Fluvio.html
#![cfg_attr(
feature = "nightly",
doc(include = "../../../website/kubernetes/INSTALL.md")
)]
mod error;
mod client;
mod admin;
mod params;
pub mod consumer;
mod producer;
mod offset;
mod sync;
mod spu;
pub mod config;
pub use error::FluvioError;
pub use config::FluvioConfig;
pub use producer::TopicProducer;
pub use consumer::{PartitionConsumer, ConsumerConfig};
pub use offset::Offset;
pub use crate::admin::FluvioAdmin;
pub use crate::client::Fluvio;
/// The minimum VERSION of the Fluvio Platform that this client is compatible with.
const MINIMUM_PLATFORM_VERSION: &str = "0.7.0-alpha.5";
/// Creates a producer that sends events to the named topic
///
/// This is a shortcut function that uses the current profile
/// settings. If you need to specify any custom configurations,
/// try directly creating a [`Fluvio`] client object instead.
///
/// # Example
///
/// ```no_run
/// # use fluvio::FluvioError;
/// # async fn do_produce() -> Result<(), FluvioError> {
/// let producer = fluvio::producer("my-topic").await?;
/// producer.send_record("Hello, world!", 0).await?;
/// # Ok(())
/// # }
/// ```
///
/// [`Fluvio`]: ./struct.Fluvio.html
pub async fn producer<S: Into<String>>(topic: S) -> Result<TopicProducer, FluvioError> {
let fluvio = Fluvio::connect().await?;
let producer = fluvio.topic_producer(topic).await?;
Ok(producer)
}
/// Creates a consumer that receives events from the given topic and partition
///
/// This is a shortcut function that uses the current profile
/// settings. If you need to specify any custom configurations,
/// try directly creating a [`Fluvio`] client object instead.
///
/// # Example
///
/// ```no_run
/// # use fluvio::{ConsumerConfig, FluvioError, Offset};
/// # mod futures {
/// # pub use futures_util::stream::StreamExt;
/// # }
/// # async fn example() -> Result<(), FluvioError> {
/// use futures::StreamExt;
/// let consumer = fluvio::consumer("my-topic", 0).await?;
/// let mut stream = consumer.stream(Offset::beginning()).await?;
/// while let Some(Ok(record)) = stream.next().await {
/// let string = String::from_utf8_lossy(record.as_ref());
/// println!("Got record: {}", string);
/// }
/// # Ok(())
/// # }
/// ```
///
/// [`Fluvio`]: ./struct.Fluvio.html
pub async fn consumer<S: Into<String>>(
topic: S,
partition: i32,
) -> Result<PartitionConsumer, FluvioError> |
/// re-export metadata from sc-api
pub mod metadata {
pub mod topic {
pub use fluvio_sc_schema::topic::*;
}
pub mod spu {
pub use fluvio_sc_schema::spu::*;
}
pub mod spg {
pub use fluvio_sc_schema::spg::*;
}
pub mod partition {
pub use fluvio_sc_schema::partition::*;
}
pub mod objects {
pub use fluvio_sc_schema::objects::*;
}
pub mod core {
pub use fluvio_sc_schema::core::*;
}
pub mod store {
pub use fluvio_sc_schema::store::*;
}
}
pub mod dataplane {
pub use dataplane::*;
}
| {
let fluvio = Fluvio::connect().await?;
let consumer = fluvio.partition_consumer(topic, partition).await?;
Ok(consumer)
} |
generate.py | #!/usr/bin/python3
from collections import OrderedDict
import sys
import urllib
import xml.etree.ElementTree as etree
import urllib.request
def parse_xml(path):
file = urllib.request.urlopen(path) if path.startswith("http") else open(path, 'r')
with file:
tree = etree.parse(file)
return tree
def patch_file(path, blocks):
result = []
block = None
with open(path, 'r') as file:
for line in file.readlines():
if block:
if line == block:
result.append(line)
block = None
else:
result.append(line)
if line.strip().startswith('/* VOLK_GENERATE_'):
block = line
result.append(blocks[line.strip()[17:-3]])
with open(path, 'w') as file:
for line in result:
file.write(line)
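# Note on the marker format (inferred from the parsing above, which strips the leading
# "/* VOLK_GENERATE_" and the trailing " */"): patch_file expects each generated region in
# volk.h / volk.c to be delimited by a pair of identical marker lines, for example:
#
#   /* VOLK_GENERATE_PROTOTYPES_H */
#   ...previously generated content, replaced on every run...
#   /* VOLK_GENERATE_PROTOTYPES_H */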
def is_descendant_type(types, name, base):
if name == base:
return True
type = types.get(name)
if not type:
return False
parents = type.get('parent')
if not parents:
return False
return any([is_descendant_type(types, parent, base) for parent in parents.split(',')])
def defined(key):
return 'defined(' + key + ')'
if __name__ == "__main__":
specpath = "https://raw.githubusercontent.com/KhronosGroup/Vulkan-Docs/master/xml/vk.xml"
if len(sys.argv) > 1:
specpath = sys.argv[1]
spec = parse_xml(specpath)
block_keys = ('DEVICE_TABLE', 'PROTOTYPES_H', 'PROTOTYPES_C', 'LOAD_LOADER', 'LOAD_INSTANCE', 'LOAD_DEVICE', 'LOAD_DEVICE_TABLE')
blocks = {}
version = spec.find('types/type[name="VK_HEADER_VERSION"]')
blocks['VERSION'] = '#define VOLK_HEADER_VERSION ' + version.find('name').tail.strip() + '\n'
command_groups = OrderedDict()
for feature in spec.findall('feature'):
key = defined(feature.get('name'))
cmdrefs = feature.findall('require/command')
command_groups[key] = [cmdref.get('name') for cmdref in cmdrefs]
for ext in sorted(spec.findall('extensions/extension'), key=lambda ext: ext.get('name')):
name = ext.get('name') | if req.get('extension'):
key += ' && ' + defined(req.get('extension'))
cmdrefs = req.findall('command')
command_groups.setdefault(key, []).extend([cmdref.get('name') for cmdref in cmdrefs])
commands_to_groups = OrderedDict()
for (group, cmdnames) in command_groups.items():
for name in cmdnames:
commands_to_groups.setdefault(name, []).append(group)
for (group, cmdnames) in command_groups.items():
command_groups[group] = [name for name in cmdnames if len(commands_to_groups[name]) == 1]
for (name, groups) in commands_to_groups.items():
if len(groups) == 1:
continue
key = ' || '.join(['(' + g + ')' for g in groups])
command_groups.setdefault(key, []).append(name)
commands = {}
for cmd in spec.findall('commands/command'):
if not cmd.get('alias'):
name = cmd.findtext('proto/name')
commands[name] = cmd
for cmd in spec.findall('commands/command'):
if cmd.get('alias'):
name = cmd.get('name')
commands[name] = commands[cmd.get('alias')]
types = {}
for type in spec.findall('types/type'):
name = type.findtext('name')
if name:
types[name] = type
for key in block_keys:
blocks[key] = ''
for (group, cmdnames) in command_groups.items():
ifdef = '#if ' + group + '\n'
for key in block_keys:
blocks[key] += ifdef
for name in sorted(cmdnames):
cmd = commands[name]
type = cmd.findtext('param[1]/type')
if name == 'vkGetInstanceProcAddr':
type = ''
if name == 'vkGetDeviceProcAddr':
type = 'VkInstance'
if is_descendant_type(types, type, 'VkDevice'):
blocks['LOAD_DEVICE'] += '\t' + name + ' = (PFN_' + name + ')load(context, "' + name + '");\n'
blocks['DEVICE_TABLE'] += '\tPFN_' + name + ' ' + name + ';\n'
blocks['LOAD_DEVICE_TABLE'] += '\ttable->' + name + ' = (PFN_' + name + ')load(context, "' + name + '");\n'
elif is_descendant_type(types, type, 'VkInstance'):
blocks['LOAD_INSTANCE'] += '\t' + name + ' = (PFN_' + name + ')load(context, "' + name + '");\n'
elif type != '':
blocks['LOAD_LOADER'] += '\t' + name + ' = (PFN_' + name + ')load(context, "' + name + '");\n'
blocks['PROTOTYPES_H'] += 'extern PFN_' + name + ' ' + name + ';\n'
blocks['PROTOTYPES_C'] += 'PFN_' + name + ' ' + name + ';\n'
for key in block_keys:
if blocks[key].endswith(ifdef):
blocks[key] = blocks[key][:-len(ifdef)]
else:
blocks[key] += '#endif /* ' + group + ' */\n'
patch_file('volk.h', blocks)
patch_file('volk.c', blocks) | for req in ext.findall('require'):
key = defined(name)
if req.get('feature'):
key += ' && ' + defined(req.get('feature')) |
llnms-scan-network.py | #!/usr/bin/env python
#
# File: llnms-scan-network.py
# Author: Marvin Smith
# Date: 6/13/2015
#
# Purpose: Scan LLNMS networks
#
__author__ = 'Marvin Smith'
# Python Libraries
import argparse, os, sys
# LLNMS Libraries
if os.environ.get('LLNMS_HOME') is not None:
sys.path.append(os.environ['LLNMS_HOME'] + '/lib')
import llnms
# --------------------------------------------- #
# - Parse Command-Line Arguments - #
# --------------------------------------------- #
def | ():
# Create parser
parser = argparse.ArgumentParser( description='Scan an LLNMS network.' )
# Version
parser.add_argument('-v', '--version',
action='version',
version='%(prog)s ' + llnms.info.Get_Version_String(),
help='Print the version information.')
# Verbose Mode
parser.add_argument('--verbose',
dest='verbose_flag',
required=False,
default=False,
action='store_true',
help='Print with verbose output.')
# Quiet Mode
parser.add_argument('--quiet',
required=False,
default=False,
action='store_true',
help='Do not print output.')
# Network Name
parser.add_argument('-n', '--network',
required=True,
dest='network_input',
help='ID of the network to scan.')
# Scanner Name
parser.add_argument('-s', '--scanner',
required=True,
dest='scanner_input',
help='ID of the scanner to use.')
# Print only passes
parser.add_argument('-om', '--output-mode',
required=False,
dest='output_mode',
default=None,
help='Output mode. Supported options are xml and stdout. If xml provided, then user must provide filename.')
# Return the parser
return parser.parse_args()
# ---------------------------- #
# - Main Function - #
# ---------------------------- #
def Main():
# Grab LLNMS HOME
llnms_home=None
    if os.environ.get('LLNMS_HOME') is not None:
llnms_home=os.environ['LLNMS_HOME']
# Parse Command-Line Arguments
options = Parse_Command_Line()
# Load the network definition
network = llnms.Network.find_network( network_name=options.network_input,
llnms_home=llnms_home)
# Make sure we found a network
if network is None:
raise Exception('No network found matching name ' + options.network_input)
# Print the Network if Verbose
if options.verbose_flag is True:
print(network.To_Debug_String())
# Load the scanner definition
scanner = llnms.Scanner.find_scanner( scanner_id=options.scanner_input,
llnms_home=llnms_home )
# Make sure we found a scanner
if scanner is None:
raise Exception('No scanner found matching name ' + options.scanner_input)
# Print scanner if verbose
if options.verbose_flag is True:
print(scanner.To_Debug_String())
# Validate the scanner is registered within the network
if network.Has_Scanner( scanner_id=scanner.id ) is False:
raise Exception("Network does not have a scanner registered with id=" + scanner.id )
# Run scan on network
results = scanner.Run_Scan_Range(endpoint_list=network.Get_Network_Range(),
arg_list=network.Get_Scanner_Args(scanner.id),
num_threads=4)
# print results
addresses = network.Get_Network_Range()
    for x in range(0, len(results)):
print(addresses[x] + ' - ' + str(results[x]))
# ----------------------------- #
# - Run Main Script - #
# ----------------------------- #
if __name__ == '__main__':
Main()
| Parse_Command_Line |
extensions.go | // Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Types and routines for supporting protocol buffer extensions.
*/
import (
"errors"
"fmt"
"reflect"
"strconv"
"sync"
)
// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
var ErrMissingExtension = errors.New("proto: missing extension")
// ExtensionRange represents a range of message extensions for a protocol buffer.
// Used in code generated by the protocol compiler.
type ExtensionRange struct {
Start, End int32 // both inclusive
}
// extendableProto is an interface implemented by any protocol buffer generated by the current
// proto compiler that may be extended.
type extendableProto interface {
Message
ExtensionRangeArray() []ExtensionRange
extensionsWrite() map[int32]Extension
extensionsRead() (map[int32]Extension, sync.Locker)
}
// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
// version of the proto compiler that may be extended.
type extendableProtoV1 interface {
Message
ExtensionRangeArray() []ExtensionRange
ExtensionMap() map[int32]Extension
}
type extensionsBytes interface {
Message
ExtensionRangeArray() []ExtensionRange
GetExtensions() *[]byte
}
// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
type extensionAdapter struct {
extendableProtoV1
}
func (e extensionAdapter) extensionsWrite() map[int32]Extension {
return e.ExtensionMap()
}
func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
return e.ExtensionMap(), notLocker{}
}
// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
type notLocker struct{}
func (n notLocker) Lock() {}
func (n notLocker) Unlock() {}
// extendable returns the extendableProto interface for the given generated proto message.
// If the proto message has the old extension format, it returns a wrapper that implements
// the extendableProto interface.
func extendable(p interface{}) (extendableProto, bool) {
if ep, ok := p.(extendableProto); ok {
return ep, ok
}
if ep, ok := p.(extendableProtoV1); ok {
return extensionAdapter{ep}, ok
}
return nil, false
}
// XXX_InternalExtensions is an internal representation of proto extensions.
//
// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
//
// The methods of XXX_InternalExtensions are not concurrency safe in general,
// but calls to logically read-only methods such as has and get may be executed concurrently.
type XXX_InternalExtensions struct {
// The struct must be indirect so that if a user inadvertently copies a
// generated message and its embedded XXX_InternalExtensions, they
// avoid the mayhem of a copied mutex.
//
// The mutex serializes all logically read-only operations to p.extensionMap.
// It is up to the client to ensure that write operations to p.extensionMap are
// mutually exclusive with other accesses.
p *struct {
mu sync.Mutex
extensionMap map[int32]Extension
}
}
// extensionsWrite returns the extension map, creating it on first use.
func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
if e.p == nil {
e.p = new(struct {
mu sync.Mutex
extensionMap map[int32]Extension
})
e.p.extensionMap = make(map[int32]Extension)
}
return e.p.extensionMap
}
// extensionsRead returns the extensions map for read-only use. It may be nil.
// The caller must hold the returned mutex's lock when accessing Elements within the map.
func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
if e.p == nil {
return nil, nil
}
return e.p.extensionMap, &e.p.mu
}
type extensionRange interface {
Message
ExtensionRangeArray() []ExtensionRange
}
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
var extendableBytesType = reflect.TypeOf((*extensionsBytes)(nil)).Elem()
var extensionRangeType = reflect.TypeOf((*extensionRange)(nil)).Elem()
// ExtensionDesc represents an extension specification.
// Used in generated code from the protocol compiler.
type ExtensionDesc struct {
ExtendedType Message // nil pointer to the type that is being extended
ExtensionType interface{} // nil pointer to the extension type
Field int32 // field number
Name string // fully-qualified name of extension, for text formatting
Tag string // protobuf tag style
}
func (ed *ExtensionDesc) repeated() bool {
t := reflect.TypeOf(ed.ExtensionType)
return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
}
// Extension represents an extension in a message.
type Extension struct {
// When an extension is stored in a message using SetExtension
// only desc and value are set. When the message is marshaled
// enc will be set to the encoded form of the message.
//
// When a message is unmarshaled and contains extensions, each
// extension will have only enc set. When such an extension is
// accessed using GetExtension (or GetExtensions) desc and value
// will be set.
desc *ExtensionDesc
value interface{}
enc []byte
}
// SetRawExtension is for testing only.
func SetRawExtension(base Message, id int32, b []byte) {
if ebase, ok := base.(extensionsBytes); ok {
clearExtension(base, id)
ext := ebase.GetExtensions()
*ext = append(*ext, b...)
return
}
epb, ok := extendable(base)
if !ok {
return
}
extmap := epb.extensionsWrite()
extmap[id] = Extension{enc: b}
}
// isExtensionField returns true iff the given field number is in an extension range.
func isExtensionField(pb extensionRange, field int32) bool {
for _, er := range pb.ExtensionRangeArray() {
if er.Start <= field && field <= er.End {
return true
}
}
return false
}
// checkExtensionTypes checks that the given extension is valid for pb.
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
var pbi interface{} = pb
// Check the extended type.
if ea, ok := pbi.(extensionAdapter); ok {
pbi = ea.extendableProtoV1
}
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
}
// Check the range.
if !isExtensionField(pb, extension.Field) {
return errors.New("proto: bad extension number; not in declared ranges")
}
return nil
}
// extPropKey is sufficient to uniquely identify an extension.
type extPropKey struct {
base reflect.Type
field int32
}
var extProp = struct {
sync.RWMutex
m map[extPropKey]*Properties
}{
m: make(map[extPropKey]*Properties),
}
func extensionProperties(ed *ExtensionDesc) *Properties {
key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
extProp.RLock()
if prop, ok := extProp.m[key]; ok {
extProp.RUnlock()
return prop
}
extProp.RUnlock()
extProp.Lock()
defer extProp.Unlock()
// Check again.
if prop, ok := extProp.m[key]; ok {
return prop
}
prop := new(Properties)
prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
extProp.m[key] = prop
return prop
}
// encode encodes any unmarshaled (unencoded) extensions in e.
func encodeExtensions(e *XXX_InternalExtensions) error {
m, mu := e.extensionsRead()
if m == nil {
return nil // fast path
}
mu.Lock()
defer mu.Unlock()
return encodeExtensionsMap(m)
}
// encode encodes any unmarshaled (unencoded) extensions in e.
func encodeExtensionsMap(m map[int32]Extension) error {
for k, e := range m {
if e.value == nil || e.desc == nil {
// Extension is only in its encoded form.
continue
}
// We don't skip extensions that have an encoded form set,
// because the extension value may have been mutated after
// the last time this function was called.
et := reflect.TypeOf(e.desc.ExtensionType)
props := extensionProperties(e.desc)
p := NewBuffer(nil)
// If e.value has type T, the encoder expects a *struct{ X T }.
// Pass a *T with a zero field and hope it all works out.
x := reflect.New(et)
x.Elem().Set(reflect.ValueOf(e.value))
if err := props.enc(p, props, toStructPointer(x)); err != nil {
return err
}
e.enc = p.buf
m[k] = e
}
return nil
}
func extensionsSize(e *XXX_InternalExtensions) (n int) {
m, mu := e.extensionsRead()
if m == nil {
return 0
}
mu.Lock()
defer mu.Unlock()
return extensionsMapSize(m)
}
func extensionsMapSize(m map[int32]Extension) (n int) {
for _, e := range m {
if e.value == nil || e.desc == nil {
// Extension is only in its encoded form.
n += len(e.enc)
continue
}
// We don't skip extensions that have an encoded form set,
// because the extension value may have been mutated after
// the last time this function was called.
et := reflect.TypeOf(e.desc.ExtensionType)
props := extensionProperties(e.desc)
// If e.value has type T, the encoder expects a *struct{ X T }.
// Pass a *T with a zero field and hope it all works out.
x := reflect.New(et)
x.Elem().Set(reflect.ValueOf(e.value))
n += props.size(props, toStructPointer(x))
}
return
}
// HasExtension returns whether the given extension is present in pb.
func HasExtension(pb Message, extension *ExtensionDesc) bool {
if epb, doki := pb.(extensionsBytes); doki {
ext := epb.GetExtensions()
buf := *ext
o := 0
for o < len(buf) {
tag, n := DecodeVarint(buf[o:])
fieldNum := int32(tag >> 3)
if int32(fieldNum) == extension.Field {
return true
}
wireType := int(tag & 0x7)
o += n
l, err := size(buf[o:], wireType)
if err != nil {
return false
}
o += l
}
return false
}
// TODO: Check types, field numbers, etc.?
epb, ok := extendable(pb)
if !ok {
return false
}
extmap, mu := epb.extensionsRead()
if extmap == nil {
return false
}
mu.Lock()
_, ok = extmap[extension.Field]
mu.Unlock()
return ok
}
func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int {
ext := pb.GetExtensions()
for offset < len(*ext) {
tag, n1 := DecodeVarint((*ext)[offset:])
fieldNum := int32(tag >> 3)
wireType := int(tag & 0x7)
n2, err := size((*ext)[offset+n1:], wireType)
if err != nil {
panic(err)
}
newOffset := offset + n1 + n2
if fieldNum == theFieldNum {
*ext = append((*ext)[:offset], (*ext)[newOffset:]...)
return offset
}
offset = newOffset
}
return -1
}
// ClearExtension removes the given extension from pb.
func ClearExtension(pb Message, extension *ExtensionDesc) {
clearExtension(pb, extension.Field)
}
func clearExtension(pb Message, fieldNum int32) {
if epb, doki := pb.(extensionsBytes); doki {
offset := 0
for offset != -1 {
offset = deleteExtension(epb, fieldNum, offset)
}
return
}
epb, ok := extendable(pb)
if !ok {
return
}
// TODO: Check types, field numbers, etc.?
extmap := epb.extensionsWrite()
delete(extmap, fieldNum)
}
// GetExtension parses and returns the given extension of pb.
// If the extension is not present and has no default value it returns ErrMissingExtension.
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
if epb, doki := pb.(extensionsBytes); doki {
ext := epb.GetExtensions()
o := 0
for o < len(*ext) {
tag, n := DecodeVarint((*ext)[o:])
fieldNum := int32(tag >> 3)
wireType := int(tag & 0x7)
l, err := size((*ext)[o+n:], wireType)
if err != nil {
return nil, err
}
if int32(fieldNum) == extension.Field {
v, err := decodeExtension((*ext)[o:o+n+l], extension)
if err != nil {
return nil, err
}
return v, nil
}
o += n + l
}
return defaultExtensionValue(extension)
}
epb, ok := extendable(pb)
if !ok {
return nil, errors.New("proto: not an extendable proto")
}
if err := checkExtensionTypes(epb, extension); err != nil {
return nil, err
}
emap, mu := epb.extensionsRead()
if emap == nil {
return defaultExtensionValue(extension)
}
mu.Lock()
defer mu.Unlock()
e, ok := emap[extension.Field]
if !ok {
// defaultExtensionValue returns the default value or
// ErrMissingExtension if there is no default.
return defaultExtensionValue(extension)
}
if e.value != nil {
// Already decoded. Check the descriptor, though.
if e.desc != extension {
// This shouldn't happen. If it does, it means that
// GetExtension was called twice with two different
// descriptors with the same field number.
return nil, errors.New("proto: descriptor conflict")
}
return e.value, nil
}
v, err := decodeExtension(e.enc, extension)
if err != nil {
return nil, err
}
// Remember the decoded version and drop the encoded version.
// That way it is safe to mutate what we return.
e.value = v
e.desc = extension
e.enc = nil
emap[extension.Field] = e
return e.value, nil
}
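// Illustrative sketch only (E_MyExt and MyMessage are hypothetical names, not defined in this
// package): from a client package, the extension accessors above are typically combined as
//
//	msg := &MyMessage{}
//	if err := proto.SetExtension(msg, E_MyExt, proto.String("hello")); err != nil {
//		// handle err
//	}
//	if proto.HasExtension(msg, E_MyExt) {
//		v, err := proto.GetExtension(msg, E_MyExt) // v holds a *string here
//		_, _ = v, err
//	}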
// defaultExtensionValue returns the default value for extension.
// If no default for an extension is defined ErrMissingExtension is returned.
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
sf, _, err := fieldDefault(t, props)
if err != nil {
return nil, err
}
|
if t.Kind() != reflect.Ptr {
// We do not need to return a Ptr, we can directly return sf.value.
return sf.value, nil
}
// We need to return an interface{} that is a pointer to sf.value.
value := reflect.New(t).Elem()
value.Set(reflect.New(value.Type().Elem()))
if sf.kind == reflect.Int32 {
// We may have an int32 or an enum, but the underlying data is int32.
// Since we can't set an int32 into a non int32 reflect.value directly
// set it as a int32.
value.Elem().SetInt(int64(sf.value.(int32)))
} else {
value.Elem().Set(reflect.ValueOf(sf.value))
}
return value.Interface(), nil
}
// decodeExtension decodes an extension encoded in b.
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
o := NewBuffer(b)
t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
// t is a pointer to a struct, pointer to basic type or a slice.
// Allocate a "field" to store the pointer/slice itself; the
// pointer/slice will be stored here. We pass
// the address of this field to props.dec.
// This passes a zero field and a *t and lets props.dec
// interpret it as a *struct{ x t }.
value := reflect.New(t).Elem()
for {
// Discard wire type and field number varint. It isn't needed.
if _, err := o.DecodeVarint(); err != nil {
return nil, err
}
if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
return nil, err
}
if o.index >= len(o.buf) {
break
}
}
return value.Interface(), nil
}
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
// The returned slice has the same length as es; missing extensions will appear as nil elements.
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
extensions = make([]interface{}, len(es))
for i, e := range es {
extensions[i], err = GetExtension(pb, e)
if err == ErrMissingExtension {
err = nil
}
if err != nil {
return
}
}
return
}
// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
// just the Field field, which defines the extension's field number.
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
epb, ok := extendable(pb)
if !ok {
return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
}
registeredExtensions := RegisteredExtensions(pb)
emap, mu := epb.extensionsRead()
mu.Lock()
defer mu.Unlock()
extensions := make([]*ExtensionDesc, 0, len(emap))
for extid, e := range emap {
desc := e.desc
if desc == nil {
desc = registeredExtensions[extid]
if desc == nil {
desc = &ExtensionDesc{Field: extid}
}
}
extensions = append(extensions, desc)
}
return extensions, nil
}
// SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
if epb, doki := pb.(extensionsBytes); doki {
ClearExtension(pb, extension)
ext := epb.GetExtensions()
et := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
p := NewBuffer(nil)
x := reflect.New(et)
x.Elem().Set(reflect.ValueOf(value))
if err := props.enc(p, props, toStructPointer(x)); err != nil {
return err
}
*ext = append(*ext, p.buf...)
return nil
}
epb, ok := extendable(pb)
if !ok {
return errors.New("proto: not an extendable proto")
}
if err := checkExtensionTypes(epb, extension); err != nil {
return err
}
typ := reflect.TypeOf(extension.ExtensionType)
if typ != reflect.TypeOf(value) {
return errors.New("proto: bad extension value type")
}
// nil extension values need to be caught early, because the
// encoder can't distinguish an ErrNil due to a nil extension
// from an ErrNil due to a missing field. Extensions are
// always optional, so the encoder would just swallow the error
// and drop all the extensions from the encoded message.
if reflect.ValueOf(value).IsNil() {
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
}
extmap := epb.extensionsWrite()
extmap[extension.Field] = Extension{desc: extension, value: value}
return nil
}
// ClearAllExtensions clears all extensions from pb.
func ClearAllExtensions(pb Message) {
if epb, doki := pb.(extensionsBytes); doki {
ext := epb.GetExtensions()
*ext = []byte{}
return
}
epb, ok := extendable(pb)
if !ok {
return
}
m := epb.extensionsWrite()
for k := range m {
delete(m, k)
}
}
// A global registry of extensions.
// The generated code will register the generated descriptors by calling RegisterExtension.
var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
// RegisterExtension is called from the generated code.
func RegisterExtension(desc *ExtensionDesc) {
st := reflect.TypeOf(desc.ExtendedType).Elem()
m := extensionMaps[st]
if m == nil {
m = make(map[int32]*ExtensionDesc)
extensionMaps[st] = m
}
if _, ok := m[desc.Field]; ok {
panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
}
m[desc.Field] = desc
}
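// Illustration (hypothetical generated code, not part of this file): the protocol compiler
// normally emits an ExtensionDesc and registers it from an init function, roughly:
//
//	var E_MyExt = &proto.ExtensionDesc{
//		ExtendedType:  (*MyBase)(nil),
//		ExtensionType: (*string)(nil),
//		Field:         12345,
//		Name:          "mypkg.my_ext",
//		Tag:           "bytes,12345,opt,name=my_ext",
//	}
//
//	func init() { proto.RegisterExtension(E_MyExt) }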
// RegisteredExtensions returns a map of the registered extensions of a
// protocol buffer struct, indexed by the extension number.
// The argument pb should be a nil pointer to the struct type.
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
return extensionMaps[reflect.TypeOf(pb).Elem()]
} | if sf == nil || sf.value == nil {
// There is no default value.
return nil, ErrMissingExtension
} |
0007_auto__add_field_order_billing_call_prefix__add_field_order_shipping_ca.py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Order.billing_call_prefix'
db.add_column(u'shop_order', 'billing_call_prefix', self.gf('django.db.models.fields.CharField')(default='', max_length=100), keep_default=False)
# Adding field 'Order.shipping_call_prefix'
db.add_column(u'shop_order', 'shipping_call_prefix', self.gf('django.db.models.fields.CharField')(default='', max_length=100), keep_default=False)
def | (self, orm):
# Deleting field 'Order.billing_call_prefix'
db.delete_column(u'shop_order', 'billing_call_prefix')
# Deleting field 'Order.shipping_call_prefix'
db.delete_column(u'shop_order', 'shipping_call_prefix')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 13, 7, 33, 36, 439968)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 13, 7, 33, 36, 439200)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'shop.order': {
'Meta': {'object_name': 'Order'},
'_order_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'billing_address': ('django.db.models.fields.TextField', [], {}),
'billing_call_prefix': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_company': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'billing_country': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'billing_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_phone': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'billing_zip_code': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'confirmed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'data': ('plata.fields.JSONField', [], {'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items_discount': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '4'}),
'items_subtotal': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '4'}),
'items_tax': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '4'}),
'language_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'paid': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '4'}),
'shipping_address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'shipping_call_prefix': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'shipping_company': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'shipping_cost': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '18', 'decimal_places': '4', 'blank': 'True'}),
'shipping_country': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'shipping_discount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '18', 'decimal_places': '4', 'blank': 'True'}),
'shipping_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'shipping_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'shipping_method': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'shipping_phone': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'shipping_same_as_billing': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'shipping_tax': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '4'}),
'shipping_zip_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '10'}),
'total': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '4'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'orders'", 'null': 'True', 'to': u"orm['auth.User']"})
},
u'shop.orderitem': {
'Meta': {'unique_together': "(('order', 'product'),)", 'object_name': 'OrderItem'},
'_line_item_discount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '18', 'decimal_places': '4', 'blank': 'True'}),
'_line_item_price': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '18', 'decimal_places': '4'}),
'_line_item_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '18', 'decimal_places': '4'}),
'_unit_price': ('django.db.models.fields.DecimalField', [], {'max_digits': '18', 'decimal_places': '4'}),
'_unit_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '18', 'decimal_places': '4'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'data': ('plata.fields.JSONField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_sale': ('django.db.models.fields.BooleanField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': u"orm['shop.Order']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['zai_products.ProductVariant']", 'null': 'True', 'blank': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {}),
'sku': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'tax_class': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shop.TaxClass']", 'null': 'True', 'blank': 'True'}),
'tax_rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'})
},
u'shop.orderpayment': {
'Meta': {'object_name': 'OrderPayment'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
'authorized': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'data': ('plata.fields.JSONField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payments'", 'to': u"orm['shop.Order']"}),
'payment_method': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'payment_module': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'payment_module_key': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '10'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'transaction_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
},
u'shop.orderstatus': {
'Meta': {'object_name': 'OrderStatus'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'statuses'", 'to': u"orm['shop.Order']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '20'})
},
u'shop.taxclass': {
'Meta': {'object_name': 'TaxClass'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'priority': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'})
},
u'zai_products.product': {
'Meta': {'object_name': 'Product'},
'_unit_price': ('django.db.models.fields.DecimalField', [], {'max_digits': '18', 'decimal_places': '4'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_de': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_it': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_ru': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_de': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name_it': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name_ru': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'tax_class': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['shop.TaxClass']"}),
'tax_included': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
},
u'zai_products.productvariant': {
'Meta': {'object_name': 'ProductVariant'},
'grip': ('django.db.models.fields.CharField', [], {'default': "'chocolate'", 'max_length': '64'}),
'hand': ('django.db.models.fields.CharField', [], {'default': "'left'", 'max_length': '64'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length': ('django.db.models.fields.CharField', [], {'default': "'small'", 'max_length': '64'}),
'lie': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['zai_products.Product']"}),
'shaft': ('django.db.models.fields.CharField', [], {'default': "'single'", 'max_length': '64'}),
'special_requirements': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
}
}
complete_apps = ['shop']
| backwards |
64-es2015.js | (window["webpackJsonp"] = window["webpackJsonp"] || []).push([[64],{
/***/ "./node_modules/@ionic/core/dist/esm/ion-split-pane-ios.entry.js":
/*!***********************************************************************!*\
!*** ./node_modules/@ionic/core/dist/esm/ion-split-pane-ios.entry.js ***!
\***********************************************************************/
/*! exports provided: ion_split_pane */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
__webpack_require__.r(__webpack_exports__);
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "ion_split_pane", function() { return SplitPane; });
/* harmony import */ var _index_26ec602c_js__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! ./index-26ec602c.js */ "./node_modules/@ionic/core/dist/esm/index-26ec602c.js");
/* harmony import */ var _ionic_global_1bf1fa84_js__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ./ionic-global-1bf1fa84.js */ "./node_modules/@ionic/core/dist/esm/ionic-global-1bf1fa84.js");
const splitPaneIosCss = ":host{--side-width:100%;left:0;right:0;top:0;bottom:0;display:-ms-flexbox;display:flex;position:absolute;-ms-flex-direction:row;flex-direction:row;-ms-flex-wrap:nowrap;flex-wrap:nowrap;contain:strict}::slotted(ion-menu.menu-pane-visible){-ms-flex:0 1 auto;flex:0 1 auto;width:var(--side-width);min-width:var(--side-min-width);max-width:var(--side-max-width)}:host(.split-pane-visible) ::slotted(.split-pane-side),:host(.split-pane-visible) ::slotted(.split-pane-main){left:0;right:0;top:0;bottom:0;position:relative;-webkit-box-shadow:none !important;box-shadow:none !important;z-index:0}:host(.split-pane-visible) ::slotted(.split-pane-main){-ms-flex:1;flex:1}:host(.split-pane-visible) ::slotted(.split-pane-side:not(ion-menu)),:host(.split-pane-visible) ::slotted(ion-menu.split-pane-side.menu-enabled){display:-ms-flexbox;display:flex;-ms-flex-negative:0;flex-shrink:0}::slotted(.split-pane-side:not(ion-menu)){display:none}:host(.split-pane-visible) ::slotted(.split-pane-side){-ms-flex-order:-1;order:-1}:host(.split-pane-visible) ::slotted(.split-pane-side[side=end]){-ms-flex-order:1;order:1}:host{--border:0.55px solid var(--ion-item-border-color, var(--ion-border-color, var(--ion-color-step-250, #c8c7cc)));--side-min-width:270px;--side-max-width:28%}:host(.split-pane-visible) ::slotted(.split-pane-side){border-left:0;border-right:var(--border);border-top:0;border-bottom:0;min-width:var(--side-min-width);max-width:var(--side-max-width)}@supports ((-webkit-margin-start: 0) or (margin-inline-start: 0)) or (-webkit-margin-start: 0){:host(.split-pane-visible) ::slotted(.split-pane-side){border-left:unset;border-right:unset;-webkit-border-start:0;border-inline-start:0;-webkit-border-end:var(--border);border-inline-end:var(--border)}}:host(.split-pane-visible) ::slotted(.split-pane-side[side=end]){border-left:var(--border);border-right:0;border-top:0;border-bottom:0;min-width:var(--side-min-width);max-width:var(--side-max-width)}@supports ((-webkit-margin-start: 0) or (margin-inline-start: 0)) or (-webkit-margin-start: 0){:host(.split-pane-visible) ::slotted(.split-pane-side[side=end]){border-left:unset;border-right:unset;-webkit-border-start:var(--border);border-inline-start:var(--border);-webkit-border-end:0;border-inline-end:0}}";
const splitPaneMdCss = ":host{--side-width:100%;left:0;right:0;top:0;bottom:0;display:-ms-flexbox;display:flex;position:absolute;-ms-flex-direction:row;flex-direction:row;-ms-flex-wrap:nowrap;flex-wrap:nowrap;contain:strict}::slotted(ion-menu.menu-pane-visible){-ms-flex:0 1 auto;flex:0 1 auto;width:var(--side-width);min-width:var(--side-min-width);max-width:var(--side-max-width)}:host(.split-pane-visible) ::slotted(.split-pane-side),:host(.split-pane-visible) ::slotted(.split-pane-main){left:0;right:0;top:0;bottom:0;position:relative;-webkit-box-shadow:none !important;box-shadow:none !important;z-index:0}:host(.split-pane-visible) ::slotted(.split-pane-main){-ms-flex:1;flex:1}:host(.split-pane-visible) ::slotted(.split-pane-side:not(ion-menu)),:host(.split-pane-visible) ::slotted(ion-menu.split-pane-side.menu-enabled){display:-ms-flexbox;display:flex;-ms-flex-negative:0;flex-shrink:0}::slotted(.split-pane-side:not(ion-menu)){display:none}:host(.split-pane-visible) ::slotted(.split-pane-side){-ms-flex-order:-1;order:-1}:host(.split-pane-visible) ::slotted(.split-pane-side[side=end]){-ms-flex-order:1;order:1}:host{--border:1px solid var(--ion-item-border-color, var(--ion-border-color, var(--ion-color-step-150, rgba(0, 0, 0, 0.13))));--side-min-width:270px;--side-max-width:28%}:host(.split-pane-visible) ::slotted(.split-pane-side){border-left:0;border-right:var(--border);border-top:0;border-bottom:0;min-width:var(--side-min-width);max-width:var(--side-max-width)}@supports ((-webkit-margin-start: 0) or (margin-inline-start: 0)) or (-webkit-margin-start: 0){:host(.split-pane-visible) ::slotted(.split-pane-side){border-left:unset;border-right:unset;-webkit-border-start:0;border-inline-start:0;-webkit-border-end:var(--border);border-inline-end:var(--border)}}:host(.split-pane-visible) ::slotted(.split-pane-side[side=end]){border-left:var(--border);border-right:0;border-top:0;border-bottom:0;min-width:var(--side-min-width);max-width:var(--side-max-width)}@supports ((-webkit-margin-start: 0) or (margin-inline-start: 0)) or (-webkit-margin-start: 0){:host(.split-pane-visible) ::slotted(.split-pane-side[side=end]){border-left:unset;border-right:unset;-webkit-border-start:var(--border);border-inline-start:var(--border);-webkit-border-end:0;border-inline-end:0}}";
const SPLIT_PANE_MAIN = 'split-pane-main';
const SPLIT_PANE_SIDE = 'split-pane-side';
const QUERY = {
'xs': '(min-width: 0px)',
'sm': '(min-width: 576px)',
'md': '(min-width: 768px)',
'lg': '(min-width: 992px)',
'xl': '(min-width: 1200px)',
'never': ''
};
const SplitPane = class {
constructor(hostRef) {
Object(_index_26ec602c_js__WEBPACK_IMPORTED_MODULE_0__["r"])(this, hostRef);
this.visible = false;
/**
* If `true`, the split pane will be hidden.
*/
this.disabled = false;
/**
* When the split-pane should be shown.
* Can be a CSS media query expression, or a shortcut expression.
* Can also be a boolean expression.
*/
this.when = QUERY['lg'];
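        // A usage sketch (hypothetical template; `when` accepts a QUERY shortcut key
        // like "md", a raw media-query string, or a boolean, per the comment above):
        //   <ion-split-pane when="md" content-id="main"> ... </ion-split-pane>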
this.ionSplitPaneVisible = Object(_index_26ec602c_js__WEBPACK_IMPORTED_MODULE_0__["d"])(this, "ionSplitPaneVisible", 7);
}
visibleChanged(visible) {
const detail = { visible, isPane: this.isPane.bind(this) };
this.ionSplitPaneVisible.emit(detail);
}
connectedCallback() {
this.styleChildren();
this.updateState();
}
disconnectedCallback() {
if (this.rmL) {
this.rmL();
this.rmL = undefined;
}
}
updateState() {
if (this.rmL) {
this.rmL();
this.rmL = undefined;
}
// Check if the split-pane is disabled
if (this.disabled) {
this.visible = false;
return;
}
// When query is a boolean
const query = this.when;
if (typeof query === 'boolean') {
this.visible = query;
return;
}
// When query is a string, let's find first if it is a shortcut
const mediaQuery = QUERY[query] || query;
// Media query is empty or null, we hide it
if (mediaQuery.length === 0) {
this.visible = false;
return;
}
if (window.matchMedia) {
// Listen on media query
const callback = (q) => {
this.visible = q.matches; | this.rmL = () => mediaList.removeListener(callback);
this.visible = mediaList.matches;
}
}
isPane(element) {
if (!this.visible) {
return false;
}
return element.parentElement === this.el
&& element.classList.contains(SPLIT_PANE_SIDE);
}
styleChildren() {
const contentId = this.contentId;
const children = this.el.children;
const nu = this.el.childElementCount;
let foundMain = false;
for (let i = 0; i < nu; i++) {
const child = children[i];
const isMain = contentId !== undefined && child.id === contentId;
if (isMain) {
if (foundMain) {
console.warn('split pane cannot have more than one main node');
return;
}
foundMain = true;
}
setPaneClass(child, isMain);
}
if (!foundMain) {
console.warn('split pane does not have a specified main node');
}
}
render() {
const mode = Object(_ionic_global_1bf1fa84_js__WEBPACK_IMPORTED_MODULE_1__["b"])(this);
return (Object(_index_26ec602c_js__WEBPACK_IMPORTED_MODULE_0__["h"])(_index_26ec602c_js__WEBPACK_IMPORTED_MODULE_0__["H"], { class: {
[mode]: true,
// Used internally for styling
[`split-pane-${mode}`]: true,
'split-pane-visible': this.visible
} }, Object(_index_26ec602c_js__WEBPACK_IMPORTED_MODULE_0__["h"])("slot", null)));
}
get el() { return Object(_index_26ec602c_js__WEBPACK_IMPORTED_MODULE_0__["e"])(this); }
static get watchers() { return {
"visible": ["visibleChanged"],
"disabled": ["updateState"],
"when": ["updateState"]
}; }
};
const setPaneClass = (el, isMain) => {
let toAdd;
let toRemove;
if (isMain) {
toAdd = SPLIT_PANE_MAIN;
toRemove = SPLIT_PANE_SIDE;
}
else {
toAdd = SPLIT_PANE_SIDE;
toRemove = SPLIT_PANE_MAIN;
}
const classList = el.classList;
classList.add(toAdd);
classList.remove(toRemove);
};
SplitPane.style = {
/*STENCIL:MODE:ios*/ ios: splitPaneIosCss,
/*STENCIL:MODE:md*/ md: splitPaneMdCss
};
/***/ })
}]);
//# sourceMappingURL=64-es2015.js.map | };
const mediaList = window.matchMedia(mediaQuery);
mediaList.addListener(callback); |
test_scaling.py | from unittest import TestCase
from unittest.mock import Mock
import utils
import data
from utils import prepare_data
from tests.mock_data import get_df, get_preproc_config
class TestScaling2d(TestCase):
def setUp(self):
try:
reload(data)
reload(utils)
except NameError:
import importlib
importlib.reload(data)
importlib.reload(utils)
utils.pd.read_csv = Mock(return_value=get_df())
data.Data2d.decompose = Mock()
def test_scaleY(self):
# ensure scaling works
|
def test_scale_revert(self):
c = get_preproc_config(use_exog=True)
d = prepare_data(c)
self.assertAlmostEqual(d.revert(d.trainY)[5+c['horizon']-1],
d.trainYref[5], delta=0.9)
def test_scaleYref(self):
# assert original Y's are not changed after scaling
c = get_preproc_config(use_exog=True, horizon=1)
d = prepare_data(c)
self.assertEqual(52.0, d.trainYref[0])
self.assertEqual(242.0, d.trainYref[-1])
self.assertEqual(292.0, d.valYref[0])
self.assertEqual(322.0, d.valYref[-1])
self.assertEqual(372.0, d.testYref[0])
self.assertEqual(402.0, d.testYref[-1])
def test_scaleX(self):
c = get_preproc_config(use_exog=True)
d = prepare_data(c)
# train
self.assertEqual(0, int(d.trainX[0][0]))
self.assertEqual(1, int(d.trainX[-1][1]))
class TestScaling3d(TestCase):
def setUp(self):
try:
reload(data)
reload(utils)
except NameError:
import importlib
importlib.reload(data)
importlib.reload(utils)
utils.pd.read_csv = Mock(return_value=get_df())
data.Data3d.decompose = Mock()
def test_scaleY(self):
# ensure scaling works
c = get_preproc_config(use_exog=True, horizon=1)
d = prepare_data(c)
d = prepare_data(c, dim="3d")
# train
self.assertEqual(0.0, d.trainY[0])
self.assertEqual(1.0, d.trainY[-1])
# val
self.assertTrue(d.valY[0] != d.valYref[0])
self.assertTrue(d.valY[-1] != d.valYref[-1])
# test
self.assertTrue(d.testY[0] != d.testYref[0])
self.assertTrue(d.testY[-1] != d.testYref[-1])
def test_scale_revert(self):
c = get_preproc_config(use_exog=True)
d = prepare_data(c)
d = prepare_data(c, dim="3d")
        self.assertAlmostEqual(d.revert(d.trainY)[5], d.trainYref[5], delta=0.9)
def test_scaleYref(self):
# assert original Y's are not changed after scaling
c = get_preproc_config(use_exog=True, horizon=1)
d = prepare_data(c)
d = prepare_data(c, dim="3d")
self.assertEqual(52.0, d.trainYref[0])
self.assertEqual(242.0, d.trainYref[-1])
self.assertEqual(292.0, d.valYref[0])
self.assertEqual(322.0, d.valYref[-1])
self.assertEqual(372.0, d.testYref[0])
self.assertEqual(402.0, d.testYref[-1])
def test_scaleX(self):
c = get_preproc_config(use_exog=True)
d = prepare_data(c)
d = prepare_data(c, dim="3d")
# train
self.assertAlmostEqual(0.0, d.trainX[0][0][0], 2)
self.assertAlmostEqual(1.0, d.trainX[-1][-1][-1], 2)
| c = get_preproc_config(use_exog=True, horizon=1)
d = prepare_data(c)
# train
self.assertEqual(0.0, d.trainY[0])
self.assertEqual(1.0, d.trainY[-1])
# val
self.assertTrue(d.valY[c['horizon']-1] != d.valYref[0])
self.assertTrue(d.valY[-1] != d.valYref[-1])
# test
self.assertTrue(d.testY[c['horizon']-1] != d.testYref[0])
self.assertTrue(d.testY[-1] != d.testYref[-1]) |
showEpoch.ts | import { BigNumber } from "ethers";
import { Request, Response } from "express";
import {
ETH_PRICE_PRECISION,
RATIO_MULTIPLIER,
WBTC_PRICE_PRECISION,
ETH_USDC_OPTION_ID,
WBTC_USDC_OPTION_ID
} from "../../constants/constants";
import { PKKTHodlBoosterOption } from "../../typechain";
import {
canSettle,
canShowMoneyMovement,
getDeployedContractHelper,
getOptionStateData,
settlementResubmit,
isTransactionMined,
canShowInitiateSettlement,
getPredictedOptionData
} from "../utilities/utilities";
import { getPredictedEthData } from "./predictedData";
// /show/epoch route
export async function | (req: Request, res: Response) {
const optionVault = await getDeployedContractHelper("PKKTHodlBoosterOption") as PKKTHodlBoosterOption
let round = await optionVault.currentRound();
let optionRound = round - 1;
if (round === 0) {
optionRound = 0;
}
let predictedEthOption = getPredictedOptionData(req.app, ETH_USDC_OPTION_ID);
let predictedWbtcOption = getPredictedOptionData(req.app, WBTC_USDC_OPTION_ID);
let ethOption = {
callStrike: BigNumber.from(0),
putStrike: BigNumber.from(0),
callPremium: 0,
putPremium: 0
};
let wbtcOption = {
callStrike: BigNumber.from(0),
putStrike: BigNumber.from(0),
callPremium: 0,
putPremium: 0
}
try {
// Get contract option data to display
const [
ethCallOptionState,
ethPutOptionState,
wbtcCallOptionState,
wbtcPutOptionState] = await getOptionStateData(optionVault, round);
ethOption = {
callStrike: ethCallOptionState.strikePrice.div(10 ** ETH_PRICE_PRECISION),
putStrike: ethPutOptionState.strikePrice.div(10 ** ETH_PRICE_PRECISION),
callPremium: ethCallOptionState.premiumRate / RATIO_MULTIPLIER,
putPremium: ethPutOptionState.premiumRate / RATIO_MULTIPLIER
}
wbtcOption = {
callStrike: wbtcCallOptionState.strikePrice.div(10 ** WBTC_PRICE_PRECISION),
putStrike: wbtcPutOptionState.strikePrice.div(10 ** WBTC_PRICE_PRECISION),
callPremium: wbtcCallOptionState.premiumRate / RATIO_MULTIPLIER,
putPremium: wbtcPutOptionState.premiumRate / RATIO_MULTIPLIER
}
} catch (err) {
console.error(err);
}
//const initiateSettlementResubmit = settlementResubmit(req.app);
res.render(
"showEpoch",
{
round,
ethOption,
predictedEthOption: predictedEthOption,
wbtcOption,
predictedWbtcOption: predictedWbtcOption,
showInitiateSettlement: await canShowInitiateSettlement(req.app),
showMoneyMovement: (await canShowMoneyMovement(optionVault, round))
}
);
}
| showEpoch |
convert2jpg.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""convert all files in a folder to jpg."""
import argparse
import glob
import ntpath
import os
from PIL import Image
PARSER = argparse.ArgumentParser(description='')
PARSER.add_argument(
'--path_in',
dest='path_in',
default='',
help='folder where the pictures are',
required=True)
PARSER.add_argument(
'--path_out', dest='path_out', default='./', help='Destination folder')
PARSER.add_argument(
'--xsize', dest='xsize', type=int, default=0, help='horizontal size')
PARSER.add_argument(
'--ysize',
dest='ysize',
type=int,
default=0,
help='vertical size, if crop is true, will use xsize instead')
PARSER.add_argument(
'--delete',
dest='delete',
action='store_true',
help='use this flag to delete the original file after conversion')
PARSER.set_defaults(delete=False)
PARSER.add_argument(
'--crop',
dest='crop',
action='store_true',
help='by default the video is cropped')
PARSER.add_argument(
'--strech',
dest='crop',
action='store_false',
    help='the video can be stretched to a square ratio')
PARSER.set_defaults(crop=True)
ARGS = PARSER.parse_args()
def convert2jpg(path_in, path_out, args):
|
if __name__ == '__main__':
convert2jpg(ARGS.path_in, ARGS.path_out, ARGS)
| """Convert all file in a folder to jpg files.
Args:
path_in: the folder that contains the files to be converted
path_out: the folder to export the converted files
args: the args from the parser
args.crop: a boolean, true for cropping
args.delete: a boolean, true to remove original file
args.xsize: width size of the new jpg
args.ysize: height size of the new jpg
Returns:
nothing
Raises:
nothing
"""
path = '{}/*'.format(path_in)
print 'looking for all files in', path
files = glob.glob(path)
file_count = len(files)
print 'found ', file_count, 'files'
i = 0
for image_file in files:
i += 1
try:
if ntpath.basename(image_file).split('.')[-1] in ['jpg', 'jpeg', 'JPG']:
print i, '/', file_count, ' not converting file', image_file
continue # no need to convert
print i, '/', file_count, ' convert file', image_file
img = Image.open(image_file)
# print 'file open'
if args.xsize > 0:
if args.crop:
args.ysize = args.xsize
# resize the images
small_side = min(img.size)
center = img.size[0] / 2
margin_left = center - small_side / 2
margin_right = margin_left + small_side
img = img.crop((margin_left, 0, margin_right, small_side))
if args.ysize == 0:
args.ysize = args.xsize
img = img.resize((args.xsize, args.ysize), Image.ANTIALIAS)
# save file
# remove old path & old extension:
basename = ntpath.basename(image_file).split('.')[0]
filename = basename + '.jpg'
file_out = os.path.join(path_out, filename)
print i, '/', file_count, ' save file', file_out
img.save(file_out, 'JPEG')
if args.delete:
print 'deleting', image_file
os.remove(image_file)
except: # pylint: disable=bare-except
print """can't convert file""", image_file, 'to jpg :' |
skillbar.js | jQuery('.skillbar').each(function(){
jQuery(this).find('.skillbar-bar').animate({
width:jQuery(this).attr('data-percent') | },2000);
}); |
|
vmware.go | // +build windows
package collector
import (
"errors"
"github.com/StackExchange/wmi"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
)
func init() |
// A VmwareCollector is a Prometheus collector for WMI Win32_PerfRawData_vmGuestLib_VMem/Win32_PerfRawData_vmGuestLib_VCPU metrics
type VmwareCollector struct {
MemActive *prometheus.Desc
MemBallooned *prometheus.Desc
MemLimit *prometheus.Desc
MemMapped *prometheus.Desc
MemOverhead *prometheus.Desc
MemReservation *prometheus.Desc
MemShared *prometheus.Desc
MemSharedSaved *prometheus.Desc
MemShares *prometheus.Desc
MemSwapped *prometheus.Desc
MemTargetSize *prometheus.Desc
MemUsed *prometheus.Desc
CpuLimitMHz *prometheus.Desc
CpuReservationMHz *prometheus.Desc
CpuShares *prometheus.Desc
CpuStolenTotal *prometheus.Desc
CpuTimeTotal *prometheus.Desc
EffectiveVMSpeedMHz *prometheus.Desc
HostProcessorSpeedMHz *prometheus.Desc
}
// NewVmwareCollector constructs a new VmwareCollector
func NewVmwareCollector() (Collector, error) {
const subsystem = "vmware"
return &VmwareCollector{
MemActive: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "mem_active_bytes"),
"(MemActiveMB)",
nil,
nil,
),
MemBallooned: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "mem_ballooned_bytes"),
"(MemBalloonedMB)",
nil,
nil,
),
MemLimit: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "mem_limit_bytes"),
"(MemLimitMB)",
nil,
nil,
),
MemMapped: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "mem_mapped_bytes"),
"(MemMappedMB)",
nil,
nil,
),
MemOverhead: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "mem_overhead_bytes"),
"(MemOverheadMB)",
nil,
nil,
),
MemReservation: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "mem_reservation_bytes"),
"(MemReservationMB)",
nil,
nil,
),
MemShared: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "mem_shared_bytes"),
"(MemSharedMB)",
nil,
nil,
),
MemSharedSaved: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "mem_shared_saved_bytes"),
"(MemSharedSavedMB)",
nil,
nil,
),
MemShares: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "mem_shares"),
"(MemShares)",
nil,
nil,
),
MemSwapped: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "mem_swapped_bytes"),
"(MemSwappedMB)",
nil,
nil,
),
MemTargetSize: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "mem_target_size_bytes"),
"(MemTargetSizeMB)",
nil,
nil,
),
MemUsed: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "mem_used_bytes"),
"(MemUsedMB)",
nil,
nil,
),
CpuLimitMHz: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "cpu_limit_mhz"),
"(CpuLimitMHz)",
nil,
nil,
),
CpuReservationMHz: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "cpu_reservation_mhz"),
"(CpuReservationMHz)",
nil,
nil,
),
CpuShares: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "cpu_shares"),
"(CpuShares)",
nil,
nil,
),
CpuStolenTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "cpu_stolen_seconds_total"),
"(CpuStolenMs)",
nil,
nil,
),
CpuTimeTotal: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "cpu_time_seconds_total"),
"(CpuTimePercents)",
nil,
nil,
),
EffectiveVMSpeedMHz: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "effective_vm_speed_mhz"),
"(EffectiveVMSpeedMHz)",
nil,
nil,
),
HostProcessorSpeedMHz: prometheus.NewDesc(
prometheus.BuildFQName(Namespace, subsystem, "host_processor_speed_mhz"),
"(HostProcessorSpeedMHz)",
nil,
nil,
),
}, nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *VmwareCollector) Collect(ch chan<- prometheus.Metric) error {
if desc, err := c.collectMem(ch); err != nil {
log.Error("failed collecting vmware memory metrics:", desc, err)
return err
}
if desc, err := c.collectCpu(ch); err != nil {
log.Error("failed collecting vmware cpu metrics:", desc, err)
return err
}
return nil
}
type Win32_PerfRawData_vmGuestLib_VMem struct {
MemActiveMB uint64
MemBalloonedMB uint64
MemLimitMB uint64
MemMappedMB uint64
MemOverheadMB uint64
MemReservationMB uint64
MemSharedMB uint64
MemSharedSavedMB uint64
MemShares uint64
MemSwappedMB uint64
MemTargetSizeMB uint64
MemUsedMB uint64
}
type Win32_PerfRawData_vmGuestLib_VCPU struct {
CpuLimitMHz uint64
CpuReservationMHz uint64
CpuShares uint64
CpuStolenMs uint64
CpuTimePercents uint64
EffectiveVMSpeedMHz uint64
HostProcessorSpeedMHz uint64
}
func (c *VmwareCollector) collectMem(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_vmGuestLib_VMem
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
if len(dst) == 0 {
return nil, errors.New("WMI query returned empty result set")
}
ch <- prometheus.MustNewConstMetric(
c.MemActive,
prometheus.GaugeValue,
mbToBytes(dst[0].MemActiveMB),
)
ch <- prometheus.MustNewConstMetric(
c.MemBallooned,
prometheus.GaugeValue,
mbToBytes(dst[0].MemBalloonedMB),
)
ch <- prometheus.MustNewConstMetric(
c.MemLimit,
prometheus.GaugeValue,
mbToBytes(dst[0].MemLimitMB),
)
ch <- prometheus.MustNewConstMetric(
c.MemMapped,
prometheus.GaugeValue,
mbToBytes(dst[0].MemMappedMB),
)
ch <- prometheus.MustNewConstMetric(
c.MemOverhead,
prometheus.GaugeValue,
mbToBytes(dst[0].MemOverheadMB),
)
ch <- prometheus.MustNewConstMetric(
c.MemReservation,
prometheus.GaugeValue,
mbToBytes(dst[0].MemReservationMB),
)
ch <- prometheus.MustNewConstMetric(
c.MemShared,
prometheus.GaugeValue,
mbToBytes(dst[0].MemSharedMB),
)
ch <- prometheus.MustNewConstMetric(
c.MemSharedSaved,
prometheus.GaugeValue,
mbToBytes(dst[0].MemSharedSavedMB),
)
ch <- prometheus.MustNewConstMetric(
c.MemShares,
prometheus.GaugeValue,
float64(dst[0].MemShares),
)
ch <- prometheus.MustNewConstMetric(
c.MemSwapped,
prometheus.GaugeValue,
mbToBytes(dst[0].MemSwappedMB),
)
ch <- prometheus.MustNewConstMetric(
c.MemTargetSize,
prometheus.GaugeValue,
mbToBytes(dst[0].MemTargetSizeMB),
)
ch <- prometheus.MustNewConstMetric(
c.MemUsed,
prometheus.GaugeValue,
mbToBytes(dst[0].MemUsedMB),
)
return nil, nil
}
func mbToBytes(mb uint64) float64 {
return float64(mb * 1024 * 1024)
}
func (c *VmwareCollector) collectCpu(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []Win32_PerfRawData_vmGuestLib_VCPU
q := queryAll(&dst)
if err := wmi.Query(q, &dst); err != nil {
return nil, err
}
if len(dst) == 0 {
return nil, errors.New("WMI query returned empty result set")
}
ch <- prometheus.MustNewConstMetric(
c.CpuLimitMHz,
prometheus.GaugeValue,
float64(dst[0].CpuLimitMHz),
)
ch <- prometheus.MustNewConstMetric(
c.CpuReservationMHz,
prometheus.GaugeValue,
float64(dst[0].CpuReservationMHz),
)
ch <- prometheus.MustNewConstMetric(
c.CpuShares,
prometheus.GaugeValue,
float64(dst[0].CpuShares),
)
ch <- prometheus.MustNewConstMetric(
c.CpuStolenTotal,
prometheus.CounterValue,
float64(dst[0].CpuStolenMs)*ticksToSecondsScaleFactor,
)
ch <- prometheus.MustNewConstMetric(
c.CpuTimeTotal,
prometheus.CounterValue,
float64(dst[0].CpuTimePercents)*ticksToSecondsScaleFactor,
)
ch <- prometheus.MustNewConstMetric(
c.EffectiveVMSpeedMHz,
prometheus.GaugeValue,
float64(dst[0].EffectiveVMSpeedMHz),
)
ch <- prometheus.MustNewConstMetric(
c.HostProcessorSpeedMHz,
prometheus.GaugeValue,
float64(dst[0].HostProcessorSpeedMHz),
)
return nil, nil
}
| {
Factories["vmware"] = NewVmwareCollector
} |
index.js | export default class Manager {
/**
   * @param {Function} rollbackFn called whenever undo or redo runs; it receives the stepDetail and an isLastRollback flag, and must return a stepDetail to be pushed onto the opposite undo/redo stack
   * @param {Number} maxStep maximum number of steps kept on the undo and redo stacks
*/
constructor(rollbackFn, maxStep = 500) {
// if (typeof undoFn !== 'function') throw new Error('not define undoFn or undoFn is not a function')
// if (typeof redoFn !== 'function') throw new Error('not define redoFn or redoFn is not a function')
if (typeof rollbackFn !== 'function') throw new Error('not define rollbackFn or rollbackFn is not a function')
// this._undoFn = undoFn
// this._redoFn = redoFn
this._rollbackFn = rollbackFn
this._maxStep = maxStep
this._undoStack = []
this._redoStack = []
}
/**
* pushes a stepDetail on the undo stack, and clears the redo stack
   * @param {*} stepDetail the stepDetail to push onto the top of the undo stack
*/
push(stepDetail) {
if (!stepDetail) throw new Error('no stepDetail')
this._redoStack = []
this._undoStack.push(stepDetail)
if (this._undoStack.length > this._maxStep) this._undoStack.splice(0, 1)
return this
}
/**
   * calls the rollbackFn one or more times and pushes each returned stepDetail onto the redo stack
   * @param {Number} stepNum number of undo steps to perform
*/
undo(stepNum = 1) {
if (!this.canUndo) return this
if (stepNum > this._undoStack.length) stepNum = this._undoStack.length
let stepDetail = this._rollbackFn(this._undoStack[this._undoStack.length - 1], stepNum === 1)
this._undoStack.pop()
if (!stepDetail) throw new Error('the rollbackFn not return a stepDetail, manager can not be work well')
this._redoStack.push(stepDetail)
if (stepNum > 1) this.undo(stepNum - 1)
return this
}
/**
   * calls the rollbackFn one or more times and pushes each returned stepDetail onto the undo stack
   * @param {Number} stepNum number of redo steps to perform
*/
redo(stepNum = 1) {
if (!this.canRedo) return this
if (stepNum > this._redoStack.length) stepNum = this._redoStack.length
this._undoStack.push(this._rollbackFn(this._redoStack[this._redoStack.length - 1], stepNum === 1))
this._redoStack.pop()
if (stepNum > 1) this.redo(stepNum - 1)
return this
} | */
clear() {
this._undoStack = []
this._redoStack = []
}
get canUndo() {
return !!this._undoStack.length
}
get canRedo() {
return !!this._redoStack.length
}
get undoStack() {
return this._undoStack
}
get redoStack() {
return this._redoStack
}
} |
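// A minimal usage sketch (hypothetical snapshot-based rollbackFn; `currentState`
// is assumed to be managed by the caller and is not part of this module):
//   const manager = new Manager((stepDetail) => {
//     const previous = currentState
//     currentState = stepDetail     // roll the app back (or forward) to the stored snapshot
//     return previous               // returned value is pushed onto the opposite stack
//   })
//   manager.push(previousSnapshot)  // record a step after every change
//   manager.undo()                  // swap back one step
//   manager.redo()                  // re-apply it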
/**
* clear undo stack and redo stack |
2-12(strChange).py |
print("์ค๋์ " + str(year) + "๋
" + str(month) + "์ " + str(day) + "์ผ " + date +"์
๋๋ค.") | year = 2018
month = 7
day = 5
date = "๋ชฉ์์ผ" |
|
verification.rs | // Copyright 2015-2016 Brian Smith.
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND AND THE AUTHORS DISCLAIM ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
/// RSA PKCS#1 1.5 signatures.
use {bits, digest, error, private, signature};
use super::{bigint, N, PUBLIC_KEY_PUBLIC_MODULUS_MAX_LEN, RSAParameters,
parse_public_key};
use untrusted;
impl signature::VerificationAlgorithm for RSAParameters {
fn verify(&self, public_key: untrusted::Input, msg: untrusted::Input,
signature: untrusted::Input)
-> Result<(), error::Unspecified> {
let public_key = try!(parse_public_key(public_key));
verify_rsa(self, public_key, msg, signature)
}
}
impl private::Private for RSAParameters {}
macro_rules! rsa_params {
( $VERIFY_ALGORITHM:ident, $min_bits:expr, $PADDING_ALGORITHM:expr,
$doc_str:expr ) => {
#[doc=$doc_str]
///
/// Only available in `use_heap` mode.
pub static $VERIFY_ALGORITHM: RSAParameters =
RSAParameters {
padding_alg: $PADDING_ALGORITHM,
min_bits: bits::BitLength($min_bits),
};
}
}
rsa_params!(RSA_PKCS1_2048_8192_SHA1, 2048, &super::padding::RSA_PKCS1_SHA1,
"Verification of signatures using RSA keys of 2048-8192 bits,
PKCS#1.5 padding, and SHA-1.\n\nSee \"`RSA_PKCS1_*` Details\" in
`ring::signature`'s module-level documentation for more details.");
rsa_params!(RSA_PKCS1_2048_8192_SHA256, 2048, &super::RSA_PKCS1_SHA256,
"Verification of signatures using RSA keys of 2048-8192 bits,
PKCS#1.5 padding, and SHA-256.\n\nSee \"`RSA_PKCS1_*` Details\" in
`ring::signature`'s module-level documentation for more details.");
rsa_params!(RSA_PKCS1_2048_8192_SHA384, 2048, &super::RSA_PKCS1_SHA384,
"Verification of signatures using RSA keys of 2048-8192 bits,
PKCS#1.5 padding, and SHA-384.\n\nSee \"`RSA_PKCS1_*` Details\" in
`ring::signature`'s module-level documentation for more details.");
rsa_params!(RSA_PKCS1_2048_8192_SHA512, 2048, &super::RSA_PKCS1_SHA512,
"Verification of signatures using RSA keys of 2048-8192 bits,
PKCS#1.5 padding, and SHA-512.\n\nSee \"`RSA_PKCS1_*` Details\" in
`ring::signature`'s module-level documentation for more details.");
rsa_params!(RSA_PKCS1_3072_8192_SHA384, 3072, &super::RSA_PKCS1_SHA384,
"Verification of signatures using RSA keys of 3072-8192 bits,
PKCS#1.5 padding, and SHA-384.\n\nSee \"`RSA_PKCS1_*` Details\" in
`ring::signature`'s module-level documentation for more details.");
rsa_params!(RSA_PSS_2048_8192_SHA256, 2048, &super::RSA_PSS_SHA256,
"Verification of signatures using RSA keys of 2048-8192 bits,
PSS padding, and SHA-256.\n\nSee \"`RSA_PSS_*` Details\" in
`ring::signature`'s module-level documentation for more details.");
rsa_params!(RSA_PSS_2048_8192_SHA384, 2048, &super::RSA_PSS_SHA384,
"Verification of signatures using RSA keys of 2048-8192 bits,
PSS padding, and SHA-384.\n\nSee \"`RSA_PSS_*` Details\" in
`ring::signature`'s module-level documentation for more details.");
rsa_params!(RSA_PSS_2048_8192_SHA512, 2048, &super::RSA_PSS_SHA512,
"Verification of signatures using RSA keys of 2048-8192 bits,
PSS padding, and SHA-512.\n\nSee \"`RSA_PSS_*` Details\" in
`ring::signature`'s module-level documentation for more details.");
/// Lower-level API for the verification of RSA signatures.
///
/// When the public key is in DER-encoded PKCS#1 ASN.1 format, it is
/// recommended to use `ring::signature::verify()` with
/// `ring::signature::RSA_PKCS1_*`, because `ring::signature::verify()`
/// will handle the parsing in that case. Otherwise, this function can be used
/// to pass in the raw bytes for the public key components as
/// `untrusted::Input` arguments.
///
/// `params` determine what algorithm parameters (padding, digest algorithm,
/// key length range, etc.) are used in the verification. `msg` is the message
/// and `signature` is the signature.
///
/// `n` is the public key modulus and `e` is the public key exponent. Both are
/// interpreted as unsigned big-endian encoded values. Both must be positive
/// and neither may have any leading zeros.
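///
/// A minimal call sketch (assuming `n`, `e`, `msg`, and `sig` are byte buffers
/// already held by the caller, mirroring the test at the bottom of this file):
///
/// ```ignore
/// let result = signature::primitive::verify_rsa(
///     &signature::RSA_PKCS1_2048_8192_SHA256,
///     (untrusted::Input::from(&n), untrusted::Input::from(&e)),
///     untrusted::Input::from(&msg), untrusted::Input::from(&sig));
/// assert!(result.is_ok());
/// ```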
//
// There are a small number of tests that test `verify_rsa` directly, but the
// test coverage for this function mostly depends on the test coverage for the
// `signature::VerificationAlgorithm` implementation for `RSAParameters`. If we
// change that, test coverage for `verify_rsa()` will need to be reconsidered.
// (The NIST test vectors were originally in a form that was optimized for
// testing `verify_rsa` directly, but the testing work for RSA PKCS#1
// verification was done during the implementation of
// `signature::VerificationAlgorithm`, before `verify_rsa` was factored out).
pub fn verify_rsa(params: &RSAParameters,
(n, e): (untrusted::Input, untrusted::Input),
msg: untrusted::Input, signature: untrusted::Input)
-> Result<(), error::Unspecified> {
// Partially validate the public key. See
// `check_public_modulus_and_exponent()` for more details.
let n = try!(bigint::Positive::from_be_bytes(n));
let e = try!(bigint::Positive::from_be_bytes(e));
let max_bits = try!(bits::BitLength::from_usize_bytes(
PUBLIC_KEY_PUBLIC_MODULUS_MAX_LEN));
// XXX: FIPS 186-4 seems to indicate that the minimum
// exponent value is 2**16 + 1, but it isn't clear if this is just for
// signing or also for verification. We support exponents of 3 and larger
// for compatibility with other commonly-used crypto libraries.
let e_min_bits = bits::BitLength::from_usize_bits(2);
let (n, e) =
try!(super::check_public_modulus_and_exponent(n, e, params.min_bits,
max_bits, e_min_bits));
let n_bits = n.bit_length();
let n = try!(n.into_modulus::<N>());
// The signature must be the same length as the modulus, in bytes.
if signature.len() != n_bits.as_usize_bytes_rounded_up() {
return Err(error::Unspecified);
}
// RFC 8017 Section 5.2.2: RSAVP1.
// Step 1.
let s = try!(bigint::Positive::from_be_bytes_padded(signature));
let s = try!(s.into_elem::<N>(&n));
// Step 2.
let s = {
// Montgomery encode `s`.
let oneRR = try!(bigint::One::newRR(&n));
try!(bigint::elem_mul(oneRR.as_ref(), s, &n))
};
let m = try!(bigint::elem_exp_vartime(s, e, &n));
let m = try!(m.into_unencoded(&n));
// Step 3.
let mut decoded = [0u8; PUBLIC_KEY_PUBLIC_MODULUS_MAX_LEN];
let decoded = &mut decoded[..n_bits.as_usize_bytes_rounded_up()];
m.fill_be_bytes(decoded);
// Verify the padded message is correct.
let m_hash = digest::digest(params.padding_alg.digest_alg(),
msg.as_slice_less_safe());
untrusted::Input::from(decoded).read_all(
error::Unspecified, |m| params.padding_alg.verify(&m_hash, m, n_bits))
}
#[cfg(test)]
mod tests {
// We intentionally avoid `use super::*` so that we are sure to use only
// the public API; this ensures that enough of the API is public.
use {der, error, signature, test};
use untrusted;
#[test]
fn test_signature_rsa_pkcs1_verify() {
test::from_file("src/rsa/rsa_pkcs1_verify_tests.txt",
|section, test_case| {
assert_eq!(section, "");
let digest_name = test_case.consume_string("Digest");
let alg = match digest_name.as_ref() {
"SHA1" => &signature::RSA_PKCS1_2048_8192_SHA1,
"SHA256" => &signature::RSA_PKCS1_2048_8192_SHA256,
"SHA384" => &signature::RSA_PKCS1_2048_8192_SHA384,
"SHA512" => &signature::RSA_PKCS1_2048_8192_SHA512,
_ => { panic!("Unsupported digest: {}", digest_name) }
};
let public_key = test_case.consume_bytes("Key");
let public_key = untrusted::Input::from(&public_key);
// Sanity check that we correctly DER-encoded the originally-
// provided separate (n, e) components. When we add test vectors
// for improperly-encoded signatures, we'll have to revisit this.
assert!(public_key.read_all(error::Unspecified, |input| {
der::nested(input, der::Tag::Sequence, error::Unspecified,
|input| {
let _ = try!(der::positive_integer(input));
let _ = try!(der::positive_integer(input));
Ok(())
})
}).is_ok());
let msg = test_case.consume_bytes("Msg");
let msg = untrusted::Input::from(&msg);
let sig = test_case.consume_bytes("Sig");
let sig = untrusted::Input::from(&sig);
let expected_result = test_case.consume_string("Result");
let actual_result = signature::verify(alg, public_key, msg, sig);
assert_eq!(actual_result.is_ok(), expected_result == "P");
Ok(())
});
}
#[test]
fn test_signature_rsa_pss_verify() {
test::from_file("src/rsa/rsa_pss_verify_tests.txt",
|section, test_case| {
assert_eq!(section, "");
let digest_name = test_case.consume_string("Digest");
let alg = match digest_name.as_ref() {
"SHA256" => &signature::RSA_PSS_2048_8192_SHA256,
"SHA384" => &signature::RSA_PSS_2048_8192_SHA384,
"SHA512" => &signature::RSA_PSS_2048_8192_SHA512,
_ => { panic!("Unsupported digest: {}", digest_name) }
};
let public_key = test_case.consume_bytes("Key");
let public_key = untrusted::Input::from(&public_key);
// Sanity check that we correctly DER-encoded the originally-
// provided separate (n, e) components. When we add test vectors
// for improperly-encoded signatures, we'll have to revisit this.
assert!(public_key.read_all(error::Unspecified, |input| {
der::nested(input, der::Tag::Sequence, error::Unspecified,
|input| {
let _ = try!(der::positive_integer(input));
let _ = try!(der::positive_integer(input));
Ok(())
})
}).is_ok());
let msg = test_case.consume_bytes("Msg");
let msg = untrusted::Input::from(&msg);
let sig = test_case.consume_bytes("Sig");
let sig = untrusted::Input::from(&sig);
let expected_result = test_case.consume_string("Result");
let actual_result = signature::verify(alg, public_key, msg, sig);
assert_eq!(actual_result.is_ok(), expected_result == "P");
Ok(())
});
}
// Test for `primitive::verify()`. Read public key parts from a file
// and use them to verify a signature.
#[test]
fn | () {
test::from_file("src/rsa/rsa_primitive_verify_tests.txt",
|section, test_case| {
assert_eq!(section, "");
let n = test_case.consume_bytes("n");
let e = test_case.consume_bytes("e");
let msg = test_case.consume_bytes("Msg");
let sig = test_case.consume_bytes("Sig");
let expected = test_case.consume_string("Result");
let result = signature::primitive::verify_rsa(
&signature::RSA_PKCS1_2048_8192_SHA256,
(untrusted::Input::from(&n), untrusted::Input::from(&e)),
untrusted::Input::from(&msg), untrusted::Input::from(&sig));
assert_eq!(result.is_ok(), expected == "Pass");
Ok(())
})
}
}
| test_signature_rsa_primitive_verification |
organization.entity.d.ts | import { User } from "./user.entity";
export declare class | {
id: number;
name: string;
users: Array<User>;
parentId: number;
parent: Organization;
children: Array<Organization>;
}
| Organization |